/*
|
|
** 2000-05-29
|
|
**
|
|
** The author disclaims copyright to this source code. In place of
|
|
** a legal notice, here is a blessing:
|
|
**
|
|
** May you do good and not evil.
|
|
** May you find forgiveness for yourself and forgive others.
|
|
** May you share freely, never taking more than you give.
|
|
**
|
|
** Based on SQLite distribution v3.17.0
|
|
** Adopted for JavaScript by Artem Butusov <art.sormy@gmail.com>
|
|
**
|
|
*************************************************************************
|
|
** Driver template for the LEMON parser generator.
|
|
**
|
|
** The "lemon" program processes an LALR(1) input grammar file, then uses
|
|
** this template to construct a parser. The "lemon" program inserts text
|
|
** at each "%%" line. Also, any "P-a-r-s-e" identifer prefix (without the
|
|
** interstitial "-" characters) contained in this template is changed into
|
|
** the value of the %name directive from the grammar. Otherwise, the content
|
|
** of this template is copied straight through into the generate parser
|
|
** source file.
|
|
**
|
|
** The following is the concatenation of all %include directives from the
|
|
** input grammar file:
|
|
*/
|
|
/************ Begin %include sections from the grammar ************************/
|
|
|
|
// include something
|
|
/**************** End of %include directives **********************************/
|
|
function Parser() {
|
|
/* These constants specify the various numeric values for terminal symbols
** in a format understandable to "makeheaders".
**
** Code 0 is reserved for end-of-input (shown as "$" in yyTokenName).
** These codes must match the codes produced by the tokenizer that feeds
** this parser, and they index into yyTokenName below.
***************** Begin makeheaders token definitions *************************/
this.TOKEN_OR = 1;               // logical OR
this.TOKEN_AND = 2;              // logical AND
this.TOKEN_NOT = 3;              // logical NOT
this.TOKEN_INTEGER_LITERAL = 4;
this.TOKEN_FLOAT_LITERAL = 5;
this.TOKEN_BOOL_LITERAL = 6;
this.TOKEN_STRING_LITERAL = 7;
this.TOKEN_ID = 8;               // bare identifier
this.TOKEN_EQ = 9;               // equality comparison
this.TOKEN_NEQ = 10;             // inequality comparison
this.TOKEN_GT = 11;              // greater-than
this.TOKEN_GTE = 12;             // greater-or-equal
this.TOKEN_LT = 13;              // less-than
this.TOKEN_LTE = 14;             // less-or-equal
this.TOKEN_LIKE = 15;            // pattern match
this.TOKEN_NLIKE = 16;           // negated pattern match
this.TOKEN_LCB = 17;             // left curly brace (grouping, see rule 34)
this.TOKEN_RCB = 18;             // right curly brace
this.TOKEN_COMMA = 19;
this.TOKEN_ADDRESS = 20;         // introduces an address literal
this.TOKEN_LSB = 21;             // left square bracket
this.TOKEN_RSB = 22;             // right square bracket
this.TOKEN_DOT = 23;
this.TOKEN_OID = 24;             // introduces an oid literal
this.TOKEN_TIMEDIFF = 25;        // introduces a time-diff literal
this.TOKEN_COLON = 26;
/**************** End makeheaders token definitions ***************************/
|
|
|
|
/* The next section is a series of control #defines that specify
** various aspects of the generated parser.
** YYNOCODE            is a number of type YYCODETYPE that is not used for
**                     any terminal or nonterminal symbol.
** YYFALLBACK          If defined, this indicates that one or more tokens
**                     (also known as: "terminal symbols") have fall-back
**                     values which should be used if the original symbol
**                     would not parse.  This permits keywords to sometimes
**                     be used as identifiers, for example.
** YYSTACKDEPTH        is the maximum depth of the parser's stack.  If
**                     zero the stack is dynamically sized using realloc()
** YYERRORSYMBOL       is the code number of the error symbol.  If not
**                     defined, then do no error processing.
** YYNSTATE            the combined number of states.
** YYNRULE             the number of rules in the grammar
** YY_MAX_SHIFT        Maximum value for shift actions
** YY_MIN_SHIFTREDUCE  Minimum value for shift-reduce actions
** YY_MAX_SHIFTREDUCE  Maximum value for shift-reduce actions
** YY_MIN_REDUCE       Minimum value for reduce actions
** YY_ERROR_ACTION     The yy_action[] code for syntax error
** YY_ACCEPT_ACTION    The yy_action[] code for accept
** YY_NO_ACTION        The yy_action[] code for no-op
**
** NOTE: these values are emitted by the lemon tool from the grammar; do
** not edit them by hand — they must stay consistent with the tables below.
*/
/************* Begin control #defines *****************************************/
this.YYNOCODE = 55;
this.YYSTACKDEPTH = 100;
this.YYFALLBACK = false;
this.YYNSTATE = 38;
this.YYNRULE = 49;
this.YY_MAX_SHIFT = 37;
this.YY_MIN_SHIFTREDUCE = 80;
this.YY_MAX_SHIFTREDUCE = 128;
this.YY_MIN_REDUCE = 129;
this.YY_MAX_REDUCE = 177;
this.YY_ERROR_ACTION = 178;
this.YY_ACCEPT_ACTION = 179;
this.YY_NO_ACTION = 180;
/************* End control #defines *******************************************/
|
|
|
|
/* yytestcase() is invoked by the generated reduce code to mark coverage
** points.  Applications can pre-install a real implementation in the
** %include section to assist in verifying code coverage; for production
** use it should remain a no-op.  Install the no-op only when no
** implementation was provided earlier. */
this.yytestcase = this.yytestcase || function() {};
|
|
|
|
|
|
/* Next are the tables used to determine what action to take based on the
** current state and lookahead token.  These tables are used to implement
** functions that take a state number and lookahead value and return an
** action integer.
**
** Suppose the action integer is N.  Then the action is determined as
** follows
**
**   0 <= N <= YY_MAX_SHIFT             Shift N.  That is, push the lookahead
**                                      token onto the stack and goto state N.
**
**   N between YY_MIN_SHIFTREDUCE       Shift to an arbitrary state then
**     and YY_MAX_SHIFTREDUCE           reduce by rule N-YY_MIN_SHIFTREDUCE.
**
**   N between YY_MIN_REDUCE            Reduce by rule N-YY_MIN_REDUCE
**     and YY_MAX_REDUCE
**
**   N == YY_ERROR_ACTION               A syntax error has occurred.
**
**   N == YY_ACCEPT_ACTION              The parser accepts its input.
**
**   N == YY_NO_ACTION                  No such action.  Denotes unused
**                                      slots in the yy_action[] table.
**
** The action table is constructed as a single large table named yy_action[].
** Given state S and lookahead X, the action is computed as either:
**
**    (A)   N = yy_action[ yy_shift_ofst[S] + X ]
**    (B)   N = yy_default[S]
**
** The (A) formula is preferred.  The B formula is used instead if:
**    (1)  The yy_shift_ofst[S]+X value is out of range, or
**    (2)  yy_lookahead[yy_shift_ofst[S]+X] is not equal to X, or
**    (3)  yy_shift_ofst[S] equal YY_SHIFT_USE_DFLT.
** (Implementation note: YY_SHIFT_USE_DFLT is chosen so that
** YY_SHIFT_USE_DFLT+X will be out of range for all possible lookaheads X.
** Hence only tests (1) and (2) need to be evaluated.)
**
** The formulas above are for computing the action when the lookahead is
** a terminal symbol.  If the lookahead is a non-terminal (as occurs after
** a reduce action) then the yy_reduce_ofst[] array is used in place of
** the yy_shift_ofst[] array and YY_REDUCE_USE_DFLT is used in place of
** YY_SHIFT_USE_DFLT.
**
** The following are the tables generated in this section:
**
**  yy_action[]        A single table containing all actions.
**  yy_lookahead[]     A table containing the lookahead for each entry in
**                     yy_action.  Used to detect hash collisions.
**  yy_shift_ofst[]    For each state, the offset into yy_action for
**                     shifting terminals.
**  yy_reduce_ofst[]   For each state, the offset into yy_action for
**                     shifting non-terminals after a reduce.
**  yy_default[]       Default action for each state.
**
** NOTE: machine-generated data — do not edit by hand.
*********** Begin parsing tables **********************************************/
this.yy_action = [
    /*   0 */ 179, 24, 26, 83, 85, 87, 25, 22, 103, 104,
    /*  10 */ 105, 106, 107, 108, 109, 110, 111, 112, 113, 88,
    /*  20 */ 91, 120, 23, 88, 126, 128, 82, 90, 22, 103,
    /*  30 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
    /*  40 */ 94, 90, 122, 90, 121, 90, 22, 103, 104, 105,
    /*  50 */ 106, 107, 108, 109, 110, 111, 112, 113, 37, 32,
    /*  60 */ 31, 3, 4, 90, 22, 103, 104, 105, 106, 107,
    /*  70 */ 108, 109, 110, 111, 112, 113, 92, 27, 114, 16,
    /*  80 */ 28, 90, 22, 103, 104, 105, 106, 107, 108, 109,
    /*  90 */ 110, 111, 112, 113, 2, 82, 84, 86, 88, 91,
    /* 100 */ 102, 83, 85, 87, 89, 29, 19, 115, 1, 138,
    /* 110 */ 116, 36, 129, 3, 4, 33, 30, 130, 127, 120,
    /* 120 */ 35, 34, 126, 128, 101, 83, 85, 87, 89, 100,
    /* 130 */ 83, 85, 87, 89, 99, 83, 85, 87, 89, 17,
    /* 140 */ 2, 18, 20, 120, 88, 91, 126, 128, 120, 125,
    /* 150 */ 13, 126, 128, 120, 1, 15, 126, 128, 98, 83,
    /* 160 */ 85, 87, 89, 97, 83, 85, 87, 89, 96, 83,
    /* 170 */ 85, 87, 89, 119, 21, 14, 4, 120, 131, 131,
    /* 180 */ 126, 128, 120, 131, 131, 126, 128, 120, 131, 131,
    /* 190 */ 126, 128, 95, 83, 85, 87, 89, 12, 11, 10,
    /* 200 */ 9, 8, 7, 6, 5, 82, 84, 86, 88, 131,
    /* 210 */ 131, 120, 131, 131, 126, 128, 131, 131, 131, 131,
    /* 220 */ 131, 36, 131, 131, 131, 33, 30,
];
|
|
/* For each slot of yy_action[], the terminal/non-terminal code that slot
** was generated for; used to detect hash collisions (see formula (2) in
** the comment above).  Machine-generated — do not edit by hand. */
this.yy_lookahead = [
    /*   0 */ 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
    /*  10 */ 38, 39, 40, 41, 42, 43, 44, 45, 46, 7,
    /*  20 */ 8, 49, 29, 7, 52, 53, 4, 34, 35, 36,
    /*  30 */ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
    /*  40 */ 29, 34, 35, 34, 35, 34, 35, 36, 37, 38,
    /*  50 */ 39, 40, 41, 42, 43, 44, 45, 46, 29, 50,
    /*  60 */ 51, 1, 2, 34, 35, 36, 37, 38, 39, 40,
    /*  70 */ 41, 42, 43, 44, 45, 46, 29, 31, 18, 31,
    /*  80 */ 31, 34, 35, 36, 37, 38, 39, 40, 41, 42,
    /*  90 */ 43, 44, 45, 46, 3, 4, 5, 6, 7, 8,
    /* 100 */ 30, 31, 32, 33, 34, 31, 31, 34, 17, 0,
    /* 110 */ 34, 20, 0, 1, 2, 24, 25, 0, 22, 49,
    /* 120 */ 47, 48, 52, 53, 30, 31, 32, 33, 34, 30,
    /* 130 */ 31, 32, 33, 34, 30, 31, 32, 33, 34, 26,
    /* 140 */ 3, 26, 21, 49, 7, 8, 52, 53, 49, 22,
    /* 150 */ 21, 52, 53, 49, 17, 23, 52, 53, 30, 31,
    /* 160 */ 32, 33, 34, 30, 31, 32, 33, 34, 30, 31,
    /* 170 */ 32, 33, 34, 22, 19, 21, 2, 49, 54, 54,
    /* 180 */ 52, 53, 49, 54, 54, 52, 53, 49, 54, 54,
    /* 190 */ 52, 53, 30, 31, 32, 33, 34, 9, 10, 11,
    /* 200 */ 12, 13, 14, 15, 16, 4, 5, 6, 7, 54,
    /* 210 */ 54, 49, 54, 54, 52, 53, 54, 54, 54, 54,
    /* 220 */ 54, 20, 54, 54, 54, 24, 25,
];
|
|
/* Per-state offsets into yy_action[] used when the lookahead is a
** terminal symbol.  YY_SHIFT_USE_DFLT is out of range for every lookahead,
** forcing the yy_default[] fallback.  Machine-generated — do not edit. */
this.YY_SHIFT_USE_DFLT = 227;
this.YY_SHIFT_COUNT = 37;
this.YY_SHIFT_MIN = 0;
this.YY_SHIFT_MAX = 201;
this.yy_shift_ofst = [
    /*  0 */ 91, 137, 137, 137, 137, 201, 201, 201, 201, 201,
    /* 10 */ 201, 201, 201, 12, 16, 12, 22, 22, 22, 22,
    /* 20 */ 22, 16, 188, 60, 112, 109, 117, 96, 113, 115,
    /* 30 */ 121, 127, 132, 129, 151, 155, 154, 174,
];
|
|
/* Per-state offsets into yy_action[] used when the lookahead is a
** non-terminal (i.e. right after a reduce).  Machine-generated — do not
** edit by hand. */
this.YY_REDUCE_USE_DFLT = -29;
this.YY_REDUCE_COUNT = 21;
this.YY_REDUCE_MIN = -28;
this.YY_REDUCE_MAX = 162;
this.yy_reduce_ofst = [
    /*  0 */ -28, -7, 11, 29, 47, 70, 94, 99, 104, 128,
    /* 10 */ 133, 138, 162, 9, 73, 7, 46, 48, 49, 74,
    /* 20 */ 75, 76,
];
|
|
/* The default action for each state, used when the yy_action[] lookup
** misses (formula (B) above).  178 is YY_ERROR_ACTION.  Machine-generated
** — do not edit by hand. */
this.yy_default = [
    /*  0 */ 178, 178, 178, 178, 178, 178, 178, 178, 178, 178,
    /* 10 */ 178, 178, 178, 173, 167, 178, 178, 178, 178, 178,
    /* 20 */ 178, 178, 178, 178, 178, 139, 178, 178, 178, 178,
    /* 30 */ 178, 178, 172, 178, 178, 166, 178, 142,
];
/********** End of lemon-generated parsing tables *****************************/
|
|
|
|
/* The next table maps tokens (terminal symbols) into fallback tokens.
** If a construct like the following:
**
**      %fallback ID X Y Z.
**
** appears in the grammar, then ID becomes a fallback token for X, Y,
** and Z.  Whenever one of the tokens X, Y, or Z is input to the parser
** but it does not parse, the type of the token is changed to ID and
** the parse is retried before an error is thrown.
**
** This feature can be used, for example, to cause some keywords in a language
** to revert to identifiers if the keyword does not apply in the context where
** it appears.
*/
// Empty: this grammar declares no %fallback directives (YYFALLBACK above
// is also false, so the fallback path is never exercised).
this.yyFallback = [];
|
|
|
|
/* The following structure represents a single element of the
** parser's stack.  Information stored includes:
**
**   +  The state number for the parser at this level of the stack.
**
**   +  The value of the token stored at this level of the stack.
**      (In other words, the "major" token.)
**
**   +  The semantic value stored at this level of the stack.  This is
**      the information used by the action routines in the grammar.
**      It is sometimes called the "minor" token.
**
** After the "shift" half of a SHIFTREDUCE action, the stateno field
** actually contains the reduce action for the second half of the
** SHIFTREDUCE.
*/
//{
//    stateno,  /* The state-number, or reduce action in SHIFTREDUCE */
//    major,    /* The major token value.  This is the code
//              ** number for the token at this stack level */
//    minor,    /* The user-supplied minor token value.  This
//              ** is the value of the token */
//}

/* The state of the parser is completely contained in an instance of
** the following structure */
this.yyhwm = 0;          /* High-water mark of the stack */
this.yyerrcnt = -1;      /* Shifts left before out of the error */
this.yystack = null;     /* The parser's stack; allocated by init() */
this.yyidx = -1;         /* Stack index of current element in the stack */

/* Tracing hooks; configured through setTraceCallback(). */
this.yyTraceCallback = null;
this.yyTracePrompt = "";
|
|
|
|
/*
** Turn parser tracing on by giving a callback to which to write the trace
** and a prompt to preface each trace message.  Tracing is turned off
** by passing a null/undefined callback.
**
** Inputs:
** <ul>
** <li> A callback to which trace output should be written.
**      If NULL, then tracing is turned off.
** <li> A prefix string written at the beginning of every
**      line of trace output.  Default is "".
** </ul>
**
** Outputs:
** None.
*/
this.setTraceCallback = function(callback, prompt) {
    this.yyTraceCallback = callback;
    this.yyTracePrompt = prompt ? prompt : "";
}
|
|
|
|
/*
** Emit one line of trace output via the configured callback, prefixed
** with yyTracePrompt and terminated by a newline.
**
** All current call sites guard with `if (this.yyTraceCallback)`, but an
** unguarded call used to throw a TypeError when tracing was disabled;
** make the function itself a safe no-op in that case.
*/
this.trace = function(message) {
    if (!this.yyTraceCallback) {
        return;
    }
    this.yyTraceCallback(this.yyTracePrompt + message + "\n");
}
|
|
|
|
/* For tracing shifts, the names of all terminals and nonterminals
** are required.  The following table supplies these names.
** Indices 0-26 are terminals (0 is end-of-input "$"), 27 is the error
** pseudo-token, and 28 onward are the grammar's nonterminals. */
this.yyTokenName = [
    "$", "OR", "AND", "NOT",
    "INTEGER_LITERAL", "FLOAT_LITERAL", "BOOL_LITERAL", "STRING_LITERAL",
    "ID", "EQ", "NEQ", "GT",
    "GTE", "LT", "LTE", "LIKE",
    "NLIKE", "LCB", "RCB", "COMMA",
    "ADDRESS", "LSB", "RSB", "DOT",
    "OID", "TIMEDIFF", "COLON", "error",
    "main", "expr", "literal", "integer_literal",
    "float_literal", "bool_literal", "string_literal", "id",
    "and", "or", "not", "eq",
    "neq", "gt", "gte", "lt",
    "lte", "like", "nlike", "address_literal_content",
    "address_literal_content_or_empty", "address_literal", "oid_literal_content", "oid_literal_content_or_empty",
    "oid_literal", "time_diff_literal",
];
|
|
|
|
/* For tracing reduce actions, the names of all rules are required.
** Index N here corresponds to yyRuleInfo[N] and to case N in the
** yy_reduce() switch. */
this.yyRuleName = [
    /*  0 */ "main ::= expr",
    /*  1 */ "main ::= literal",
    /*  2 */ "integer_literal ::= INTEGER_LITERAL",
    /*  3 */ "literal ::= integer_literal",
    /*  4 */ "float_literal ::= FLOAT_LITERAL",
    /*  5 */ "literal ::= float_literal",
    /*  6 */ "bool_literal ::= BOOL_LITERAL",
    /*  7 */ "literal ::= bool_literal",
    /*  8 */ "string_literal ::= STRING_LITERAL",
    /*  9 */ "literal ::= string_literal",
    /* 10 */ "id ::= string_literal",
    /* 11 */ "id ::= ID",
    /* 12 */ "and ::= expr AND expr",
    /* 13 */ "or ::= expr OR expr",
    /* 14 */ "not ::= NOT expr",
    /* 15 */ "eq ::= id EQ literal",
    /* 16 */ "neq ::= id NEQ literal",
    /* 17 */ "gt ::= id GT literal",
    /* 18 */ "gte ::= id GTE literal",
    /* 19 */ "lt ::= id LT literal",
    /* 20 */ "lte ::= id LTE literal",
    /* 21 */ "like ::= id LIKE literal",
    /* 22 */ "nlike ::= id NLIKE literal",
    /* 23 */ "expr ::= and",
    /* 24 */ "expr ::= or",
    /* 25 */ "expr ::= not",
    /* 26 */ "expr ::= eq",
    /* 27 */ "expr ::= neq",
    /* 28 */ "expr ::= gt",
    /* 29 */ "expr ::= gte",
    /* 30 */ "expr ::= lt",
    /* 31 */ "expr ::= lte",
    /* 32 */ "expr ::= like",
    /* 33 */ "expr ::= nlike",
    /* 34 */ "expr ::= LCB expr RCB",
    /* 35 */ "address_literal_content ::= string_literal",
    /* 36 */ "address_literal_content ::= address_literal_content COMMA string_literal",
    /* 37 */ "address_literal_content_or_empty ::= address_literal_content",
    /* 38 */ "address_literal_content_or_empty ::=",
    /* 39 */ "address_literal ::= ADDRESS LSB address_literal_content_or_empty RSB",
    /* 40 */ "literal ::= address_literal",
    /* 41 */ "oid_literal_content ::= id",
    /* 42 */ "oid_literal_content ::= oid_literal_content DOT id",
    /* 43 */ "oid_literal_content_or_empty ::= oid_literal_content",
    /* 44 */ "oid_literal_content_or_empty ::=",
    /* 45 */ "oid_literal ::= OID LSB oid_literal_content_or_empty RSB",
    /* 46 */ "literal ::= oid_literal",
    /* 47 */ "time_diff_literal ::= TIMEDIFF LSB integer_literal integer_literal COLON integer_literal COLON integer_literal integer_literal RSB",
    /* 48 */ "literal ::= time_diff_literal",
];
|
|
/*
** Try to increase the size of the parser stack.
**
** Grows by exactly one empty slot per call.  (The C template grows by
** yystksz*2 + 100 at a time; one-at-a-time is fine here because
** Array.prototype.push cannot fail, so no error count is returned.)
*/
this.yyGrowStack = function() {
    var emptySlot = {
        stateno: undefined,
        major: undefined,
        minor: undefined
    };
    this.yystack.push(emptySlot);
}
|
|
|
|
/* Initialize a new parser that has already been allocated.
**
** Resets the high-water mark, error counter and stack index, allocates
** the stack (fixed-size when YYSTACKDEPTH > 0, otherwise growable), and
** places state 0 with the end-of-input major token at the bottom.
*/
this.init = function() {
    this.yyhwm = 0;
    this.yyerrcnt = -1;
    this.yyidx = 0;

    this.yystack = [];
    if (this.YYSTACKDEPTH > 0) {
        // Fixed-size stack: preallocate every slot up front.
        for (var i = 0; i < this.YYSTACKDEPTH; i++) {
            this.yystack.push({
                stateno: undefined,
                major: undefined,
                minor: undefined
            });
        }
    } else {
        // Dynamically sized stack: start with a single slot.
        this.yyGrowStack();
    }

    // Seed the bottom of the stack with the start state.
    var bottom = this.yystack[0];
    bottom.stateno = 0;
    bottom.major = 0;
}
|
|
|
|
/* The following function deletes the "minor type" or semantic value
** associated with a symbol.  The symbol can be either a terminal
** or nonterminal.  "yymajor" is the symbol code, and "yyminor" is
** the value to be deleted.  The code used to do the deletions is
** derived from the %destructor and/or %token_destructor directives
** of the input grammar.
**
** This can happen when the symbol is popped from the stack during a
** reduce or during error processing, or when a parser is being
** destroyed before it is finished parsing.  Note: during a reduce, the
** only symbols destroyed are those which appear on the RHS of the rule,
** but which are *not* used inside the user-supplied code.
*/
this.yy_destructor = function(yymajor, yyminor) {
    switch (yymajor) {
        /********* Begin destructor definitions ***************************************/
        /********* End destructor definitions *****************************************/
        default:
            /* This grammar declares no destructors, so every symbol
            ** falls through to this no-op. */
            break;
    }
}
|
|
|
|
/*
** Pop the parser's stack once.
**
** If there is a destructor routine associated with the token which
** is popped from the stack, then call it.
*/
this.yy_pop_parser_stack = function() {
    // assert( pParser->yytos!=0 );
    // assert( pParser->yytos > pParser->yystack );
    var top = this.yystack[this.yyidx];

    if (this.yyTraceCallback) {
        this.trace("Popping " + this.yyTokenName[top.major]);
    }

    // Run any %destructor for the popped symbol, then shrink the stack.
    this.yy_destructor(top.major, top.minor);
    this.yyidx -= 1;
}
|
|
|
|
/*
** Clear all secondary memory allocations from the parser: pop every
** remaining stack entry (running destructors) and release the stack.
** The parser must be re-init()ed before being used again.
*/
this.finalize = function() {
    for (; this.yyidx > 0;) {
        this.yy_pop_parser_stack();
    }
    this.yystack = null;
}
|
|
|
|
/*
** Return the peak depth of the stack for a parser, i.e. the high-water
** mark recorded in yyhwm since the last init().
*/
this.getStackPeak = function() {
    var peak = this.yyhwm;
    return peak;
}
|
|
|
|
/*
** Find the appropriate action for a parser given the terminal
** look-ahead token iLookAhead.
**
** Returns an action integer as described by the table comment above:
** a shift, shift-reduce, reduce, error or accept code.  If the top of
** stack already holds a pending reduce action (SHIFTREDUCE second half),
** that action is returned directly.
**
** FIX vs. the previous revision: the %fallback handling used to execute
** `iLookAhead = iFallback; continue;` unconditionally — even when no
** fallback existed for the token (iFallback 0/undefined) — which would
** loop forever and skip the wildcard/default paths whenever YYFALLBACK
** was enabled.  The loop now only restarts on a successful fallback,
** matching the lempar.c driver template.  (Dead code for this grammar
** since YYFALLBACK is false, so current behavior is unchanged.)
*/
this.yy_find_shift_action = function(
    iLookAhead /* The look-ahead token */
) {
    var yytos = this.yystack[this.yyidx];
    var stateno = yytos.stateno;

    /* Second half of a SHIFTREDUCE: the "state" is really a reduce action. */
    if (stateno >= this.YY_MIN_REDUCE) {
        return stateno;
    }

    // assert( stateno <= YY_SHIFT_COUNT );

    do {
        var i = this.yy_shift_ofst[stateno];
        // assert( iLookAhead!=YYNOCODE );
        i += iLookAhead;

        if (i >= 0 && i < this.yy_action.length && this.yy_lookahead[i] == iLookAhead) {
            /* Direct hit in the action table. */
            return this.yy_action[i];
        }

        /* Table miss: try %fallback, then %wildcard, then the default. */
        if (this.YYFALLBACK) {
            var iFallback = 0; /* Fallback token */
            if (iLookAhead < this.yyFallback.length) {
                iFallback = this.yyFallback[iLookAhead];
            }
            if (iFallback) {
                if (this.yyTraceCallback) {
                    this.trace("FALLBACK " + this.yyTokenName[iLookAhead] + " => " + this.yyTokenName[iFallback]);
                }
                // assert( yyFallback[iFallback]==0 ); /* Fallback loop must terminate */
                iLookAhead = iFallback;
                continue;
            }
        }

        if (this.YYWILDCARD) {
            var j = i - iLookAhead + this.YYWILDCARD;
            var cond1 = (this.YY_SHIFT_MIN + this.YYWILDCARD) < 0 ? j >= 0 : true;
            var cond2 = (this.YY_SHIFT_MAX + this.YYWILDCARD) >= this.yy_action.length ? j < this.yy_action.length : true;
            if (cond1 && cond2 && this.yy_lookahead[j] == this.YYWILDCARD && iLookAhead > 0) {
                if (this.yyTraceCallback) {
                    this.trace("WILDCARD " + this.yyTokenName[iLookAhead] + " => " + this.yyTokenName[this.YYWILDCARD]);
                }
                return this.yy_action[j];
            }
        }

        return this.yy_default[stateno];
    } while (true);
}
|
|
|
|
/*
** Find the appropriate action for a parser given the non-terminal
** look-ahead token iLookAhead (used right after a reduce).
**
** Uses yy_reduce_ofst[] in place of yy_shift_ofst[].  The range checks
** are only performed when YYERRORSYMBOL is defined; otherwise lemon
** guarantees the lookup is in range (see the commented asserts).
*/
this.yy_find_reduce_action = function(
    stateno,   /* Current state number */
    iLookAhead /* The look-ahead token */
) {
    if (!this.YYERRORSYMBOL) {
        // assert( stateno<=YY_REDUCE_COUNT );
    } else if (stateno > this.YY_REDUCE_COUNT) {
        return this.yy_default[stateno];
    }

    // assert( i!=YY_REDUCE_USE_DFLT );
    // assert( iLookAhead!=YYNOCODE );
    var i = this.yy_reduce_ofst[stateno] + iLookAhead;

    if (this.YYERRORSYMBOL) {
        var miss = i < 0 || i >= this.yy_action.length || this.yy_lookahead[i] != iLookAhead;
        if (miss) {
            return this.yy_default[stateno];
        }
    } else {
        // assert( i>=0 && i<YY_ACTTAB_COUNT );
        // assert( yy_lookahead[i]==iLookAhead );
    }

    return this.yy_action[i];
}
|
|
|
|
/*
** The following routine is called if the stack overflows.  It unwinds
** the entire stack (running destructors via yy_pop_parser_stack) and
** then executes any user-supplied %stack_overflow code.
*/
this.yyStackOverflow = function() {
    if (this.yyTraceCallback) {
        this.trace("Stack Overflow!");
    }

    for (; this.yyidx > 0;) {
        this.yy_pop_parser_stack();
    }

    /* Here code is inserted which will execute if the parser
    ** stack ever overflows */
    /******** Begin %stack_overflow code ******************************************/
    /******** End %stack_overflow code ********************************************/
}
|
|
|
|
/*
** Print tracing information for a SHIFT action.  No-op unless tracing
** has been enabled via setTraceCallback().
*/
this.yyTraceShift = function(yyNewState) {
    if (!this.yyTraceCallback) {
        return;
    }
    var majorName = this.yyTokenName[this.yystack[this.yyidx].major];
    if (yyNewState < this.YYNSTATE) {
        /* Plain shift into a real state. */
        this.trace("Shift '" + majorName + "', go to state " + yyNewState);
    } else {
        /* Shift half of a SHIFTREDUCE action. */
        this.trace("Shift '" + majorName + "'");
    }
}
|
|
|
|
/*
** Perform a shift action: push the (major, minor) token pair onto the
** stack and enter state yyNewState.  Grows the dynamic stack or reports
** overflow of the fixed stack as needed; SHIFTREDUCE states are rewritten
** into their pending reduce action before being stored.
*/
this.yy_shift = function(
    yyNewState, /* The new state to shift in */
    yyMajor,    /* The major token to shift in */
    yyMinor     /* The minor token to shift in */
) {
    this.yyidx++;

    /* Track the deepest the stack has ever been (see getStackPeak). */
    if (this.yyidx > this.yyhwm) {
        this.yyhwm++;
        // assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack) );
    }

    if (this.YYSTACKDEPTH > 0 && this.yyidx >= this.YYSTACKDEPTH) {
        /* Fixed-size stack is full: undo the push and bail out. */
        this.yyidx--;
        this.yyStackOverflow();
        return;
    }
    if (this.YYSTACKDEPTH <= 0 && this.yyidx >= this.yystack.length) {
        /* Growable stack: add a slot for the new entry. */
        this.yyGrowStack();
    }

    /* Translate a SHIFTREDUCE code into the reduce action it carries. */
    if (yyNewState > this.YY_MAX_SHIFT) {
        yyNewState += this.YY_MIN_REDUCE - this.YY_MIN_SHIFTREDUCE;
    }

    var entry = this.yystack[this.yyidx];
    entry.stateno = yyNewState;
    entry.major = yyMajor;
    entry.minor = yyMinor;

    this.yyTraceShift(yyNewState);
}
|
|
|
|
/* The following table contains information about every rule that
** is used during the reduce.  Entry N corresponds to yyRuleName[N].
*/
//{
//    lhs,   /* Symbol on the left-hand side of the rule */
//    nrhs,  /* Number of right-hand side symbols in the rule */
//}
this.yyRuleInfo = [
    /*  0 */ { lhs: 28, nrhs: 1 },
    /*  1 */ { lhs: 28, nrhs: 1 },
    /*  2 */ { lhs: 31, nrhs: 1 },
    /*  3 */ { lhs: 30, nrhs: 1 },
    /*  4 */ { lhs: 32, nrhs: 1 },
    /*  5 */ { lhs: 30, nrhs: 1 },
    /*  6 */ { lhs: 33, nrhs: 1 },
    /*  7 */ { lhs: 30, nrhs: 1 },
    /*  8 */ { lhs: 34, nrhs: 1 },
    /*  9 */ { lhs: 30, nrhs: 1 },
    /* 10 */ { lhs: 35, nrhs: 1 },
    /* 11 */ { lhs: 35, nrhs: 1 },
    /* 12 */ { lhs: 36, nrhs: 3 },
    /* 13 */ { lhs: 37, nrhs: 3 },
    /* 14 */ { lhs: 38, nrhs: 2 },
    /* 15 */ { lhs: 39, nrhs: 3 },
    /* 16 */ { lhs: 40, nrhs: 3 },
    /* 17 */ { lhs: 41, nrhs: 3 },
    /* 18 */ { lhs: 42, nrhs: 3 },
    /* 19 */ { lhs: 43, nrhs: 3 },
    /* 20 */ { lhs: 44, nrhs: 3 },
    /* 21 */ { lhs: 45, nrhs: 3 },
    /* 22 */ { lhs: 46, nrhs: 3 },
    /* 23 */ { lhs: 29, nrhs: 1 },
    /* 24 */ { lhs: 29, nrhs: 1 },
    /* 25 */ { lhs: 29, nrhs: 1 },
    /* 26 */ { lhs: 29, nrhs: 1 },
    /* 27 */ { lhs: 29, nrhs: 1 },
    /* 28 */ { lhs: 29, nrhs: 1 },
    /* 29 */ { lhs: 29, nrhs: 1 },
    /* 30 */ { lhs: 29, nrhs: 1 },
    /* 31 */ { lhs: 29, nrhs: 1 },
    /* 32 */ { lhs: 29, nrhs: 1 },
    /* 33 */ { lhs: 29, nrhs: 1 },
    /* 34 */ { lhs: 29, nrhs: 3 },
    /* 35 */ { lhs: 47, nrhs: 1 },
    /* 36 */ { lhs: 47, nrhs: 3 },
    /* 37 */ { lhs: 48, nrhs: 1 },
    /* 38 */ { lhs: 48, nrhs: 0 },
    /* 39 */ { lhs: 49, nrhs: 4 },
    /* 40 */ { lhs: 30, nrhs: 1 },
    /* 41 */ { lhs: 50, nrhs: 1 },
    /* 42 */ { lhs: 50, nrhs: 3 },
    /* 43 */ { lhs: 51, nrhs: 1 },
    /* 44 */ { lhs: 51, nrhs: 0 },
    /* 45 */ { lhs: 52, nrhs: 4 },
    /* 46 */ { lhs: 30, nrhs: 1 },
    /* 47 */ { lhs: 53, nrhs: 10 },
    /* 48 */ { lhs: 30, nrhs: 1 },
];
|
|
|
|
/*
|
|
** Perform a reduce action and the shift that must immediately
|
|
** follow the reduce.
|
|
*/
|
|
    this.yy_reduce = function(
        yyruleno /* Number of the rule by which to reduce */
    ) {
        var yymsp = this.yystack[this.yyidx]; /* The top of the parser's stack */

        // Trace-only bookkeeping: report which rule fires and the state the
        // parser returns to once the RHS has been popped.
        if (yyruleno < this.yyRuleName.length) {
            var yysize = this.yyRuleInfo[yyruleno].nrhs;
            var ruleName = this.yyRuleName[yyruleno];
            var newStateNo = this.yystack[this.yyidx - yysize].stateno;
            if (this.yyTraceCallback) {
                this.trace("Reduce [" + ruleName + "], go to state " + newStateNo + ".");
            }
        }

        /* Check that the stack is large enough to grow by a single entry
        ** if the RHS of the rule is empty. This ensures that there is room
        ** enough on the stack to push the LHS value */
        if (this.yyRuleInfo[yyruleno].nrhs == 0) {
            if (this.yyidx > this.yyhwm) {
                // yyhwm tracks the stack's high-water mark (diagnostics only).
                this.yyhwm++;
                // assert( yypParser->yyhwm == (int)(yypParser->yytos - yypParser->yystack));
            }
            if (this.YYSTACKDEPTH > 0) {
                // Fixed-depth stack: overflow is reported and the reduce aborted.
                if (this.yyidx >= this.YYSTACKDEPTH - 1) {
                    this.yyStackOverflow();
                    return;
                }
            } else {
                // Growable stack: extend, then re-read the top-of-stack entry
                // in case yyGrowStack reallocated the array.
                if (this.yyidx >= this.yystack.length - 1) {
                    this.yyGrowStack();
                    yymsp = this.yystack[this.yyidx];
                }
            }
        }

        // Semantic value produced by the rule's user action; written back into
        // the stack slot that becomes the LHS entry after the pop below.
        var yylhsminor;
        switch (yyruleno) {
            /* Beginning here are the reduction cases. A typical example
            ** follows:
            ** case 0:
            ** #line <lineno> <grammarfile>
            ** { ... } // User supplied code
            ** #line <lineno> <thisfile>
            ** break;
            */
            /********** Begin reduce actions **********************************************/
            case 0:
                /* main ::= expr */
            case 1:
                /* main ::= literal */ this.yytestcase(yyruleno == 1); {
                    // NOTE(review): `_result` is defined outside this chunk
                    // (presumably in a %include section) — the parse result
                    // root is published through it.
                    _result.root_node = this.yystack[this.yyidx + 0].minor
                }
                break;
            case 2:
                /* integer_literal ::= INTEGER_LITERAL */ {
                    yylhsminor = new tokens.integer_literal({
                        children: [
                            new tokens.LEXEME({
                                type: this.yystack[this.yyidx + 0].minor.lexeme,
                                value: this.yystack[this.yyidx + 0].minor.value,
                                start: this.yystack[this.yyidx + 0].minor.start,
                                end: this.yystack[this.yyidx + 0].minor.end
                            })
                        ]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 3:
                /* literal ::= integer_literal */
            case 5:
                /* literal ::= float_literal */ this.yytestcase(yyruleno == 5);
            case 7:
                /* literal ::= bool_literal */ this.yytestcase(yyruleno == 7);
            case 9:
                /* literal ::= string_literal */ this.yytestcase(yyruleno == 9);
            case 23:
                /* expr ::= and */ this.yytestcase(yyruleno == 23);
            case 24:
                /* expr ::= or */ this.yytestcase(yyruleno == 24);
            case 25:
                /* expr ::= not */ this.yytestcase(yyruleno == 25);
            case 26:
                /* expr ::= eq */ this.yytestcase(yyruleno == 26);
            case 27:
                /* expr ::= neq */ this.yytestcase(yyruleno == 27);
            case 28:
                /* expr ::= gt */ this.yytestcase(yyruleno == 28);
            case 29:
                /* expr ::= gte */ this.yytestcase(yyruleno == 29);
            case 30:
                /* expr ::= lt */ this.yytestcase(yyruleno == 30);
            case 31:
                /* expr ::= lte */ this.yytestcase(yyruleno == 31);
            case 32:
                /* expr ::= like */ this.yytestcase(yyruleno == 32);
            case 33:
                /* expr ::= nlike */ this.yytestcase(yyruleno == 33);
            case 37:
                /* address_literal_content_or_empty ::= address_literal_content */ this.yytestcase(yyruleno == 37);
            case 40:
                /* literal ::= address_literal */ this.yytestcase(yyruleno == 40);
            case 43:
                /* oid_literal_content_or_empty ::= oid_literal_content */ this.yytestcase(yyruleno == 43);
            case 46:
                /* literal ::= oid_literal */ this.yytestcase(yyruleno == 46);
            case 48:
                /* literal ::= time_diff_literal */ this.yytestcase(yyruleno == 48); {
                    // Pass-through rules: the LHS reuses the single RHS value.
                    yylhsminor = this.yystack[this.yyidx + 0].minor;
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 4:
                /* float_literal ::= FLOAT_LITERAL */ {
                    yylhsminor = new tokens.float_literal({
                        children: [
                            new tokens.LEXEME({
                                type: this.yystack[this.yyidx + 0].minor.lexeme,
                                value: this.yystack[this.yyidx + 0].minor.value,
                                start: this.yystack[this.yyidx + 0].minor.start,
                                end: this.yystack[this.yyidx + 0].minor.end
                            })
                        ]
                    })
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 6:
                /* bool_literal ::= BOOL_LITERAL */ {
                    yylhsminor = new tokens.bool_literal({
                        children: [
                            new tokens.LEXEME({
                                type: this.yystack[this.yyidx + 0].minor.lexeme,
                                value: this.yystack[this.yyidx + 0].minor.value,
                                start: this.yystack[this.yyidx + 0].minor.start,
                                end: this.yystack[this.yyidx + 0].minor.end
                            })
                        ]
                    })
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 8:
                /* string_literal ::= STRING_LITERAL */ {
                    yylhsminor = new tokens.string_literal({
                        children: [
                            new tokens.LEXEME({
                                type: this.yystack[this.yyidx + 0].minor.lexeme,
                                value: this.yystack[this.yyidx + 0].minor.value,
                                start: this.yystack[this.yyidx + 0].minor.start,
                                end: this.yystack[this.yyidx + 0].minor.end
                            })
                        ]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 10:
                /* id ::= string_literal */ {
                    yylhsminor = new tokens.id({
                        children: [this.yystack[this.yyidx + 0].minor]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 11:
                /* id ::= ID */ {
                    yylhsminor = new tokens.id({
                        children: [
                            new tokens.LEXEME({
                                type: this.yystack[this.yyidx + 0].minor.lexeme,
                                value: this.yystack[this.yyidx + 0].minor.value,
                                start: this.yystack[this.yyidx + 0].minor.start,
                                end: this.yystack[this.yyidx + 0].minor.end
                            })
                        ]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 12:
                /* and ::= expr AND expr */ {
                    // Stack offsets: -2 = left expr, -1 = AND token, 0 = right expr.
                    yylhsminor = new tokens.and({
                        lexpr: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        rexpr: this.yystack[this.yyidx + 0].minor
                    })
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 13:
                /* or ::= expr OR expr */ {
                    yylhsminor = new tokens.or({
                        lexpr: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        rexpr: this.yystack[this.yyidx + 0].minor
                    })
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 14:
                /* not ::= NOT expr */ {
                    yylhsminor = new tokens.not({
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        rexpr: this.yystack[this.yyidx + 0].minor
                    })
                }
                this.yystack[this.yyidx + -1].minor = yylhsminor;
                break;
            case 15:
                /* eq ::= id EQ literal */ {
                    yylhsminor = new tokens.eq({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 16:
                /* neq ::= id NEQ literal */ {
                    yylhsminor = new tokens.neq({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 17:
                /* gt ::= id GT literal */ {
                    yylhsminor = new tokens.gt({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 18:
                /* gte ::= id GTE literal */ {
                    yylhsminor = new tokens.gte({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 19:
                /* lt ::= id LT literal */ {
                    yylhsminor = new tokens.lt({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 20:
                /* lte ::= id LTE literal */ {
                    yylhsminor = new tokens.lte({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 21:
                /* like ::= id LIKE literal */ {
                    yylhsminor = new tokens.like({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 22:
                /* nlike ::= id NLIKE literal */ {
                    yylhsminor = new tokens.nlike({
                        id: this.yystack[this.yyidx + -2].minor,
                        op: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -1].minor.lexeme,
                            value: this.yystack[this.yyidx + -1].minor.value,
                            start: this.yystack[this.yyidx + -1].minor.start,
                            end: this.yystack[this.yyidx + -1].minor.end
                        }),
                        literal: this.yystack[this.yyidx + 0].minor
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 34:
                /* expr ::= LCB expr RCB */ {
                    yylhsminor = new tokens.sub_expr({
                        LCB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -2].minor.lexeme,
                            value: this.yystack[this.yyidx + -2].minor.value,
                            start: this.yystack[this.yyidx + -2].minor.start,
                            end: this.yystack[this.yyidx + -2].minor.end
                        }),
                        expr: this.yystack[this.yyidx + -1].minor,
                        RCB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + 0].minor.lexeme,
                            value: this.yystack[this.yyidx + 0].minor.value,
                            start: this.yystack[this.yyidx + 0].minor.start,
                            end: this.yystack[this.yyidx + 0].minor.end
                        })
                    });
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 35:
                /* address_literal_content ::= string_literal */ {
                    yylhsminor = new tokens.address_literal_content({
                        children: [this.yystack[this.yyidx + 0].minor]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 36:
                /* address_literal_content ::= address_literal_content COMMA string_literal */
            case 42:
                /* oid_literal_content ::= oid_literal_content DOT id */ this.yytestcase(yyruleno == 42); {
                    // List-append rules: mutate the existing content node at -2
                    // by adding the new element at 0, then reuse it as the LHS.
                    this.yystack[this.yyidx + -2].minor.add(this.yystack[this.yyidx + 0].minor);
                    yylhsminor = this.yystack[this.yyidx + -2].minor;
                }
                this.yystack[this.yyidx + -2].minor = yylhsminor;
                break;
            case 38:
                /* address_literal_content_or_empty ::= */ {
                    // Empty-RHS rule: the LHS value is written one slot above
                    // the current top (the slot the pending shift will occupy).
                    this.yystack[this.yyidx + 1].minor = new tokens.address_literal_content({
                        children: []
                    });
                }
                break;
            case 39:
                /* address_literal ::= ADDRESS LSB address_literal_content_or_empty RSB */ {
                    yylhsminor = new tokens.address_literal({
                        children: this.yystack[this.yyidx + -1].minor.children,
                        keyword: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -3].minor.lexeme,
                            value: this.yystack[this.yyidx + -3].minor.value,
                            start: this.yystack[this.yyidx + -3].minor.start,
                            end: this.yystack[this.yyidx + -3].minor.end
                        }),
                        LSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -2].minor.lexeme,
                            value: this.yystack[this.yyidx + -2].minor.value,
                            start: this.yystack[this.yyidx + -2].minor.start,
                            end: this.yystack[this.yyidx + -2].minor.end
                        }),
                        RSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + 0].minor.lexeme,
                            value: this.yystack[this.yyidx + 0].minor.value,
                            start: this.yystack[this.yyidx + 0].minor.start,
                            end: this.yystack[this.yyidx + 0].minor.end
                        })
                    });
                }
                this.yystack[this.yyidx + -3].minor = yylhsminor;
                break;
            case 41:
                /* oid_literal_content ::= id */ {
                    yylhsminor = new tokens.oid_literal_content({
                        children: [this.yystack[this.yyidx + 0].minor]
                    });
                }
                this.yystack[this.yyidx + 0].minor = yylhsminor;
                break;
            case 44:
                /* oid_literal_content_or_empty ::= */ {
                    // Empty-RHS rule: see case 38 for the +1 slot convention.
                    this.yystack[this.yyidx + 1].minor = new tokens.oid_literal_content({
                        children: []
                    });
                }
                break;
            case 45:
                /* oid_literal ::= OID LSB oid_literal_content_or_empty RSB */ {
                    yylhsminor = new tokens.oid_literal({
                        children: this.yystack[this.yyidx + -1].minor.children,
                        keyword: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -3].minor.lexeme,
                            value: this.yystack[this.yyidx + -3].minor.value,
                            start: this.yystack[this.yyidx + -3].minor.start,
                            end: this.yystack[this.yyidx + -3].minor.end
                        }),
                        LSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -2].minor.lexeme,
                            value: this.yystack[this.yyidx + -2].minor.value,
                            start: this.yystack[this.yyidx + -2].minor.start,
                            end: this.yystack[this.yyidx + -2].minor.end
                        }),
                        RSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + 0].minor.lexeme,
                            value: this.yystack[this.yyidx + 0].minor.value,
                            start: this.yystack[this.yyidx + 0].minor.start,
                            end: this.yystack[this.yyidx + 0].minor.end
                        })
                    });
                }
                this.yystack[this.yyidx + -3].minor = yylhsminor;
                break;
            case 47:
                /* time_diff_literal ::= TIMEDIFF LSB integer_literal integer_literal COLON integer_literal COLON integer_literal integer_literal RSB */ {
                    // Offsets: -9 TIMEDIFF, -8 LSB, -7 days, -6 hours, -5 COLON,
                    // -4 minutes, -3 COLON, -2 seconds, -1 microseconds, 0 RSB.
                    yylhsminor = new tokens.time_diff_literal({
                        keyword: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -9].minor.lexeme,
                            value: this.yystack[this.yyidx + -9].minor.value,
                            start: this.yystack[this.yyidx + -9].minor.start,
                            end: this.yystack[this.yyidx + -9].minor.end
                        }),
                        LSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + -8].minor.lexeme,
                            value: this.yystack[this.yyidx + -8].minor.value,
                            start: this.yystack[this.yyidx + -8].minor.start,
                            end: this.yystack[this.yyidx + -8].minor.end
                        }),
                        RSB: new tokens.LEXEME({
                            type: this.yystack[this.yyidx + 0].minor.lexeme,
                            value: this.yystack[this.yyidx + 0].minor.value,
                            start: this.yystack[this.yyidx + 0].minor.start,
                            end: this.yystack[this.yyidx + 0].minor.end
                        }),
                        days: this.yystack[this.yyidx + -7].minor,
                        hours: this.yystack[this.yyidx + -6].minor,
                        minutes: this.yystack[this.yyidx + -4].minor,
                        seconds: this.yystack[this.yyidx + -2].minor,
                        microseconds: this.yystack[this.yyidx + -1].minor,
                    });
                }
                this.yystack[this.yyidx + -9].minor = yylhsminor;
                break;
            default:
                break;
            /********** End reduce actions ************************************************/
        };
        // assert( yyruleno<sizeof(yyRuleInfo)/sizeof(yyRuleInfo[0]) );

        var yygoto = this.yyRuleInfo[yyruleno].lhs; /* The next state */
        var yysize = this.yyRuleInfo[yyruleno].nrhs; /* Amount to pop the stack */
        var yyact = this.yy_find_reduce_action( /* The next action */
            this.yystack[this.yyidx - yysize].stateno,
            yygoto
        );
        if (yyact <= this.YY_MAX_SHIFTREDUCE) {
            if (yyact > this.YY_MAX_SHIFT) {
                yyact += this.YY_MIN_REDUCE - this.YY_MIN_SHIFTREDUCE;
            }
            // Pop the RHS but keep one slot: it is rewritten in place as the
            // shifted LHS entry.
            this.yyidx -= yysize - 1;
            yymsp = this.yystack[this.yyidx];
            yymsp.stateno = yyact;
            yymsp.major = yygoto;
            this.yyTraceShift(yyact);
        } else {
            // assert( yyact == YY_ACCEPT_ACTION );
            this.yyidx -= yysize;
            this.yy_accept();
        }
    }
|
|
|
|
/*
|
|
** The following code executes when the parse fails
|
|
*/
|
|
this.yy_parse_failed = function() {
|
|
if (this.yyTraceCallback) {
|
|
this.trace("Fail!");
|
|
}
|
|
while (this.yyidx > 0) {
|
|
this.yy_pop_parser_stack();
|
|
}
|
|
/* Here code is inserted which will be executed whenever the
|
|
** parser fails */
|
|
/************ Begin %parse_failure code ***************************************/
|
|
/************ End %parse_failure code *****************************************/
|
|
}
|
|
|
|
/*
|
|
** The following code executes when a syntax error first occurs.
|
|
*/
|
|
this.yy_syntax_error = function(
|
|
yymajor, /* The major type of the error token */
|
|
yyminor /* The minor type of the error token */
|
|
) {
|
|
var TOKEN = yyminor;
|
|
/************ Begin %syntax_error code ****************************************/
|
|
|
|
console.log("Syntax error");
|
|
/************ End %syntax_error code ******************************************/
|
|
}
|
|
|
|
/*
|
|
** The following is executed when the parser accepts
|
|
*/
|
|
this.yy_accept = function() {
|
|
if (this.yyTraceCallback) {
|
|
this.trace("Accept!");
|
|
}
|
|
if (!this.YYNOERRORRECOVERY) {
|
|
this.yyerrcnt = -1;
|
|
}
|
|
// assert( yypParser->yytos==yypParser->yystack );
|
|
/* Here code is inserted which will be executed whenever the
|
|
** parser accepts */
|
|
/*********** Begin %parse_accept code *****************************************/
|
|
/*********** End %parse_accept code *******************************************/
|
|
}
|
|
|
|
/* The main parser program.
|
|
** The first argument is a pointer to a structure obtained from
|
|
** "ParserAlloc" which describes the current state of the parser.
|
|
** The second argument is the major token number. The third is
|
|
** the minor token. The fourth optional argument is whatever the
|
|
** user wants (and specified in the grammar) and is available for
|
|
** use by the action routines.
|
|
**
|
|
** Inputs:
|
|
** <ul>
|
|
** <li> A pointer to the parser (an opaque structure.)
|
|
** <li> The major token number.
|
|
** <li> The minor token number.
|
|
** <li> An option argument of a grammar-specified type.
|
|
** </ul>
|
|
**
|
|
** Outputs:
|
|
** None.
|
|
*/
|
|
    this.parse = function(
        yymajor, /* The major token code number */
        yyminor /* The value for the token */
    ) {
        var yyact; /* The parser action. */
        var yyendofinput; /* True if we are at the end of input */
        var yyerrorhit = 0; /* True if yymajor has invoked an error */

        //assert( yypParser->yytos!=0 );

        // A null/undefined major token is normalized to 0, the end-of-input marker.
        if (yymajor === undefined || yymajor === null) {
            yymajor = 0;
        }

        yyendofinput = yymajor == 0;

        if (this.yyTraceCallback) {
            this.trace("Input '" + this.yyTokenName[yymajor] + "'");
        }

        // Keep acting on the current token: a shift consumes it (yymajor is
        // set to YYNOCODE), while reduces loop back to look it up again.
        do {
            yyact = this.yy_find_shift_action(yymajor);
            if (yyact <= this.YY_MAX_SHIFTREDUCE) { // check me?
                this.yy_shift(yyact, yymajor, yyminor);
                if (!this.YYNOERRORRECOVERY) {
                    this.yyerrcnt--;
                }
                yymajor = this.YYNOCODE;
            } else if (yyact <= this.YY_MAX_REDUCE) { // check me?
                this.yy_reduce(yyact - this.YY_MIN_REDUCE); // check me?
            } else {
                // assert( yyact == YY_ERROR_ACTION );
                if (this.yyTraceCallback) {
                    this.trace("Syntax Error!");
                }
                if (this.YYERRORSYMBOL) {
                    /* A syntax error has occurred.
                    ** The response to an error depends upon whether or not the
                    ** grammar defines an error token "ERROR".
                    **
                    ** This is what we do if the grammar does define ERROR:
                    **
                    ** * Call the %syntax_error function.
                    **
                    ** * Begin popping the stack until we enter a state where
                    ** it is legal to shift the error symbol, then shift
                    ** the error symbol.
                    **
                    ** * Set the error count to three.
                    **
                    ** * Begin accepting and shifting new tokens. No new error
                    ** processing will occur until three tokens have been
                    ** shifted successfully.
                    **
                    */
                    if (this.yyerrcnt < 0) {
                        this.yy_syntax_error(yymajor, yyminor);
                    }
                    var yymx = this.yystack[this.yyidx].major;
                    if (yymx == this.YYERRORSYMBOL || yyerrorhit) {
                        // Already recovering: throw the offending token away.
                        if (this.yyTraceCallback) {
                            this.trace("Discard input token " + this.yyTokenName[yymajor]);
                        }
                        this.yy_destructor(yymajor, yyminor);
                        yymajor = this.YYNOCODE;
                    } else {
                        // Pop states until one can legally shift the error symbol.
                        while (this.yyidx >= 0 &&
                            yymx != this.YYERRORSYMBOL &&
                            (yyact = this.yy_find_reduce_action(
                                this.yystack[this.yyidx].stateno,
                                this.YYERRORSYMBOL)) >= this.YY_MIN_REDUCE // check me?
                        ) {
                            this.yy_pop_parser_stack();
                        }
                        if (this.yyidx < 0 || yymajor == 0) {
                            // Stack exhausted (or error at EOF): the parse fails.
                            this.yy_destructor(yymajor, yyminor);
                            this.yy_parse_failed();
                            if (!this.YYNOERRORRECOVERY) {
                                this.yyerrcnt = -1;
                            }
                            yymajor = this.YYNOCODE;
                        } else if (yymx != this.YYERRORSYMBOL) {
                            this.yy_shift(yyact, this.YYERRORSYMBOL, yyminor); // check me?
                        }
                    }
                    this.yyerrcnt = 3;
                    yyerrorhit = 1;
                } else if (this.YYNOERRORRECOVERY) {
                    /* If the YYNOERRORRECOVERY macro is defined, then do not attempt to
                    ** do any kind of error recovery. Instead, simply invoke the syntax
                    ** error routine and continue going as if nothing had happened.
                    **
                    ** Applications can set this macro (for example inside %include) if
                    ** they intend to abandon the parse upon the first syntax error seen.
                    */
                    this.yy_syntax_error(yymajor, yyminor);
                    this.yy_destructor(yymajor, yyminor);
                    yymajor = this.YYNOCODE;
                } else {
                    /* YYERRORSYMBOL is not defined */
                    /* This is what we do if the grammar does not define ERROR:
                    **
                    ** * Report an error message, and throw away the input token.
                    **
                    ** * If the input token is $, then fail the parse.
                    **
                    ** As before, subsequent error messages are suppressed until
                    ** three input tokens have been successfully shifted.
                    */
                    if (this.yyerrcnt <= 0) {
                        this.yy_syntax_error(yymajor, yyminor);
                    }
                    this.yyerrcnt = 3;
                    this.yy_destructor(yymajor, yyminor);
                    if (yyendofinput) {
                        this.yy_parse_failed();
                        if (!this.YYNOERRORRECOVERY) {
                            this.yyerrcnt = -1;
                        }
                    }
                    yymajor = this.YYNOCODE;
                }
            }
        } while (yymajor != this.YYNOCODE && this.yyidx > 0);

        // Trace-only: dump the token names still on the stack on return.
        if (this.yyTraceCallback) {
            var remainingTokens = [];
            for (var i = 1; i <= this.yyidx; i++) {
                remainingTokens.push(this.yyTokenName[this.yystack[i].major]);
            }
            this.trace("Return. Stack=[" + remainingTokens.join(" ") + "]");
        }
    }
|
|
|
|
this.init();
|
|
|
|
} // function Parser()
|
|
|
|
/**
|
|
* Created by Aleksey Chichenkov <a.chichenkov@initi.ru> on 1/28/19.
|
|
*/
|
|
|
|
var fs = require("fs");
|
|
var Lexer = require('./lexer.js');
|
|
|
|
var tokens = (function() {
|
|
|
|
    var std = (function() {
        // Hidden, non-enumerable bookkeeping slots installed on every
        // generated prototype: the list of parent prototypes and the list of
        // own member keys (needed because members are non-enumerable).
        var protos = "__protos__";
        var keys = "__keys__";

        /**
         * Return unique data
         *
         * Collects members from all parent prototypes plus `_main`; any key
         * contributed by more than one parent is dropped entirely (diamond
         * conflict), while `_main`'s own keys always win.
         *
         * @param {Object[]} _arr - prototypes of inheritance classes
         * @param {Object} _main - prototype of resulting class
         *
         * @return {Object}
         * */
        var unique = function(_arr, _main) {
            var result = Object.create(null);
            var to_remove = [];

            for (var i = 0, e = _arr.length; i != e; ++i) {
                var item = _arr[i];

                // Enumerable properties (plain objects / constructor statics).
                for (var key in item) {
                    if (key in result) {
                        to_remove.push(key);
                        continue;
                    }

                    result[key] = item[key];
                }

                // Non-enumerable members of previously generated prototypes
                // are reachable only through their recorded key list.
                if (keys in item) {
                    for (var ii = 0, ee = item[keys].length; ii != ee; ++ii) {
                        var key = item[keys][ii];
                        if (key in result) {
                            to_remove.push(key);
                            continue;
                        }

                        result[key] = item[key];
                    }
                }
            }

            // Drop every key seen more than once (ambiguous inheritance).
            for (var i = 0; i != to_remove.length; ++i) {
                delete result[to_remove[i]];
            }

            // _main's own members override anything inherited.
            for (var key in _main) {
                result[key] = _main[key];
            }

            return result;
        };

        /**
         * Create OOP class
         *
         * @param {Function[]} _constrs - inheritance classes
         * @param {Object} _proto - prototype of resulting class
         * @param {Object?} _static - static data
         *
         * @return {Function}
         * */
        var class_creator = function(_constrs, _proto, _static) {
            _constrs = _constrs || [];
            _proto = _proto || [];
            _static = _static || [];

            // Use an explicit `constructor` from _proto if supplied; otherwise
            // synthesize one that chains every parent constructor in order.
            var constr;
            if (_proto && _proto.hasOwnProperty("constructor")) {
                constr = _proto.constructor;
                delete _proto.constructor;
            } else {
                constr = function() {
                    for (var i = 0; i != _constrs.length; ++i) {
                        _constrs[i].apply(this, arguments);
                    }
                };
            }

            // Prototype has no Object.prototype behind it; bookkeeping slots
            // are non-enumerable so for-in only sees real members.
            var proto = Object.create(null);
            Object.defineProperty(proto, protos, {
                "value": []
            });
            Object.defineProperty(proto, keys, {
                "value": []
            });

            /************************FOR MEMBERS*******************************/
            for (var i = 0, e = _constrs.length; i != e; ++i) {
                proto[protos].push(_constrs[i].prototype);
            }

            var m_un = unique(proto[protos], _proto);
            for (var key in m_un) {
                proto[keys].push(key);

                // Members are installed non-writable/non-enumerable.
                Object.defineProperty(proto, key, {
                    "value": m_un[key]
                });
            }
            /************************FOR MEMBERS END***************************/

            /************************FOR STATICS*******************************/
            var s_un = unique(_constrs, _static);
            for (var key in s_un) {
                // Statics stay enumerable so they are inherited via unique().
                Object.defineProperty(constr, key, {
                    "value": s_un[key],
                    "enumerable": true
                });
            }
            /************************FOR STATICS END***************************/

            Object.defineProperties(constr, {
                "pr": {
                    "value": proto
                },
                "prototype": {
                    "value": proto
                }
            });

            // Classes are immutable once built.
            Object.freeze(proto);
            Object.freeze(constr);

            return constr;
        };

        /**
         * Check if target has prototype
         *
         * Walks the recorded parent-prototype lists recursively.
         *
         * @param {Object} _target - checkable instance
         * @param {Object} _proto - possible prototype
         *
         * */
        var check = function(_target, _proto) {
            for (var i = 0; i != _target[protos].length; ++i) {
                var t_proto = _target[protos][i];
                if (t_proto == _proto) {
                    return true;
                }

                if (t_proto[protos]) {
                    if (check(t_proto, _proto))
                        return true;
                }
            }

            return false;
        };

        /**
         * Check if target is instance of class
         *
         * @param {Object} _target - checkable instance
         * @param {Function} _constr - possible constructor
         *
         * */
        var class_check = function(_target, _constr) {
            if (_target instanceof _constr) {
                return true;
            }

            // Fall back to the manual prototype walk, since generated classes
            // share prototypes by reference rather than via the native chain.
            return check(_target, _constr.prototype);
        };

        return {
            class: class_creator,
            class_check: class_check
        };
    })();
|
|
var tools = {
|
|
merge: function(_obj) {
|
|
var target = Object.create(null);
|
|
var i = 0,
|
|
e = arguments.length;
|
|
for (; i != e; ++i) {
|
|
var options = arguments[i];
|
|
|
|
for (var key in options) {
|
|
if (options[key] === undefined && target === options[key])
|
|
continue;
|
|
|
|
target[key] = options[key];
|
|
}
|
|
}
|
|
|
|
return target;
|
|
}
|
|
};
|
|
|
|
var Node = std.class([], {
|
|
constructor: function Node(_options) {
|
|
var base = tools.merge({
|
|
children: []
|
|
}, _options);
|
|
|
|
this.children = base.children;
|
|
},
|
|
add: function(_n) {
|
|
this.children.push(_n);
|
|
return this;
|
|
}
|
|
});
|
|
|
|
var Lexeme = std.class([Node], {
|
|
constructor: function Lexeme(_options) {
|
|
var base = tools.merge({
|
|
start: -1,
|
|
end: -1,
|
|
type: null,
|
|
value: null
|
|
}, _options);
|
|
|
|
Node.call(this, base);
|
|
|
|
this.start = base.start;
|
|
this.end = base.end;
|
|
this.type = base.type;
|
|
this.value = base.value;
|
|
}
|
|
});
|
|
|
|
var Rule = std.class([Node], {
|
|
constructor: function NonTerminal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
Node.call(this, base);
|
|
}
|
|
});
|
|
|
|
var terminal_literal = std.class([Rule], {
|
|
constructor: function terminal_literal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
Rule.call(this, base);
|
|
},
|
|
position: function() {
|
|
var first_child = this.children[0];
|
|
|
|
return {
|
|
start: first_child.start,
|
|
end: first_child.end,
|
|
}
|
|
}
|
|
});
|
|
|
|
var string_literal = std.class([terminal_literal], {
|
|
constructor: function string_literal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
terminal_literal.call(this, base);
|
|
}
|
|
});
|
|
|
|
var integer_literal = std.class([terminal_literal], {
|
|
constructor: function integer_literal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
terminal_literal.call(this, base);
|
|
}
|
|
});
|
|
|
|
var float_literal = std.class([terminal_literal], {
|
|
constructor: function float_literal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
terminal_literal.call(this, base);
|
|
}
|
|
});
|
|
|
|
var bool_literal = std.class([terminal_literal], {
|
|
constructor: function bool_literal(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
terminal_literal.call(this, base);
|
|
}
|
|
});
|
|
|
|
var id = std.class([Rule], {
|
|
constructor: function id(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
Rule.call(this, base);
|
|
},
|
|
position: function() {
|
|
var first_child = this.children[0];
|
|
|
|
if (std.class_check(first_child, Lexeme)) {
|
|
return {
|
|
start: first_child.start,
|
|
end: first_child.end,
|
|
}
|
|
} else {
|
|
return this.position();
|
|
}
|
|
}
|
|
});
|
|
|
|
// var literal = std.class([Rule], {
|
|
// constructor: function literal(_options) {
|
|
// var base = tools.merge({}, _options);
|
|
//
|
|
// Rule.call(this, base);
|
|
// }
|
|
// });
|
|
|
|
var expr_compares = std.class([Rule], {
|
|
constructor: function expr_compares(_options) {
|
|
var base = tools.merge({
|
|
lexpr: null,
|
|
op: null,
|
|
rexpr: null
|
|
}, _options);
|
|
|
|
Rule.call(this, base);
|
|
|
|
this.lexpr = base.lexpr;
|
|
this.op = base.op;
|
|
this.rexpr = base.rexpr;
|
|
},
|
|
position: function() {
|
|
return {
|
|
start: this.lexpr.position().start,
|
|
end: this.rexpr.position().end,
|
|
}
|
|
}
|
|
});
|
|
|
|
var and = std.class([Rule], {
|
|
constructor: function and(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
expr_compares.call(this, base);
|
|
}
|
|
});
|
|
|
|
var or = std.class([Rule], {
|
|
constructor: function or(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
expr_compares.call(this, base);
|
|
}
|
|
});
|
|
|
|
var not = std.class([Rule], {
|
|
constructor: function not(_options) {
|
|
var base = tools.merge({
|
|
op: null,
|
|
rexpr: null
|
|
}, _options);
|
|
|
|
Rule.call(this, base);
|
|
|
|
this.op = base.op;
|
|
this.rexpr = base.rexpr;
|
|
},
|
|
position: function() {
|
|
return {
|
|
start: this.op.start,
|
|
end: this.rexpr.position().end,
|
|
}
|
|
}
|
|
});
|
|
|
|
var endpoint_compares = std.class([Rule], {
|
|
constructor: function endpoint_compares(_options) {
|
|
var base = tools.merge({
|
|
id: null,
|
|
op: null,
|
|
literal: null
|
|
}, _options);
|
|
|
|
Rule.call(this, base);
|
|
|
|
this.id = base.id;
|
|
this.op = base.op;
|
|
this.literal = base.literal;
|
|
},
|
|
position: function() {
|
|
return {
|
|
start: this.id.start,
|
|
end: this.literal.end
|
|
}
|
|
}
|
|
});
|
|
|
|
var eq = std.class([endpoint_compares], {
|
|
constructor: function eq(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
endpoint_compares.call(this, base);
|
|
},
|
|
});
|
|
|
|
var neq = std.class([endpoint_compares], {
|
|
constructor: function neq(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
endpoint_compares.call(this, base);
|
|
},
|
|
});
|
|
|
|
var gt = std.class([endpoint_compares], {
|
|
constructor: function gt(_options) {
|
|
var base = tools.merge({}, _options);
|
|
|
|
endpoint_compares.call(this, base);
|
|
}
|
|
});
|
|
|
|
var gte = std.class([endpoint_compares], {
    // Greater-or-equal comparison node; all state lives in endpoint_compares.
    constructor: function gte(_options) {
        endpoint_compares.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var lt = std.class([endpoint_compares], {
    // Less-than comparison node; all state lives in endpoint_compares.
    constructor: function lt(_options) {
        endpoint_compares.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var lte = std.class([endpoint_compares], {
    // Less-or-equal comparison node; all state lives in endpoint_compares.
    constructor: function lte(_options) {
        endpoint_compares.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var like = std.class([endpoint_compares], {
    // LIKE pattern-match comparison node; all state lives in endpoint_compares.
    constructor: function like(_options) {
        endpoint_compares.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var nlike = std.class([endpoint_compares], {
    // NOT LIKE pattern-match comparison node; all state lives in endpoint_compares.
    constructor: function nlike(_options) {
        endpoint_compares.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
// var expr = std.class([Rule], {
|
|
// constructor: function expr(_options) {
|
|
// var base = tools.merge({}, _options);
|
|
//
|
|
// Rule.call(this, base);
|
|
// }
|
|
// });
|
|
|
|
var sub_expr = std.class([Rule], {
    // Parenthesized sub-expression node: `( expr )`.
    //
    // FIX: the constructor function was misnamed `function expr`, which made
    // `constructor.name` and stack traces report the wrong rule; every
    // sibling class names the constructor after itself.
    constructor: function sub_expr(_options) {
        var base = tools.merge({
            LCB: null,   // opening bracket token
            expr: null,  // inner expression node
            RCB: null    // closing bracket token
        }, _options);

        Rule.call(this, base);

        this.LCB = base.LCB;
        this.expr = base.expr;
        this.RCB = base.RCB;
    },

    // Source span: opening bracket through closing bracket.
    position: function() {
        return {
            start: this.LCB.start,
            end: this.RCB.end
        };
    }
});
|
|
|
|
var address_literal_content = std.class([Rule], {
    // Marker rule for the token run inside `Address [...]`; defines no
    // fields of its own beyond what Rule provides.
    constructor: function address_literal_content(_options) {
        Rule.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var address_literal = std.class([Rule], {
    // `Address ["a", "b", "c"]` literal node.
    constructor: function address_literal(_options) {
        var base = tools.merge({
            keyword: null,  // the Address keyword token
            LSB: null,      // '[' token
            RSB: null,      // ']' token
        }, _options);

        Rule.call(this, base);

        this.keyword = base.keyword;
        this.LSB = base.LSB;
        this.RSB = base.RSB;
    },

    // Source span: from the Address keyword through the closing bracket.
    position: function() {
        return {
            // BUG FIX: was `this.ADDRESS.start`, but no `ADDRESS` field is
            // ever assigned — the keyword token is stored as `this.keyword`
            // (mirrors oid_literal.position()). The old code threw a
            // TypeError whenever position() was called.
            start: this.keyword.start,
            end: this.RSB.end
        };
    }
});
|
|
|
|
var oid_literal_content = std.class([Rule], {
    // Marker rule for the token run inside `Oid [...]`; defines no
    // fields of its own beyond what Rule provides.
    constructor: function oid_literal_content(_options) {
        Rule.call(this, tools.merge({}, _options));
    }
});
|
|
|
|
var oid_literal = std.class([Rule], {
    // `Oid [a.b.c]` literal node.
    constructor: function oid_literal(_options) {
        var opts = tools.merge({
            keyword: null,  // the Oid keyword token
            LSB: null,      // '[' token
            RSB: null       // ']' token
        }, _options);

        Rule.call(this, opts);

        var self = this;
        ["keyword", "LSB", "RSB"].forEach(function(field) {
            self[field] = opts[field];
        });
    },

    // Source span: from the Oid keyword through the closing bracket.
    position: function() {
        return {
            start: this.keyword.start,
            end: this.RSB.end
        };
    }
});
|
|
|
|
var time_diff_literal = std.class([Rule], {
    // `TimeDiff [days hh:mm:ss microseconds]` literal node.
    constructor: function time_diff_literal(_options) {
        var opts = tools.merge({
            keyword: null,  // the TimeDiff keyword token
            LSB: null,      // '[' token
            RSB: null,      // ']' token
            // Numeric components; -1 is the "not provided" sentinel.
            days: -1,
            hours: -1,
            minutes: -1,
            seconds: -1,
            microseconds: -1
        }, _options);

        Rule.call(this, opts);

        var self = this;
        [
            "keyword", "LSB", "RSB",
            "days", "hours", "minutes", "seconds", "microseconds"
        ].forEach(function(field) {
            self[field] = opts[field];
        });
    },

    // Source span: from the TimeDiff keyword through the closing bracket.
    position: function() {
        return {
            start: this.keyword.start,
            end: this.RSB.end
        };
    }
});
|
|
|
|
|
|
|
|
// Public surface of this module: the lexeme wrapper plus one constructor
// per grammar rule. Keys match the rule names used by the parser actions.
return {
    // terminal
    LEXEME: Lexeme,

    // not terminal
    id: id,
    string_literal: string_literal,
    integer_literal: integer_literal,
    float_literal: float_literal,
    bool_literal: bool_literal,
    address_literal: address_literal,
    oid_literal: oid_literal,
    time_diff_literal: time_diff_literal,

    // boolean connectives
    or: or,
    and: and,
    not: not,

    // `id <op> literal` comparisons
    eq: eq,
    neq: neq,
    gt: gt,
    gte: gte,
    lt: lt,
    lte: lte,
    like: like,
    nlike: nlike,

    // expr: expr,
    sub_expr: sub_expr,
    address_literal_content: address_literal_content,
    oid_literal_content: oid_literal_content,
}
|
|
|
|
})();
|
|
|
|
|
|
|
|
|
|
// Shared accumulator that the grammar's reduce actions populate; reset at
// the start of every parse run.
var _result = {};

/**
 * Tokenize and parse an input string.
 *
 * Drives the generated Parser with tokens from the Lexer, then issues the
 * final empty `parse()` call so the parser can run its remaining
 * reductions and accept.
 *
 * FIX: removed the leftover debug `console.log("PARSE", token.lexeme)`
 * that fired on every token.
 *
 * @param {string} _input - expression source text
 * @returns {object} the parse tree / result object built by the grammar actions
 */
var LemonJS = function(_input) {
    // Prototype-less object so grammar actions can use arbitrary keys safely.
    _result = Object.create(null);

    var parser = new Parser();
    var lexer = new Lexer(_input);
    var token;

    while ((token = lexer.next())) {
        // NOTE(review): tokens with a non-zero error code are silently
        // dropped here — presumably the lexer reports them elsewhere; confirm.
        if (token.error === 0) {
            parser.parse(parser["TOKEN_" + token.lexeme], token);
        }
    }

    // End-of-input marker: a parse() call with no arguments finalizes the parse.
    parser.parse();

    return _result;
};
|
|
|
|
|
|
// Snapshot tests: parse each expression and dump the resulting tree as JSON.
// FIXES: JSON.stringify was called as (value, true, 3) — a boolean replacer
// is not a function or array, so it was silently ignored; `null` states the
// intent explicitly. The nine copy-pasted stanzas are now table-driven.
if (!fs.existsSync("tests")) {
    fs.mkdirSync("tests");
}

// Output file name (under tests/, with .json suffix) -> expression to parse.
var snapshotCases = {
    test_and: 'abc == 1 and abc1 == 2 and (bbc == 5)',
    test_address: 'abc == Address ["a", "b", "c"]',
    test_float: 'abc == 23.2',
    test_string: 'abc == "sadfasdf"',
    test_bool: 'abc == true or cab == false',
    test_not: 'not cab == false',
    test_oid: 'abc == Oid [a.b.d]',
    test_timediff: 'add == TimeDiff [17924 15:01:24 441000]',
    test_timediff_single: 'TimeDiff [17924 15:01:24 441000]'
};

Object.keys(snapshotCases).forEach(function(name) {
    var tree = LemonJS(snapshotCases[name]);
    fs.writeFileSync("tests/" + name + ".json", JSON.stringify(tree, null, 3));
});