/** Compute the set of valid tokens that can occur starting in state {@code s}.
 *  If {@code ctx} is null, the set of tokens will not include what can follow
 *  the rule surrounding {@code s}. In other words, the set will be
 *  restricted to tokens reachable staying within {@code s}'s rule.
 */
public IntervalSet nextTokens(ATNState s, RuleContext ctx) {
    LL1Analyzer anal = new LL1Analyzer(this);
    IntervalSet next = anal.LOOK(s, ctx);
    return next;
}
// Variant of the same method in which the lookahead context is threaded as a
// PredictionContext; EMPTY_LOCAL plays the role of the null RuleContext above.
/** Compute the set of valid tokens that can occur starting in state {@code s}.
 *  If {@code ctx} is {@link PredictionContext#EMPTY_LOCAL}, the set of tokens
 *  will not include what can follow the rule surrounding {@code s}. In other
 *  words, the set will be restricted to tokens reachable staying within
 *  {@code s}'s rule.
 */
@NotNull
public IntervalSet nextTokens(ATNState s, @NotNull PredictionContext ctx) {
    Args.notNull("ctx", ctx);
    LL1Analyzer anal = new LL1Analyzer(this);
    IntervalSet next = anal.LOOK(s, ctx);
    return next;
}
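A minimal sketch of how a caller might use nextTokens, e.g. to list the tokens acceptable at the parser's current state. The helper method and its name are illustrative; only the runtime calls are real API.

import org.antlr.v4.runtime.Parser;
import org.antlr.v4.runtime.atn.ATN;
import org.antlr.v4.runtime.atn.ATNState;
import org.antlr.v4.runtime.misc.IntervalSet;

// Hypothetical helper, not part of the runtime: prints the tokens that can
// occur starting at the parser's current ATN state.
static void printExpectedTokens(Parser parser) {
    ATN atn = parser.getATN();
    ATNState s = atn.states.get(parser.getState());
    // null ctx: stay within s's rule; passing parser.getContext() instead
    // would also include tokens that can follow the surrounding rule.
    IntervalSet withinRule = atn.nextTokens(s, null);
    System.out.println("expected here: " + withinRule);
}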
protected void processParser() {
    g.decisionLOOK = new ArrayList<IntervalSet[]>(g.atn.getNumberOfDecisions()+1);
    for (DecisionState s : g.atn.decisionToState) {
        g.tool.log("LL1", "\nDECISION "+s.decision+" in rule "+g.getRule(s.ruleIndex).name);
        IntervalSet[] look;
        if ( s.nonGreedy ) { // nongreedy decisions can't be LL(1)
            look = new IntervalSet[s.getNumberOfTransitions()+1];
        }
        else {
            LL1Analyzer anal = new LL1Analyzer(g.atn);
            look = anal.getDecisionLookahead(s);
            g.tool.log("LL1", "look=" + Arrays.toString(look));
        }
        assert s.decision + 1 >= g.decisionLOOK.size();
        Utils.setSize(g.decisionLOOK, s.decision+1);
        g.decisionLOOK.set(s.decision, look);
        g.tool.log("LL1", "LL(1)? " + disjoint(look));
    }
}
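The final log line reports whether the decision is LL(1). A sketch of the disjointness test it refers to, assumed to mirror the tool's disjoint helper (the real code may differ in detail): a decision is LL(1) only if no token appears in the lookahead set of more than one alternative.

import org.antlr.v4.runtime.misc.IntervalSet;

// Sketch, not the tool's exact code: alt lookahead sets are pairwise
// disjoint iff each set shares nothing with the union of its predecessors.
static boolean disjoint(IntervalSet[] altLook) {
    if (altLook == null) return false;
    IntervalSet combined = new IntervalSet();
    for (IntervalSet look : altLook) {
        if (look == null) return false;                // lookahead unavailable for this alt
        if (!look.and(combined).isNil()) return false; // overlap => not LL(1)
        combined.addAll(look);
    }
    return true;
}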
protected void processLexer() {
    // make sure all non-fragment lexer rules match at least one symbol
    for (Rule rule : g.rules.values()) {
        if (rule.isFragment()) {
            continue;
        }
        LL1Analyzer analyzer = new LL1Analyzer(g.atn);
        IntervalSet look = analyzer.LOOK(g.atn.ruleToStartState[rule.index], null);
        if (look.contains(Token.EPSILON)) {
            g.tool.errMgr.grammarError(ErrorType.EPSILON_TOKEN, g.fileName,
                                       ((GrammarAST)rule.ast.getChild(0)).getToken(), rule.name);
        }
    }
}
// Variant of processLexer() threading PredictionContext.EMPTY_LOCAL instead of
// a null context; the check itself is unchanged.
protected void processLexer() {
    // make sure all non-fragment lexer rules match at least one symbol
    for (Rule rule : g.rules.values()) {
        if (rule.isFragment()) {
            continue;
        }
        LL1Analyzer analyzer = new LL1Analyzer(g.atn);
        IntervalSet look = analyzer.LOOK(g.atn.ruleToStartState[rule.index], PredictionContext.EMPTY_LOCAL);
        if (look.contains(Token.EPSILON)) {
            g.tool.errMgr.grammarError(ErrorType.EPSILON_TOKEN, g.fileName,
                                       ((GrammarAST)rule.ast.getChild(0)).getToken(), rule.name);
        }
    }
}
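For reference, the kind of rule this check rejects; the grammar below is an illustrative example, not taken from the source.

// Illustrative lexer grammar (shown as a comment): the check above fires
// when LOOK on a non-fragment rule's start state contains Token.EPSILON,
// i.e. when the rule can match zero characters.
//
//   lexer grammar L;
//   WS : ' '* ;    // '*' admits zero characters -> EPSILON_TOKEN error
//   ID : [a-z]+ ;  // '+' requires at least one character -> OK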
// Closure check: a (...)* or (...)+ block whose body can match the empty
// string would loop without consuming input; left-recursive rules get a
// dedicated error for the same condition.
LL1Analyzer analyzer = new LL1Analyzer(atn);
ATNState blkStart = pair.b;
ATNState blkStop = pair.c;
if (analyzer.LOOK(blkStart, blkStop, null).contains(org.antlr.v4.runtime.Token.EPSILON)) {
    ErrorType errorType = pair.a instanceof LeftRecursiveRule
        ? ErrorType.EPSILON_LR_FOLLOW
        : ErrorType.EPSILON_CLOSURE;
    g.tool.errMgr.grammarError(errorType, g.fileName,
                               ((GrammarAST)pair.a.ast.getChild(0)).getToken(),
                               pair.a.name);
}

// Optional check: an (...)? block with an alternative that can match the
// empty string. startState is the start state of the alternative under test,
// defined in surrounding code elided from this excerpt.
LL1Analyzer analyzer = new LL1Analyzer(atn);
if (analyzer.LOOK(startState, pair.c, null).contains(org.antlr.v4.runtime.Token.EPSILON)) {
    g.tool.errMgr.grammarError(ErrorType.EPSILON_OPTIONAL, g.fileName,
                               ((GrammarAST)pair.a.ast.getChild(0)).getToken(),
                               pair.a.name);
}
// Tuple3/PredictionContext variant of the same checks: getItem1()..getItem3()
// replace the pair.a/pair.b/pair.c fields, and EMPTY_LOCAL replaces the null
// context. startState is again the alternative's start state from elided
// surrounding code.
LL1Analyzer analyzer = new LL1Analyzer(atn);
ATNState blkStart = pair.getItem2();
ATNState blkStop = pair.getItem3();
if (analyzer.LOOK(startState, pair.getItem3(), PredictionContext.EMPTY_LOCAL).contains(org.antlr.v4.runtime.Token.EPSILON)) {
    g.tool.errMgr.grammarError(ErrorType.EPSILON_OPTIONAL, g.fileName,
                               ((GrammarAST)pair.getItem1().ast.getChild(0)).getToken(),
                               pair.getItem1().name);
}
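And the shapes of subrules these two checks flag; again an illustrative grammar, not taken from the source.

// Illustrative parser rules (shown as comments):
//
//   a : ('x'?)? ;   // optional whose alternative can be empty -> EPSILON_OPTIONAL
//   b : ('x'?)* ;   // closure whose body can be empty         -> EPSILON_CLOSURE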