Refine search
/**
 * Parses the given expression string into an {@link Expr} AST, optionally
 * flattening the resulting tree.
 *
 * @param in          expression text to parse
 * @param macroTable  macro table passed to the listener for macro resolution
 * @param withFlatten whether to run {@code flatten} over the parsed AST
 * @return the parsed (and possibly flattened) expression tree
 */
@VisibleForTesting
static Expr parse(String in, ExprMacroTable macroTable, boolean withFlatten)
{
  ExprLexer lexer = new ExprLexer(new ANTLRInputStream(in));
  CommonTokenStream tokens = new CommonTokenStream(lexer);
  ExprParser parser = new ExprParser(tokens);
  parser.setBuildParseTree(true);
  ParseTree parseTree = parser.expr();
  ParseTreeWalker walker = new ParseTreeWalker();
  ExprListenerImpl listener = new ExprListenerImpl(parseTree, macroTable);
  walker.walk(listener, parseTree);
  // Fetch the AST once instead of calling getAST() twice in the ternary —
  // avoids any repeated work or side effects inside the accessor.
  Expr ast = listener.getAST();
  return withFlatten ? flatten(ast) : ast;
}
CommonTokenStream tokens = new CommonTokenStream(lexer); List<Token> allTokens = tokens.getTokens(); if (stop != null && allTokens != null && !allTokens.isEmpty()) { Token last = allTokens.get(allTokens.size() - 1); boolean notEOF = last.getType() != Token.EOF; boolean lastGreaterThanDocument = last.getTokenIndex() > stop.getTokenIndex(); boolean sameChannel = last.getChannel() == stop.getChannel(); if (notEOF && lastGreaterThanDocument && sameChannel) {
/**
 * Counts the buffered tokens whose channel matches {@code channel},
 * stopping at the first EOF token so EOF is counted at most once.
 */
public int getNumberOfOnChannelTokens() {
    fill();
    int count = 0;
    for (Token tok : tokens) {
        if (tok.getChannel() == channel) {
            count++;
        }
        if (tok.getType() == Token.EOF) {
            break;
        }
    }
    return count;
}
}
/**
 * Parses a type-calculation expression into an ANTLR parse tree using a
 * two-stage strategy: the potentially faster SLL prediction mode first,
 * falling back to full LL prediction if SLL parsing is cancelled.
 *
 * @param calculation the type-calculation expression text (matched case-insensitively)
 * @return the root parse-tree context for the expression
 */
private static ParserRuleContext parseTypeCalculation(String calculation) {
    TypeCalculationLexer lexer = new TypeCalculationLexer(new CaseInsensitiveStream(new ANTLRInputStream(calculation)));
    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    TypeCalculationParser parser = new TypeCalculationParser(tokenStream);
    // Replace the default console listeners so syntax errors are routed to ERROR_LISTENER.
    lexer.removeErrorListeners();
    lexer.addErrorListener(ERROR_LISTENER);
    parser.removeErrorListeners();
    parser.addErrorListener(ERROR_LISTENER);
    ParserRuleContext tree;
    try {
        // first, try parsing with potentially faster SLL mode
        parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        tree = parser.typeCalculation();
    } catch (ParseCancellationException ex) {
        // if we fail, parse with LL mode
        tokenStream.reset(); // rewind input stream
        parser.reset();
        parser.getInterpreter().setPredictionMode(PredictionMode.LL);
        tree = parser.typeCalculation();
    }
    return tree;
}
/** * Initializes logical expression lexer and parser, add error listener that converts all * syntax error into {@link org.apache.drill.common.exceptions.ExpressionParsingException}. * Parses given expression into logical expression instance. * * @param expr expression to be parsed * @return logical expression instance */ public static LogicalExpression parse(String expr) { ExprLexer lexer = new ExprLexer(CharStreams.fromString(expr)); lexer.removeErrorListeners(); // need to remove since default listener will output warning lexer.addErrorListener(ErrorListener.INSTANCE); CommonTokenStream tokens = new CommonTokenStream(lexer); ExprParser parser = new ExprParser(tokens); parser.removeErrorListeners(); // need to remove since default listener will output warning parser.addErrorListener(ErrorListener.INSTANCE); ExprParser.ParseContext parseContext = parser.parse(); logger.trace("Tokens: [{}]. Parsing details: [{}].", tokens.getText(), parseContext.toInfoString(parser)); return parseContext.e; }
/**
 * Execute statements from an include file.
 * <p>
 * The content is handed to the lexer as a String. The previous implementation
 * encoded it to UTF-8 bytes and then read them back through
 * {@code ANTLRInputStream(InputStream)}, which decodes with the platform default
 * charset — that round trip could corrupt non-ASCII content on non-UTF-8
 * platforms, and the intermediate streams were unnecessary.
 *
 * @param content source text of the include file
 * @throws Exception if parsing or visiting the program fails
 */
void include(String content) throws Exception {
  HplsqlLexer lexer = new HplsqlLexer(new ANTLRInputStream(content));
  CommonTokenStream tokens = new CommonTokenStream(lexer);
  HplsqlParser parser = new HplsqlParser(tokens);
  ParseTree tree = parser.program();
  visit(tree);
}
public XPathElement[] split(String path) { ANTLRInputStream in; try { in = new ANTLRInputStream(new StringReader(path)); lexer.removeErrorListeners(); lexer.addErrorListener(new XPathLexerErrorListener()); CommonTokenStream tokenStream = new CommonTokenStream(lexer); try { tokenStream.fill(); List<Token> tokens = tokenStream.getTokens(); Token el = tokens.get(i); Token next = null; switch ( el.getType() ) { case XPathLexer.ROOT : case XPathLexer.ANYWHERE : boolean anywhere = el.getType() == XPathLexer.ANYWHERE; i++; next = tokens.get(i); boolean invert = next.getType()==XPathLexer.BANG; if ( invert ) { i++;
/**
 * Parses a MySQL DDL/SQL statement and collects the schema changes it implies.
 *
 * @param currentDB database that unqualified table names resolve against
 * @param sql       raw SQL text to parse
 * @return schema changes gathered by the parse-tree listener
 */
private static List<SchemaChange> parseSQL(String currentDB, String sql) {
    ANTLRInputStream input = new ANTLRInputStream(sql);
    mysqlLexer lexer = new mysqlLexer(input);
    // Drop the default console error listener.
    lexer.removeErrorListeners();
    TokenStream tokens = new CommonTokenStream(lexer);
    // Parameterized logging: no string concatenation when DEBUG is disabled.
    LOGGER.debug("SQL_PARSE <- \"{}\"", sql);
    mysqlParser parser = new mysqlParser(tokens);
    parser.removeErrorListeners();
    MysqlParserListener listener = new MysqlParserListener(currentDB, tokens);
    ParseTree tree = parser.parse();
    ParseTreeWalker.DEFAULT.walk(listener, tree);
    // toStringTree() is expensive; only render it when DEBUG is actually on.
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("SQL_PARSE -> {}", tree.toStringTree(parser));
    }
    return listener.getSchemaChanges();
}
/**
 * Parse the raw EQL query and apply it to the supplied query.
 *
 * @param raw   raw EQL text
 * @param query query the parsed expressions are applied to
 */
public static <T> void parse(String raw, SpiQuery<T> query) {
    final CommonTokenStream tokenStream = new CommonTokenStream(new EQLLexer(CharStreams.fromString(raw)));
    final EQLParser eqlParser = new EQLParser(tokenStream);
    eqlParser.addErrorListener(errorListener);
    final EQLParser.Select_statementContext selectContext = eqlParser.select_statement();
    // Walk the select statement, letting the adapter translate it onto the query.
    new ParseTreeWalker().walk(new EqlAdapter<>(query), selectContext);
    query.simplifyExpressions();
}
CommonTokenStream tokens = new CommonTokenStream(lexer); StatusCodeParser parser = new StatusCodeParser(tokens); Map<Integer, String> statusCodes = new LinkedHashMap<>(); ParseTreeWalker.DEFAULT.walk(listener, parser.text()); return statusCodes;
/**
 * Parses glob syntax into a {@code GlobNode} tree.
 *
 * @param input character stream holding the glob pattern
 * @return root node of the parsed glob
 * @throws GlobParseException if the input is not a valid glob
 */
private static GlobNode parse(ANTLRInputStream input) throws GlobParseException {
    GlobLexer lexer = new GlobLexer(input);
    CommonTokenStream tokenStream = new CommonTokenStream(lexer);
    GlobParser parser = new GlobParser(tokenStream);
    // Bail on the first syntax error instead of attempting recovery.
    parser.setErrorHandler(new BailErrorStrategy());
    ParseTreeWalker walker = new ParseTreeWalker();
    GlobListener listener = new GlobListener();
    try {
        walker.walk(listener, parser.rootGlob());
    } catch (ParseCancellationException e) {
        // BailErrorStrategy normally wraps a RecognitionException, but guard the
        // cast so an unexpected/absent cause cannot surface as a CCE or NPE here.
        Throwable cause = e.getCause();
        if (cause instanceof RecognitionException) {
            RecognitionException ex = (RecognitionException) cause;
            throw new GlobParseException("Unable to parse glob: Error at token "
                + ex.getOffendingToken().getText()
                + " at position " + ex.getOffendingToken().getLine()
                + ":" + ex.getOffendingToken().getCharPositionInLine(), ex);
        }
        throw new GlobParseException("Unable to parse glob", e);
    }
    return listener.popNode();
}
/**
 * Creates the dialect-specific SQL parser for the given database type.
 *
 * @param databaseType target database dialect
 * @param lexer        lexer already holding the SQL input
 * @return parser matching the dialect
 * @throws UnsupportedOperationException if the database type has no parser
 */
private static SQLParser createSQLParser(final DatabaseType databaseType, final Lexer lexer) {
    TokenStream tokenStream = new CommonTokenStream(lexer);
    switch (databaseType) {
        // H2 is handled with the MySQL parser, so both share one case.
        case H2:
        case MySQL:
            return new MySQLParser(tokenStream);
        case PostgreSQL:
            return new PostgreSQLParser(tokenStream);
        case SQLServer:
            return new SQLServerParser(tokenStream);
        case Oracle:
            return new OracleParser(tokenStream);
        default:
            throw new UnsupportedOperationException(String.format("Can not support database type [%s].", databaseType));
    }
}
}
/**
 * Creates the listener with a reusable lexer/parser pair (seeded with empty
 * input), a parse-tree walker, and the parameter/results verifiers.
 */
public FormatListener() {
    lexer = new FeatureResultsLexer(new ANTLRInputStream(""));
    parser = new FeatureResultsParser(new CommonTokenStream(lexer));
    walker = new ParseTreeWalker();
    parameterVerifier = new ParameterVerifier();
    resultsVerifier = new ResultsVerifier();
}
/**
 * Parses the given SQL text into a parse tree of statements using a two-stage
 * strategy: the potentially faster SLL prediction mode first, falling back to
 * full LL prediction if SLL parsing is cancelled.
 *
 * @param sql SQL text (matched case-insensitively)
 * @return root parse-tree context for the statements
 */
private ParserRuleContext getParseTree(final String sql) {
    final SqlBaseLexer sqlBaseLexer = new SqlBaseLexer(
        new CaseInsensitiveStream(CharStreams.fromString(sql)));
    final CommonTokenStream tokenStream = new CommonTokenStream(sqlBaseLexer);
    final SqlBaseParser sqlBaseParser = new SqlBaseParser(tokenStream);
    // Replace the default console listeners so syntax errors are routed to ERROR_LISTENER.
    sqlBaseLexer.removeErrorListeners();
    sqlBaseLexer.addErrorListener(ERROR_LISTENER);
    sqlBaseParser.removeErrorListeners();
    sqlBaseParser.addErrorListener(ERROR_LISTENER);
    final Function<SqlBaseParser, ParserRuleContext> parseFunction = SqlBaseParser::statements;
    try {
        // first, try parsing with potentially faster SLL mode
        sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
        return parseFunction.apply(sqlBaseParser);
    } catch (final ParseCancellationException ex) {
        // if we fail, parse with LL mode
        tokenStream.seek(0); // rewind input stream
        sqlBaseParser.reset();
        sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.LL);
        return parseFunction.apply(sqlBaseParser);
    }
}
CommonTokenStream tokenStream = new CommonTokenStream(lexer); SqlBaseParser parser = new SqlBaseParser(tokenStream); tokenStream.reset(); // rewind input stream parser.reset();
/**
 * Resolves the text of the offending token, given that the parser was able to
 * locate the offending symbol. When the offending symbol is EOF (whose text is
 * not useful to show), falls back to an earlier token from the stream — or an
 * empty string if too few tokens exist.
 * (Translated from the original Chinese comment.)
 */
private String getOffendingSymbolWithHint(Recognizer<?, ?> recognizer, Object offendingSymbol) {
    Token token = (Token)offendingSymbol;
    String tokenText = token.getText();
    if (tokenText.equals(SYMBOL_EOF)) {
        List<Token> allTokens = ((org.antlr.v4.runtime.CommonTokenStream)recognizer.getInputStream()).getTokens();
        int tokensCount = allTokens.size();
        // NOTE(review): picks the token MIN_SIZE_FOR_TOKENS positions from the
        // end — presumably the last real token before EOF; confirm against the
        // constant's value.
        return (tokensCount < MIN_SIZE_FOR_TOKENS) ? "" : allTokens.get(tokensCount - MIN_SIZE_FOR_TOKENS)
            .getText();
    }
    return tokenText;
}
/**
List<? extends Token> tokenList = tokenize(pattern); ListTokenSource tokenSrc = new ListTokenSource(tokenList); CommonTokenStream tokens = new CommonTokenStream(tokenSrc); if ( tokens.LA(1)!=Token.EOF ) { throw new StartRuleDoesNotConsumeFullPattern();
/**
 * Lexes ANTLR grammar text into a fully-buffered token stream.
 *
 * @param text grammar source text
 * @return a filled {@code TokenStreamSubset} over the text
 */
public static CommonTokenStream tokenizeANTLRGrammar(String text) {
    final ANTLRv4Lexer grammarLexer = new ANTLRv4Lexer(new ANTLRInputStream(text));
    final CommonTokenStream tokenStream = new TokenStreamSubset(grammarLexer);
    tokenStream.fill(); // buffer every token up front
    return tokenStream;
}
/**
 * Scans all tokens in the stream and collects hidden-channel comment tokens
 * into the single-line and multiline comment lists.
 */
private void extractComments() {
    for (final Token candidate : tokenStream.getTokens()) {
        // Comments live on the hidden channel; everything else is skipped.
        if (candidate.getChannel() == Token.HIDDEN_CHANNEL) {
            if (ListenerUtil.isSingleLineComment(candidate)) {
                singleLineComments.add(candidate);
            }
            if (ListenerUtil.isMultilineComment(candidate)) {
                multilineComments.add(candidate);
            }
        }
    }
}
}