
How to use CommonTokenStream in org.antlr.v4.runtime

Best Java code snippets using org.antlr.v4.runtime.CommonTokenStream (Showing top 20 results out of 2,259)

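Before the project-specific snippets below, here is a minimal end-to-end sketch of the usual pipeline: a generated lexer feeds a CommonTokenStream, which buffers tokens for a generated parser. HelloLexer, HelloParser, and the greeting rule are hypothetical stand-ins for the classes ANTLR generates from a grammar; only the org.antlr.v4.runtime types are real.

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.tree.ParseTree;

public class HelloRunner {
  public static void main(String[] args) {
    // CharStreams is the modern replacement for the deprecated ANTLRInputStream
    // used by several of the snippets below.
    HelloLexer lexer = new HelloLexer(CharStreams.fromString("hello world"));
    // CommonTokenStream buffers the lexer's tokens and hands them to the parser.
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    HelloParser parser = new HelloParser(tokens);
    // Invoke the grammar's start rule (here the hypothetical 'greeting') to build a parse tree.
    ParseTree tree = parser.greeting();
    System.out.println(tree.toStringTree(parser));
  }
}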
origin: apache/incubator-druid

@VisibleForTesting
static Expr parse(String in, ExprMacroTable macroTable, boolean withFlatten)
{
 ExprLexer lexer = new ExprLexer(new ANTLRInputStream(in));
 CommonTokenStream tokens = new CommonTokenStream(lexer);
 ExprParser parser = new ExprParser(tokens);
 parser.setBuildParseTree(true);
 ParseTree parseTree = parser.expr();
 ParseTreeWalker walker = new ParseTreeWalker();
 ExprListenerImpl listener = new ExprListenerImpl(parseTree, macroTable);
 walker.walk(listener, parseTree);
 return withFlatten ? flatten(listener.getAST()) : listener.getAST();
}
origin: graphql-java/graphql-java

CommonTokenStream tokens = new CommonTokenStream(lexer);
List<Token> allTokens = tokens.getTokens();
if (stop != null && allTokens != null && !allTokens.isEmpty()) {
  Token last = allTokens.get(allTokens.size() - 1);
  boolean notEOF = last.getType() != Token.EOF;
  boolean lastGreaterThanDocument = last.getTokenIndex() > stop.getTokenIndex();
  boolean sameChannel = last.getChannel() == stop.getChannel();
  if (notEOF && lastGreaterThanDocument && sameChannel) {
origin: org.antlr/antlr4-runtime

  /** Count EOF just once. */
  public int getNumberOfOnChannelTokens() {
    int n = 0;
    fill();
    for (int i = 0; i < tokens.size(); i++) {
      Token t = tokens.get(i);
      if ( t.getChannel()==channel ) n++;
      if ( t.getType()==Token.EOF ) break;
    }
    return n;
  }
}
origin: prestodb/presto

private static ParserRuleContext parseTypeCalculation(String calculation)
{
  TypeCalculationLexer lexer = new TypeCalculationLexer(new CaseInsensitiveStream(new ANTLRInputStream(calculation)));
  CommonTokenStream tokenStream = new CommonTokenStream(lexer);
  TypeCalculationParser parser = new TypeCalculationParser(tokenStream);
  lexer.removeErrorListeners();
  lexer.addErrorListener(ERROR_LISTENER);
  parser.removeErrorListeners();
  parser.addErrorListener(ERROR_LISTENER);
  ParserRuleContext tree;
  try {
    // first, try parsing with potentially faster SLL mode
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    tree = parser.typeCalculation();
  }
  catch (ParseCancellationException ex) {
    // if we fail, parse with LL mode
    tokenStream.reset(); // rewind input stream
    parser.reset();
    parser.getInterpreter().setPredictionMode(PredictionMode.LL);
    tree = parser.typeCalculation();
  }
  return tree;
}
origin: apache/drill

/**
 * Initializes logical expression lexer and parser, add error listener that converts all
 * syntax error into {@link org.apache.drill.common.exceptions.ExpressionParsingException}.
 * Parses given expression into logical expression instance.
 *
 * @param expr expression to be parsed
 * @return logical expression instance
 */
public static LogicalExpression parse(String expr) {
 ExprLexer lexer = new ExprLexer(CharStreams.fromString(expr));
 lexer.removeErrorListeners(); // need to remove since default listener will output warning
 lexer.addErrorListener(ErrorListener.INSTANCE);
 CommonTokenStream tokens = new CommonTokenStream(lexer);
 ExprParser parser = new ExprParser(tokens);
 parser.removeErrorListeners(); // need to remove since default listener will output warning
 parser.addErrorListener(ErrorListener.INSTANCE);
 ExprParser.ParseContext parseContext = parser.parse();
 logger.trace("Tokens: [{}]. Parsing details: [{}].", tokens.getText(), parseContext.toInfoString(parser));
 return parseContext.e;
}
origin: apache/hive

/**
 * Execute statements from an include file
 */
void include(String content) throws Exception {
 InputStream input = new ByteArrayInputStream(content.getBytes("UTF-8"));
 HplsqlLexer lexer = new HplsqlLexer(new ANTLRInputStream(input));
 CommonTokenStream tokens = new CommonTokenStream(lexer);
 HplsqlParser parser = new HplsqlParser(tokens);
 ParseTree tree = parser.program(); 
 visit(tree);    
}

origin: org.antlr/antlr4-runtime

public XPathElement[] split(String path) {
  ANTLRInputStream in;
  try {
    in = new ANTLRInputStream(new StringReader(path));
  }
  catch (IOException ioe) {
    throw new IllegalArgumentException("Could not read path: " + path, ioe);
  }
  XPathLexer lexer = new XPathLexer(in);
  lexer.removeErrorListeners();
  lexer.addErrorListener(new XPathLexerErrorListener());
  CommonTokenStream tokenStream = new CommonTokenStream(lexer);
  try {
    tokenStream.fill();
  }
  catch (LexerNoViableAltException e) {
    int pos = lexer.getCharPositionInLine();
    String msg = "Invalid tokens or characters at index " + pos + " in path '" + path + "'";
    throw new IllegalArgumentException(msg, e);
  }
  List<Token> tokens = tokenStream.getTokens();
  int n = tokens.size();
  int i = 0;
  while ( i < n ) {
    Token el = tokens.get(i);
    Token next = null;
    switch ( el.getType() ) {
      case XPathLexer.ROOT :
      case XPathLexer.ANYWHERE :
        boolean anywhere = el.getType() == XPathLexer.ANYWHERE;
        i++;
        next = tokens.get(i);
        boolean invert = next.getType() == XPathLexer.BANG;
        if ( invert ) {
          i++;

private static List<SchemaChange> parseSQL(String currentDB, String sql) {
  ANTLRInputStream input = new ANTLRInputStream(sql);
  mysqlLexer lexer = new mysqlLexer(input);
  lexer.removeErrorListeners();
  TokenStream tokens = new CommonTokenStream(lexer);
  LOGGER.debug("SQL_PARSE <- \"" + sql + "\"");
  mysqlParser parser = new mysqlParser(tokens);
  parser.removeErrorListeners();
  MysqlParserListener listener = new MysqlParserListener(currentDB, tokens);
  ParseTree tree = parser.parse();
  ParseTreeWalker.DEFAULT.walk(listener, tree);
  LOGGER.debug("SQL_PARSE ->   " + tree.toStringTree(parser));
  return listener.getSchemaChanges();
}
origin: ebean-orm/ebean

/**
 * Parse the raw EQL query and apply it to the supplied query.
 */
public static <T> void parse(String raw, SpiQuery<T> query) {
 EQLLexer lexer = new EQLLexer(CharStreams.fromString(raw));
 CommonTokenStream tokens = new CommonTokenStream(lexer);
 EQLParser parser = new EQLParser(tokens);
 parser.addErrorListener(errorListener);
 EQLParser.Select_statementContext context = parser.select_statement();
 EqlAdapter<T> adapter = new EqlAdapter<>(query);
 ParseTreeWalker walker = new ParseTreeWalker();
 walker.walk(adapter, context);
 query.simplifyExpressions();
}
origin: jooby-project/jooby

CommonTokenStream tokens = new CommonTokenStream(lexer);
StatusCodeParser parser = new StatusCodeParser(tokens);
Map<Integer, String> statusCodes = new LinkedHashMap<>();
ParseTreeWalker.DEFAULT.walk(listener, parser.text());
return statusCodes;
origin: PEXPlugins/PermissionsEx

private static GlobNode parse(ANTLRInputStream input) throws GlobParseException {
  GlobLexer lexer = new GlobLexer(input);
  CommonTokenStream tokenStream = new CommonTokenStream(lexer);
  GlobParser parser = new GlobParser(tokenStream);
  parser.setErrorHandler(new BailErrorStrategy());
  ParseTreeWalker walker = new ParseTreeWalker();
  GlobListener listener = new GlobListener();
  try {
    walker.walk(listener, parser.rootGlob());
  } catch (ParseCancellationException e) {
    RecognitionException ex = ((RecognitionException) e.getCause());
    throw new GlobParseException("Unable to parse glob: Error at token " + ex.getOffendingToken().getText() + " at position " +  ex.getOffendingToken().getLine() + ":" + ex.getOffendingToken().getCharPositionInLine(), ex);
  }
  return listener.popNode();
}
origin: apache/incubator-shardingsphere

  private static SQLParser createSQLParser(final DatabaseType databaseType, final Lexer lexer) {
    TokenStream tokenStream = new CommonTokenStream(lexer);
    switch (databaseType) {
      case H2:
      case MySQL:
        return new MySQLParser(tokenStream);
      case PostgreSQL:
        return new PostgreSQLParser(tokenStream);
      case SQLServer:
        return new SQLServerParser(tokenStream);
      case Oracle:
        return new OracleParser(tokenStream);
      default:
        throw new UnsupportedOperationException(String.format("Can not support database type [%s].", databaseType));
    }
  }
}
origin: org.opencypher/tck-api

public FormatListener()
{
  this.lexer = new FeatureResultsLexer( new ANTLRInputStream( "" ) );
  this.parser = new FeatureResultsParser( new CommonTokenStream( lexer ) );
  this.walker = new ParseTreeWalker();
  this.parameterVerifier = new ParameterVerifier();
  this.resultsVerifier = new ResultsVerifier();
}
origin: org.bitbucket.goalhub.grammar/languageTools

/**
 * Dumps all tokens to console.
 */
public void printLexerTokens() {
  for (Token token : this.tokens.getTokens()) {
    System.out.print("'" + token.getText() + "<" + token.getType() + ">' ");
  }
}
origin: confluentinc/ksql

private ParserRuleContext getParseTree(final String sql) {
 final SqlBaseLexer sqlBaseLexer = new SqlBaseLexer(
   new CaseInsensitiveStream(CharStreams.fromString(sql)));
 final CommonTokenStream tokenStream = new CommonTokenStream(sqlBaseLexer);
 final SqlBaseParser sqlBaseParser = new SqlBaseParser(tokenStream);
 sqlBaseLexer.removeErrorListeners();
 sqlBaseLexer.addErrorListener(ERROR_LISTENER);
 sqlBaseParser.removeErrorListeners();
 sqlBaseParser.addErrorListener(ERROR_LISTENER);
 final Function<SqlBaseParser, ParserRuleContext> parseFunction = SqlBaseParser::statements;
 try {
  // first, try parsing with potentially faster SLL mode
  sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.SLL);
  return parseFunction.apply(sqlBaseParser);
 } catch (final ParseCancellationException ex) {
  // if we fail, parse with LL mode
  tokenStream.seek(0); // rewind input stream
  sqlBaseParser.reset();
  sqlBaseParser.getInterpreter().setPredictionMode(PredictionMode.LL);
  return parseFunction.apply(sqlBaseParser);
 }
}
origin: prestodb/presto

CommonTokenStream tokenStream = new CommonTokenStream(lexer);
SqlBaseParser parser = new SqlBaseParser(tokenStream);
  tokenStream.reset(); // rewind input stream
  parser.reset();
origin: HuaweiBigData/StreamCQL

/**
 * Gets the offending token's text, provided the parser was able to locate the offending token.
 */
private String getOffendingSymbolWithHint(Recognizer<?, ?> recognizer, Object offendingSymbol)
{
  Token token = (Token)offendingSymbol;
  String tokenText = token.getText();
  if (tokenText.equals(SYMBOL_EOF))
  {
    List<Token> allTokens = ((org.antlr.v4.runtime.CommonTokenStream)recognizer.getInputStream()).getTokens();
    int tokensCount = allTokens.size();
    return (tokensCount < MIN_SIZE_FOR_TOKENS) ? "" : allTokens.get(tokensCount - MIN_SIZE_FOR_TOKENS)
      .getText();
  }
  return tokenText;
}
origin: org.antlr/antlr4-runtime

List<? extends Token> tokenList = tokenize(pattern);
ListTokenSource tokenSrc = new ListTokenSource(tokenList);
CommonTokenStream tokens = new CommonTokenStream(tokenSrc);
if ( tokens.LA(1)!=Token.EOF ) {
  throw new StartRuleDoesNotConsumeFullPattern();
origin: antlr/intellij-plugin-v4

public static CommonTokenStream tokenizeANTLRGrammar(String text) {
  ANTLRInputStream input = new ANTLRInputStream(text);
  ANTLRv4Lexer lexer = new ANTLRv4Lexer(input);
  CommonTokenStream tokens = new TokenStreamSubset(lexer);
  tokens.fill();
  return tokens;
}
origin: sleekbyte/tailor

  private void extractComments() {
    for (Token token : tokenStream.getTokens()) {
      if (token.getChannel() != Token.HIDDEN_CHANNEL) {
        continue;
      }
      if (ListenerUtil.isSingleLineComment(token)) {
        singleLineComments.add(token);
      }
      if (ListenerUtil.isMultilineComment(token)) {
        multilineComments.add(token);
      }
    }
  }
}
org.antlr.v4.runtime.CommonTokenStream

Javadoc

This class extends BufferedTokenStream with functionality to filter token streams to tokens on a particular channel (tokens where Token#getChannel returns a particular value).

This token stream provides access to all tokens by index or when calling methods like #getText. The channel filtering is only used for code accessing tokens via the lookahead methods #LA, #LT, and #LB.

By default, tokens are placed on the default channel (Token#DEFAULT_CHANNEL), but may be reassigned by using the ->channel(HIDDEN) lexer command, or by using an embedded action to call Lexer#setChannel.

Note: lexer rules which use the ->skip lexer command or call Lexer#skip do not produce tokens at all, so input text matched by such a rule will not be available as part of the token stream, regardless of channel.
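As a small illustration of the channel behavior described above, the sketch below assumes a hypothetical generated MyLexer whose whitespace rule is declared as WS : [ \t\r\n]+ -> channel(HIDDEN) ;. The hidden tokens still appear under index-based access, but the lookahead methods skip them.

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

static void demoChannelFiltering() {
  // MyLexer is hypothetical; assume it emits ID tokens plus hidden WS tokens.
  CommonTokenStream tokens = new CommonTokenStream(new MyLexer(CharStreams.fromString("a b")));
  tokens.fill(); // buffer everything up to and including EOF
  // Index-based access sees every token regardless of channel: ID, WS, ID, EOF.
  System.out.println(tokens.getTokens().size()); // 4
  // Lookahead filters by channel: LT(2) skips the hidden WS and lands on the second ID.
  Token second = tokens.LT(2);
  System.out.println(second.getText()); // "b"
}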

Most used methods

  • <init>
    Constructs a new CommonTokenStream using the specified token source and filtering tokens to the specified channel
  • getTokens
  • fill
  • get
  • getHiddenTokensToLeft
  • size
  • LA
  • getHiddenTokensToRight
  • reset
  • seek
  • LB
  • getText
  • getTokenSource
  • lazyInit
  • nextTokenOnChannel
  • previousTokenOnChannel
  • sync
  • LT
  • consume
  • index
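The sketch below exercises several of the listed methods together (fill, size, get, getHiddenTokensToLeft), again assuming a hypothetical MyLexer that routes comments and whitespace to the hidden channel; this is the same pattern the sleekbyte/tailor snippet above uses to collect comments.

import java.util.List;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.Token;

static void printHiddenNeighbors() {
  CommonTokenStream tokens = new CommonTokenStream(new MyLexer(CharStreams.fromString("x // note\ny")));
  tokens.fill(); // force the stream to buffer all tokens
  for (int i = 0; i < tokens.size(); i++) {
    Token t = tokens.get(i); // index-based access ignores channel filtering
    // Hidden tokens (such as the comment) immediately to the left of token i, or null if none.
    List<Token> hidden = tokens.getHiddenTokensToLeft(i);
    if (hidden != null) {
      for (Token h : hidden) {
        System.out.println("hidden before '" + t.getText() + "': " + h.getText());
      }
    }
  }
}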
