/**
 * {@inheritDoc}
 *
 * @see org.modeshape.graph.query.parse.QueryParser#parseQuery(String, TypeSystem)
 */
@Override
public QueryCommand parseQuery( String query, TypeSystem typeSystem ) {
    // Tokenize the SQL without retaining comment tokens; the stream is case-insensitive.
    Tokenizer tokenizer = new SqlTokenizer(false);
    TokenStream tokens = new TokenStream(query, tokenizer, false);
    tokens.start();
    return parseQueryCommand(tokens, typeSystem);
}
/**
 * {@inheritDoc}
 *
 * @see org.modeshape.graph.query.parse.QueryParser#parseQuery(String, TypeSystem)
 */
@Override
public QueryCommand parseQuery( String query, TypeSystem typeSystem ) {
    // Build a case-insensitive token stream over the SQL text, discarding comments.
    TokenStream stream = new TokenStream(query, new SqlTokenizer(false), false);
    stream.start();
    return parseQueryCommand(stream, typeSystem);
}
/**
 * {@inheritDoc}
 *
 * @see org.modeshape.graph.query.parse.QueryParser#parseQuery(String, TypeSystem)
 */
@Override
public QueryCommand parseQuery( String query, TypeSystem typeSystem ) {
    // Case-insensitive stream; the tokenizer drops SQL comments.
    TokenStream stream = new TokenStream(query, new SqlTokenizer(false), false);
    stream.start();
    return parseQueryCommand(stream, typeSystem);
}
/**
 * Replace the current token stream with a case-insensitive one over the same content,
 * and advance it to the first token.
 */
public void makeCaseInsensitive() {
    TokenStream caseInsensitive = new TokenStream(content, tokenizer, false);
    tokens = caseInsensitive;
    caseInsensitive.start();
}
/**
 * Replace the current token stream with a case-sensitive one over the same content,
 * and advance it to the first token.
 */
public void makeCaseSensitive() {
    TokenStream caseSensitive = new TokenStream(content, tokenizer, true);
    tokens = caseSensitive;
    caseSensitive.start();
}
/**
 * Parse the full-text search criteria given in the supplied string.
 *
 * @param fullTextSearchExpression the full-text search expression; may not be null
 * @return the term representation of the full-text search, or null if there are no terms
 * @throws ParsingException if there is an error parsing the supplied string
 * @throws IllegalArgumentException if the expression is null
 */
public Term parse( String fullTextSearchExpression ) {
    CheckArg.isNotNull(fullTextSearchExpression, "fullTextSearchExpression");
    // Case-insensitive stream over the search expression, tokenized into terms.
    TokenStream stream = new TokenStream(fullTextSearchExpression, new TermTokenizer(), false);
    return parse(stream.start());
}
/**
 * Parse the full-text search criteria given in the supplied string.
 *
 * @param fullTextSearchExpression the full-text search expression; may not be null
 * @return the term representation of the full-text search, or null if there are no terms
 * @throws ParsingException if there is an error parsing the supplied string
 * @throws IllegalArgumentException if the expression is null
 */
public Term parse( String fullTextSearchExpression ) {
    CheckArg.isNotNull(fullTextSearchExpression, "fullTextSearchExpression");
    // Tokenize into search terms; matching is case-insensitive.
    TokenStream stream = new TokenStream(fullTextSearchExpression, new TermTokenizer(), false);
    return parse(stream.start());
}
/**
 * Parse the full-text search criteria given in the supplied string.
 *
 * @param fullTextSearchExpression the full-text search expression; may not be null
 * @return the term representation of the full-text search, or null if there are no terms
 * @throws ParsingException if there is an error parsing the supplied string
 * @throws IllegalArgumentException if the expression is null
 */
public Term parse( String fullTextSearchExpression ) {
    CheckArg.isNotNull(fullTextSearchExpression, "fullTextSearchExpression");
    // Delegate to the TokenStream-based overload after starting the stream.
    TokenStream stream = new TokenStream(fullTextSearchExpression, new TermTokenizer(), false);
    return parse(stream.start());
}
protected TokenStream tokenize( String xpath ) { Tokenizer tokenizer = new XPathParser.XPathTokenizer(false); // skip comments return new TokenStream(xpath, tokenizer, true).start(); // case sensitive!! }
/**
 * Parse the supplied DDL into its sequence of statements. Each top-level statement is
 * either a SELECT or, failing that, treated as a DELETE.
 *
 * @param ddl the DDL content to parse
 * @return the parsed statements, in source order
 */
public List<Statement> parse( String ddl ) {
    List<Statement> statements = new LinkedList<Statement>();
    TokenStream tokens = new TokenStream(ddl, TokenStream.basicTokenizer(false), false);
    tokens.start();
    while (tokens.hasNext()) {
        // Dispatch on the leading keyword of each statement.
        Statement statement = tokens.matches("SELECT") ? parseSelect(tokens) : parseDelete(tokens);
        statements.add(statement);
    }
    return statements;
}
/** Create and start a case-insensitive SQL token stream over the supplied content. */
protected TokenStream tokens( String content ) {
    TokenStream stream = new TokenStream(content, new BasicSqlQueryParser.SqlTokenizer(false), false);
    return stream.start();
}
/** Create and start a case-insensitive SQL token stream over the supplied content. */
protected TokenStream tokens( String content ) {
    TokenStream stream = new TokenStream(content, new SqlQueryParser.SqlTokenizer(false), false);
    return stream.start();
}
/** Tokenize the content with the basic SQL tokenizer; the returned stream is already started. */
protected TokenStream tokens( String content ) {
    TokenStream stream = new TokenStream(content, new BasicSqlQueryParser.SqlTokenizer(false), false);
    return stream.start();
}
/** Tokenize the content with the basic SQL tokenizer; the returned stream is already started. */
protected TokenStream tokens( String content ) {
    TokenStream stream = new TokenStream(content, new BasicSqlQueryParser.SqlTokenizer(false), false);
    return stream.start();
}
public Component parseXPath( String xpath ) { Tokenizer tokenizer = new XPathTokenizer(false); // skip comments TokenStream tokens = new TokenStream(xpath, tokenizer, true).start(); // case sensitive!! //do parsing Component component = parseXPath(tokens); //no more tokens should remain, other wise this is not valid statement if (tokens.hasNext()) { throw new ParsingException(tokens.nextPosition(), "Unexpected token: " + tokens.consume() + " at line " + tokens.nextPosition().getLine() + ", column " + tokens.nextPosition().getColumn()); } return component; }
public Component parseXPath( String xpath ) { Tokenizer tokenizer = new XPathTokenizer(false); // skip comments TokenStream tokens = new TokenStream(xpath, tokenizer, true).start(); // case sensitive!! //do parsing Component component = parseXPath(tokens); //no more tokens should remain, other wise this is not valid statement if (tokens.hasNext()) { throw new ParsingException(tokens.nextPosition(), "Unexpected token: " + tokens.consume() + " at line " + tokens.nextPosition().getLine() + ", column " + tokens.nextPosition().getColumn()); } return component; }
/** * Parse the CND content. * * @param content the content * @throws ParsingException if there is a problem parsing the content */ protected void parse( String content ) { Tokenizer tokenizer = new CndTokenizer(false, true); TokenStream tokens = new TokenStream(content, tokenizer, false); tokens.start(); while (tokens.hasNext()) { // Keep reading while we can recognize one of the two types of statements ... if (tokens.matches("<", ANY_VALUE, "=", ANY_VALUE, ">")) { parseNamespaceMapping(tokens); } else if (tokens.matches("[", ANY_VALUE, "]")) { parseNodeTypeDefinition(tokens); } else { Position position = tokens.previousPosition(); throw new ParsingException(position, CndI18n.expectedNamespaceOrNodeDefinition.text(tokens.consume(), position.getLine(), position.getColumn())); } } }
/** * Parse the CND content. * * @param content the content * @throws ParsingException if there is a problem parsing the content */ protected void parse( String content ) { Tokenizer tokenizer = new CndTokenizer(false, true); TokenStream tokens = new TokenStream(content, tokenizer, false); tokens.start(); while (tokens.hasNext()) { // Keep reading while we can recognize one of the two types of statements ... if (tokens.matches("<", ANY_VALUE, "=", ANY_VALUE, ">")) { parseNamespaceMapping(tokens); } else if (tokens.matches("[", ANY_VALUE, "]")) { parseNodeTypeDefinition(tokens); } else { Position position = tokens.previousPosition(); throw new ParsingException(position, CndI18n.expectedNamespaceOrNodeDefinition.text(tokens.consume(), position.getLine(), position.getColumn())); } } }
@Test
public void shouldParseMultiLineString() {
    makeCaseInsensitive();
    // Two statements across four physical lines; positions below are checked against
    // this exact layout (index 31 = start of the second ALTER, index 37 = DATABASE).
    String content = "ALTER DATABASE \n" + "DO SOMETHING; \n" + "ALTER DATABASE \n" + " SET DEFAULT BIGFILE TABLESPACE;";
    tokens = new TokenStream(content, tokenizer, true);
    tokens.start();
    tokens.consume(); // ALTER
    tokens.consume(); // DATABASE
    tokens.consume(); // DO
    tokens.consume(); // SOMETHING
    tokens.consume(); // ;
    assertThat(tokens.nextPosition().getIndexInContent(), is(31));
    assertThat(tokens.nextPosition().getColumn(), is(1));
    tokens.consume(); // ALTER
    assertThat(tokens.nextPosition().getIndexInContent(), is(37));
    assertThat(tokens.nextPosition().getColumn(), is(7));
}
}