/**
 * Builds a rule with the given name whose condition is the constant {@code false},
 * so the rule can never fire and its action list is empty.
 *
 * @param name the rule name
 * @return a rule that never matches
 */
public static Rule alwaysFalse(String name) {
    // Token index -1 marks the boolean literal as synthetic (not read from any input).
    return builder()
            .name(name)
            .when(new BooleanExpression(new CommonToken(-1), false))
            .then(Collections.emptyList())
            .build();
}
/**
 * Creates a {@link CommonToken} positioned at {@code [start, stop]} on the given line/column.
 * An explicit {@code text} overrides the token text; otherwise, when {@code copyText} is set
 * and a char stream is available, the text is copied eagerly out of the stream.
 */
@Override
public CommonToken create(Pair<TokenSource, CharStream> source, int type, String text,
                          int channel, int start, int stop, int line, int charPositionInLine) {
    final CommonToken token = new CommonToken(source, type, channel, start, stop);
    token.setLine(line);
    token.setCharPositionInLine(charPositionInLine);
    if (text != null) {
        token.setText(text);
    }
    else if (copyText && source.b != null) {
        // Materialize the text now so the token survives the char stream being released.
        token.setText(source.b.getText(Interval.of(start, stop)));
    }
    return token;
}
/**
 * Renders this token for debugging: index, start:stop char span, whitespace-escaped text,
 * type (display name when a recognizer supplies a vocabulary), channel (only when
 * non-default) and line:column.
 */
public String toString(Recognizer r) {
    final String channelSuffix = channel > 0 ? ",channel=" + channel : "";
    String txt = getText();
    if (txt == null) {
        txt = "<no text>";
    }
    else {
        // Escape line breaks and tabs so the token always prints on a single line.
        txt = txt.replace("\n", "\\n").replace("\r", "\\r").replace("\t", "\\t");
    }
    final String typeString =
            r != null ? r.getVocabulary().getDisplayName(type) : String.valueOf(type);
    return "[@" + getTokenIndex() + "," + start + ":" + stop + "='" + txt + "',<" + typeString
            + ">" + channelSuffix + "," + line + ":" + getCharPositionInLine() + "]";
}
}
/**
 * Returns the first non-tight HTML tag encountered while parsing the javadoc, if any.
 * The result is eventually exposed through the {@link ParseStatus} object returned by
 * {@link #parseJavadocAsDetailNode(DetailAST)} via {@link ParseStatus#firstNonTightHtmlTag},
 * so checks that skip non-tight HTML, or that log violations for it, can consult it.
 *
 * @param javadocParser the ANTLR recognizer that was used to parse the javadoc
 * @return the first non-tight HTML tag if one exists; {@code null} otherwise
 */
private Token getFirstNonTightHtmlTag(JavadocParser javadocParser) {
    final ParserRuleContext startContext = javadocParser.nonTightTagStartContext;
    if (startContext == null) {
        return null;
    }
    // Child 1 of the start context is the tag-name terminal.
    final Token tagNameToken = ((TerminalNode) startContext.getChild(1)).getSymbol();
    final CommonToken offendingToken = new CommonToken(tagNameToken);
    // Shift from the parsed-comment's line numbering to the source file's.
    offendingToken.setLine(offendingToken.getLine() + errorListener.offset);
    return offendingToken;
}
/**
 * Emits the next token, merging a {@code Word} immediately followed by a {@code '.'} into a
 * single {@code Abbreviation} token when the combined text is a known abbreviation.
 * A look-ahead token that is not consumed by the merge is buffered and served on the next call.
 */
@Override
public Token nextToken() {
    // Serve any token left over from a previous look-ahead first.
    if (!queue.isEmpty()) {
        return queue.poll();
    }
    final Token first = super.nextToken();
    if (first.getType() != Word) {
        return first;
    }
    // Peek one token ahead: a trailing period may complete an abbreviation.
    final Token second = super.nextToken();
    if (second.getType() == Punctuation && second.getText().equals(".")) {
        final String candidate = first.getText() + ".";
        if (abbreviations != null && abbreviations.contains(candidate)) {
            final CommonToken merged = new CommonToken(Abbreviation, candidate);
            // The merged token spans from the word's start to the period's end.
            merged.setStartIndex(first.getStartIndex());
            merged.setStopIndex(second.getStopIndex());
            merged.setTokenIndex(first.getTokenIndex());
            merged.setCharPositionInLine(first.getCharPositionInLine());
            merged.setLine(first.getLine());
            return merged;
        }
    }
    // No merge: keep the look-ahead token for the next call and return the word.
    queue.offer(second);
    return first;
}
if ( _input.LA(1)=='/' ) { consume(); t = new CommonToken(ANYWHERE, "//"); t = new CommonToken(ROOT, "/"); t = new CommonToken(WILDCARD, "*"); break; case '!': consume(); t = new CommonToken(BANG, "!"); break; case '\'': String s = matchString(); t = new CommonToken(STRING, s); break; case CharStream.EOF : return new CommonToken(EOF, "<EOF>"); default: if ( isNameStartChar(_input.LA(1)) ) { String id = matchID(); if ( Character.isUpperCase(id.charAt(0)) ) t = new CommonToken(TOKEN_REF, id); else t = new CommonToken(RULE_REF, id); t.setStartIndex(_tokenStartCharIndex); t.setCharPositionInLine(_tokenStartCharIndex); t.setLine(line); return t;
public static void wipeCharPositionInfoAndWhitespaceTokens(CodeBuffTokenStream tokens) { tokens.fill(); CommonToken dummy = new CommonToken(Token.INVALID_TYPE, ""); dummy.setChannel(Token.HIDDEN_CHANNEL); Token firstRealToken = tokens.getNextRealToken(-1); for (int i = 0; i<tokens.size(); i++) { if ( i==firstRealToken.getTokenIndex() ) continue; // don't wack first token CommonToken t = (CommonToken)tokens.get(i); if ( t.getText().matches("\\s+") ) { tokens.getTokens().set(i, dummy); // wack whitespace token so we can't use it during prediction } else { t.setLine(0); t.setCharPositionInLine(-1); } } }
/**
 * Re-registers the built-in {@code Object} class: declares it in the global scope, binds it
 * to the given type, wires it into its scope, and records it in the type declaration list.
 *
 * @param objectType   the type to associate with the Object class declaration
 * @param objectsScope the static scope owned by the Object class
 */
private static void reinitializeObject(final Type objectType, final StaticScope objectsScope) {
    final CommonToken syntheticToken = new CommonToken(0);
    // NOTE(review): 157239 looks like a sentinel line number marking a synthesized
    // declaration (no real source position) — confirm against the error reporting code.
    syntheticToken.setLine(157239);
    final ClassDeclaration objectDeclaration =
            new ClassDeclaration("Object", objectsScope, null, syntheticToken);
    globalScope_.declareClass(objectDeclaration);
    objectDeclaration.setType(objectType);
    objectsScope.setDeclaration(objectDeclaration);
    typeDeclarationList_.add(objectDeclaration);
}
/**
 * Reports a parser syntax error through {@link ErrorLogger}, wrapping the error position
 * in a synthetic token. Reporting is suppressed entirely when {@code REPORT_SYNTAX_ERRORS}
 * is false.
 */
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                        int charPositionInLine, String msg, RecognitionException e) {
    if (!REPORT_SYNTAX_ERRORS) {
        return;
    }
    String sourceName = recognizer.getInputStream().getSourceName();
    if (!sourceName.isEmpty()) {
        // NOTE(review): this formatted prefix is never used below — dead store carried
        // over from the original; confirm whether it was meant to prefix the message.
        sourceName = String.format("%s:%d:%d: ", sourceName, line, charPositionInLine);
    }
    // Synthesize a token carrying the error position for the logger.
    final CommonToken errorToken = new CommonToken(0);
    errorToken.setLine(line);
    errorToken.setCharPositionInLine(charPositionInLine);
    ErrorLogger.error(ErrorIncidenceType.Parse, errorToken, "column ",
            Integer.toString(charPositionInLine), ": ", msg);
}
}
/** Creates a synthetic DEDENT token, positioned on the line of the last token seen. */
private Token createDedent() {
    final CommonToken token = commonToken(ProgramParser.DEDENT, "");
    token.setLine(this.lastToken.getLine());
    return token;
}
public void processToken(int indexIntoRealTokens, int tokenIndexInStream, boolean collectAnalysis) { CommonToken curToken = (CommonToken)testDoc.tokens.get(tokenIndexInStream); String tokText = curToken.getText(); TerminalNode node = tokenToNodeMap.get(curToken); curToken.setLine(line); curToken.setCharPositionInLine(charPosInLine);
/**
 * Finds the token whose character span {@code [start, stop]} contains the given offset.
 *
 * @param tokens the token stream to search
 * @param offset a 0-based character offset into the input
 * @return the containing token, or {@code null} if the offset is out of range or falls
 *         between tokens
 */
public static Token getTokenUnderCursor(CommonTokenStream tokens, int offset) {
    if (offset < 0 || offset >= tokens.getTokenSource().getInputStream().size()) {
        return null;
    }
    // Orders tokens by char span; a token compares "equal" to the probe when it contains
    // the probe's offset, which is exactly what binarySearch needs to find it.
    final Comparator<Token> byCharSpan = (a, b) -> {
        if (a.getStopIndex() < b.getStartIndex()) {
            return -1;
        }
        if (a.getStartIndex() > b.getStopIndex()) {
            return 1;
        }
        return 0;
    };
    final CommonToken probe = new CommonToken(Token.INVALID_TYPE, "");
    probe.setStartIndex(offset);
    probe.setStopIndex(offset);
    final List<Token> tokenList = tokens.getTokens();
    final int i = Collections.binarySearch(tokenList, probe, byCharSpan);
    return i >= 0 ? tokenList.get(i) : null;
}
/**
 * Truncates the visible token stream after {@code indexOfLastToken} by overwriting the
 * following token with a synthetic STOP token; a negative index undoes a previous call by
 * restoring the saved original token.
 * NOTE(review): the System.out.println calls appear to be debug tracing left in on purpose;
 * kept byte-identical here.
 */
public void setIndexOfLastToken(int indexOfLastToken) {
    System.out.println("setIndexOfLastToken("+indexOfLastToken+")");
    if ( indexOfLastToken<0 ) {
        // Negative index: restore the token this object replaced on an earlier call.
        System.out.println("replacing "+saveToken.getTokenIndex()+" with "+saveToken);
        tokens.set(saveToken.getTokenIndex(), saveToken);
        // this.indexOfLastToken = indexOfLastToken;
        return;
    }
    int i = indexOfLastToken + 1; // we want to keep token at indexOfLastToken
    sync(i); // ensure token i is loaded into the buffer before reading it
    saveToken = tokens.get(i); // remember the original so it can be restored later
    System.out.println("saving "+saveToken);
    CommonToken stopToken = new CommonToken(saveToken);
    stopToken.setType(STOP_TOKEN_TYPE);
    System.out.println("setting "+i+" to "+stopToken);
    tokens.set(i, stopToken);
    // this.indexOfLastToken = indexOfLastToken;
}
}
/**
 * Creates a token source backed by an existing, non-empty token list. The token factory is
 * borrowed from the first token that knows its source, and the EOF token is positioned right
 * after the last real token.
 *
 * @param token the backing token list; must be non-null and non-empty, and at least one
 *              element must carry a non-null {@code TokenSource}
 * @throws NullPointerException     if the list is null or no token provides a factory
 * @throws IllegalArgumentException if the list is empty
 */
public ListTokenSource(List<Token> token) {
    // Fix: validate BEFORE assigning — the original stored the field first, so a null or
    // empty argument could leave the object partially initialized when the check threw.
    Preconditions.checkNotNull(token);
    Preconditions.checkArgument(!token.isEmpty(), "Internal token list must not be empty");
    this.token = token;
    // Borrow the token factory from the first token that has a source.
    for (Token t : token) {
        if (t.getTokenSource() != null) {
            this.factory = t.getTokenSource().getTokenFactory();
            break;
        }
    }
    Preconditions.checkNotNull(this.factory, "Internal token list needs a valid TokenSource");
    // Position the EOF token at the location of the last real token.
    Token lastToken = token.get(token.size() - 1);
    eofToken.setLine(lastToken.getLine());
    eofToken.setCharPositionInLine(lastToken.getCharPositionInLine());
}
/**
 * Returns the token text, first refreshing it from the {@code type} field when one is set.
 * NOTE(review): assumes {@code type} is a String field holding an overriding text value
 * declared on the enclosing class (not visible here) — confirm its semantics.
 */
@Override public String getText() { if (type != null) { setText(type); } return super.getText(); } }
/**
 * Returns this token's text: an explicitly set override if present, otherwise the original
 * characters sliced from the input stream, or {@code "<EOF>"} when the token's span lies
 * beyond the end of the stream. Returns {@code null} when no input stream is available.
 */
@Override
public String getText() {
    if (text != null) {
        return text; // explicit override wins
    }
    final CharStream input = getInputStream();
    if (input == null) {
        return null;
    }
    final int size = input.size();
    return (start < size && stop < size) ? input.getText(Interval.of(start, stop)) : "<EOF>";
}
ErrorNodeImpl eni = (ErrorNodeImpl) child; CommonToken offendingToken = (CommonToken) eni.getSymbol(); int offendingStart = offendingToken.getCharPositionInLine(); int offendingStop = -1; String offendingSource = ""; suggestToken.setStart(offendingStart + 1); suggestToken.setStop(offendingStop); suggestToken.setTokenNum(offendingToken.getType()); suggestToken.setTokenSource(offendingSource); suggest.add(suggestToken);
final int tokenIndex = token.getTokenIndex(); final Parser parser = (Parser) recognizer; if( parser.getRuleInvocationStack().contains( "nameDefinition" ) ) { error = generateInvalidVariableError(offendingSymbol, line, charPositionInLine, e, token); } else if ( "}".equals(token.getText()) && tokenIndex > 1 && ":".equals(parser.getTokenStream().get(tokenIndex - 1).getText()) ) { error = new SyntaxErrorEvent( FEELEvent.Severity.ERROR, Msg.createMessage(Msg.MISSING_EXPRESSION, parser.getTokenStream().get(tokenIndex - 2).getText()),
/**
 * Records the character range of a syntax error as a {@link Range} over the raw input text.
 * The range spans the offending token's text, or collapses to a single position for EOF
 * or a missing offending symbol.
 */
@Override
public void syntaxError(Recognizer<?, ?> recognizer, Object offendingSymbol, int line,
                        int charPositionInLine, String msg, RecognitionException e) {
    // ANTLR lines are 1-based; getCharIndex expects a 0-based line.
    final int fromIndex = getCharIndex(inputContent, line - 1, charPositionInLine);
    final CommonToken token = (CommonToken) offendingSymbol;
    final int toIndex = (token != null && token.getType() != Token.EOF)
            ? fromIndex + token.getText().length() - 1
            : fromIndex;
    errors.add(new Range(fromIndex, toIndex));
}
/**
 * Initializes the position span for a "missing" token report: the span starts at the very
 * beginning of the input (line 1, column 0) and ends at the given token's position. Also
 * captures the full input text into an {@code InnerScriptReader}.
 * NOTE(review): the {@code recognizer} parameter is unused; relies on the input stream's
 * {@code toString()} returning the complete source text — confirm with the stream class.
 */
private void initMissing(Recognizer<?, ?> recognizer, CommonToken commonToken) {
    try {
        this.startLine = 1;
        this.startCharPositionInLine = 0;
        this.stopLine = commonToken.getLine();
        this.stopCharPositionInLine = commonToken.getCharPositionInLine();
        final String sourceText = commonToken.getTokenSource().getInputStream().toString();
        if (sourceText != null) {
            reader = new InnerScriptReader(sourceText);
        }
    }
    catch (Exception e) {
        // Preserve the cause so the original failure stays diagnosable.
        throw new RuntimeException(e);
    }
}