/** Returns a parser that matches the single character {@code ch} and yields {@code value}. */
private static <T> Parser<T> op(char ch, T value) {
  return isChar(ch).retn(value);
}
/**
 * Returns a parser that recognizes any one of the given system modifiers, yielding the matched
 * modifier as the result.
 */
static Parser<Modifier> systemModifier(SystemModifier... modifiers) {
  List<Parser<Modifier>> alternatives = new ArrayList<>(modifiers.length);
  for (Modifier candidate : modifiers) {
    alternatives.add(term(candidate.toString()).retn(candidate));
  }
  return Parsers.or(alternatives);
}
/************************** utility methods ****************************/

/**
 * Returns a parser for a binary comparison: {@code operand name operand}, combined into a
 * {@link BinaryExpression} with operator {@code op}.
 */
private static Parser<Expression> compare(Parser<Expression> operand, String name, Op op) {
  Parser<Op> operator = term(name).retn(op);
  return Parsers.sequence(operand, operator, operand, BinaryExpression::new);
}
/**
 * Returns a parser for the prefix operator named {@code name}, yielding a function that wraps
 * its operand in a {@link UnaryExpression} with operator {@code op}.
 */
private static Parser<UnaryOperator<Expression>> unary(String name, Op op) {
  return term(name).retn(operand -> new UnaryExpression(op, operand));
}
}
/**
 * Returns a parser for the binary operator named {@code name}, yielding a function that joins
 * two operands into a {@link BinaryExpression} with operator {@code op}.
 */
private static Parser<BinaryOperator<Expression>> binary(String name, Op op) {
  return term(name).retn((left, right) -> new BinaryExpression(left, op, right));
}
/**
 * Gets a {@link Lexicon} instance with {@link Tokens#reserved(String)} as each operator's value
 * and a lexer that strives to try the shortest operator first.
 *
 * <p>Safely speaking, we could always start from the longest operator and fall back to shorter
 * ones. Yet shorter operators are used more often than longer ones and scanning them is faster.
 * However, scanning shorter operators first risks a "==" being mistakenly scanned as "="
 * followed by another "=". In order to avoid this, we analyze the prefix relationship and make
 * sure that any operator that is a prefix of another is scanned after the longer operator it
 * prefixes.
 */
static Lexicon lexicon(final Collection<String> operatorNames) {
  final Map<String, Object> operators = new HashMap<String, Object>();
  // sort() is defined elsewhere; per the class doc above it presumably orders the operators so
  // that a prefix comes after any longer operator it prefixes — TODO(review): confirm.
  final String[] ops = sort(operatorNames.toArray(new String[operatorNames.size()]));
  final Parser<?>[] lexers = new Parser<?>[ops.length];
  for (int i = 0; i < ops.length; i++) {
    String s = ops[i];
    // Single-character operators use the cheaper char scanner.
    Parser<?> scanner = s.length() == 1 ? Scanners.isChar(s.charAt(0)) : Scanners.string(s);
    Object value = Tokens.reserved(s);
    operators.put(s, value);
    lexers[i] = scanner.retn(value);
  }
  // Parsers.or tries alternatives in order, so the sorted order above determines scan priority.
  return new Lexicon(operators::get, Parsers.or(lexers));
}
/**
 * Returns a parser for postfix operator {@code op}, yielding a function that wraps its operand
 * in a {@link PostfixUnaryExpression}.
 */
private static Parser<UnaryOperator<Expression>> postfix(Operator op) {
  return term(op.toString()).retn(operand -> new PostfixUnaryExpression(operand, op));
}
}
/**
 * Returns a parser for binary operator {@code op}, yielding a function that joins two operands
 * into a {@link BinaryExpression}.
 */
private static Parser<BinaryOperator<Expression>> binary(Operator op) {
  return term(op.toString()).retn((left, right) -> new BinaryExpression(left, op, right));
}
/**
 * Returns a parser for prefix operator {@code op}, yielding a function that wraps its operand
 * in a {@link PrefixUnaryExpression}.
 */
private static Parser<UnaryOperator<Expression>> prefix(Operator op) {
  return term(op.toString()).retn(operand -> new PrefixUnaryExpression(op, operand));
}
/**
 * Returns a parser that accepts either {@code phrase1} or {@code phrase2} and yields
 * {@code joinType} as the result.
 */
private static Parser<JoinType> joinType(JoinType joinType, String phrase1, String phrase2) {
  Parser<?> firstForm = TerminalParser.phrase(phrase1);
  Parser<?> secondForm = TerminalParser.phrase(phrase2);
  return Parsers.or(firstForm, secondForm).retn(joinType);
}
/**
 * A {@link Parser} that recognizes a sequence of tokens identified by {@code tokenNames}, as an
 * atomic step. The parser's result and label are the token names joined by single spaces.
 */
public Parser<?> phrase(String... tokenNames) {
  Parser<?>[] wordParsers = new Parser<?>[tokenNames.length];
  for (int i = 0; i < tokenNames.length; i++) {
    wordParsers[i] = token(tokenNames[i]);
  }
  // java.lang.String.join does exactly what the project-local Strings.join helper did.
  String phrase = String.join(" ", tokenNames);
  return Parsers.sequence(wordParsers).atomic().retn(phrase).label(phrase);
}
/**
 * A {@link Parser} that greedily runs {@code tokenizer}, and translates line feed characters
 * ({@code '\n'}) to {@code indent} and {@code outdent} tokens.
 * Return values are wrapped in {@link Token} objects and collected in a {@link List}.
 * Patterns recognized by {@code delim} are ignored.
 */
public Parser<List<Token>> lexer(Parser<?> tokenizer, Parser<?> delim) {
  Parser<?> lineFeed = Scanners.isChar('\n').retn(Punctuation.LF);
  Parser<List<Token>> rawTokens = Parsers.or(tokenizer, lineFeed).lexer(delim);
  // Post-process the raw token stream, converting LF markers into indent/outdent tokens.
  return rawTokens.map(tokens -> analyzeIndentations(tokens, Punctuation.LF));
}
/**
 * Returns a parser for a definition body: zero or more members (stray {@code ";"} tokens are
 * tolerated and discarded) enclosed in braces, wrapped in a {@link DefBody}.
 */
static Parser<DefBody> body(Parser<Member> member) {
  // A bare semicolon parses as a null member, filtered out by removeNulls below.
  Parser<Member> emptyMember = term(";").retn(null);
  Parser<List<Member>> members =
      emptyMember.or(member).many().map(DeclarationParser::removeNulls);
  return Parsers.between(term("{"), members, term("}")).map(DefBody::new);
}
/**
 * Returns a parser for a wildcard type argument: {@code ? extends T}, {@code ? super T}, or a
 * bare {@code ?} (treated as an unbounded upper-bound wildcard).
 */
static Parser<TypeLiteral> wildcard(Parser<TypeLiteral> type) {
  Parser<TypeLiteral> upperBounded = phrase("? extends").next(type).map(UpperBoundWildcard::new);
  Parser<TypeLiteral> lowerBounded = phrase("? super").next(type).map(LowerBoundWildcard::new);
  Parser<TypeLiteral> unbounded = term("?").retn(new UpperBoundWildcard(null));
  // Bounded forms must be tried before the bare "?" alternative.
  return Parsers.or(upperBounded, lowerBounded, unbounded);
}
}
/**
 * Returns a parser for a method definition, built from {@code mod} (modifier parser),
 * {@code defaultValue} (expression parser for annotation-style {@code default} values) and
 * {@code stmt} (statement parser for the method body). The eight sequenced components below are
 * passed positionally to the {@code MethodDef} constructor — their order is load-bearing.
 */
static Parser<Member> methodDef(
    Parser<Modifier> mod, Parser<Expression> defaultValue, Parser<Statement> stmt) {
  return Parsers.sequence(
      // Zero or more modifiers (public, static, ...).
      mod.many(),
      // Optional generic type parameters, e.g. "<T, R>".
      TYPE_PARAMETERS.optional(),
      // Return type.
      TypeLiteralParser.TYPE_LITERAL,
      // Method name.
      Terminals.Identifier.PARSER,
      // Parenthesized, comma-separated parameter list.
      term("(").next(StatementParser.parameter(mod).sepBy(term(","))).followedBy(term(")")),
      // Optional "throws" clause with at least one exception type.
      term("throws").next(TypeLiteralParser.ELEMENT_TYPE_LITERAL.sepBy1(term(","))).optional(),
      // Optional "default <value>" (annotation member default).
      term("default").next(ExpressionParser.arrayInitializerOrRegularExpression(defaultValue))
          .optional(),
      // Either a block body, or a bare ";" for an abstract/native method (null body).
      Parsers.or(
          StatementParser.blockStatement(stmt),
          term(";").retn((BlockStatement) null)),
      MethodDef::new);
}