/**
 * Converts the given source text into its Markdown representation
 * by running it through the channel dispatcher.
 */
private String convert(String input) {
  MarkdownOutput markdown = new MarkdownOutput();
  dispatcher.consume(new CodeReader(input), markdown);
  return markdown.toString();
}
/**
 * Lexes the source code read from {@code reader} into a queue of tokens.
 *
 * @throws DuplicationsException if lexing fails, reporting the line and column
 *   reached in the source; the original cause is preserved
 */
public TokenQueue chunk(Reader reader) {
  CodeReader source = new CodeReader(reader);
  TokenQueue tokens = new TokenQueue();
  try {
    channelDispatcher.consume(source, tokens);
  } catch (Exception e) {
    throw new DuplicationsException("Unable to lex source code at line : " + source.getLinePosition()
      + " and column : " + source.getColumnPosition(), e);
  }
  return tokens;
}
/** * Parse the input into a list of tokens, with parent/child relations between the tokens. */ public List<Node> parse(Reader reader) { // CodeReader reads the file stream CodeReader codeReader = new CodeReader(reader); // ArrayList collects the nodes List<Node> nodeList = new ArrayList<Node>(); // ChannelDispatcher manages the tokenizers ChannelDispatcher<List<Node>> channelDispatcher = ChannelDispatcher.builder().addChannels((Channel[]) tokenizers.toArray(new Channel[tokenizers.size()])).build(); channelDispatcher.consume(codeReader, nodeList); createNodeHierarchy(nodeList); return nodeList; }
/** * Parse the input into a list of tokens, with parent/child relations between the tokens. */ public List<Node> parse(Reader reader) { // CodeReader reads the file stream CodeReader codeReader = new CodeReader(reader); // ArrayList collects the nodes List<Node> nodeList = new ArrayList<>(); // ChannelDispatcher manages the tokenizers ChannelDispatcher<List<Node>> channelDispatcher = ChannelDispatcher.builder().addChannels((Channel[]) tokenizers.toArray(new Channel[tokenizers.size()])).build(); channelDispatcher.consume(codeReader, nodeList); createNodeHierarchy(nodeList); return nodeList; }
/** * Parse the input into a list of tokens, with parent/child relations between the tokens. */ public List<Node> parse(Reader reader) { // CodeReader reads the file stream CodeReader codeReader = new CodeReader(reader); // ArrayList collects the nodes List<Node> nodeList = new ArrayList<>(); // ChannelDispatcher manages the tokenizers ChannelDispatcher<List<Node>> channelDispatcher = ChannelDispatcher.builder() .addChannels((Channel[]) tokenizers.toArray(new Channel[0])) .build(); channelDispatcher.consume(codeReader, nodeList); createNodeHierarchy(nodeList); return nodeList; }
/**
 * Splits the characters read from {@code reader} into a {@link TokenQueue}.
 *
 * @throws DuplicationsException wrapping any lexing failure, with the line and
 *   column position reached in the source
 */
public TokenQueue chunk(Reader reader) {
  TokenQueue result = new TokenQueue();
  CodeReader input = new CodeReader(reader);
  try {
    channelDispatcher.consume(input, result);
    return result;
  } catch (Exception e) {
    throw new DuplicationsException("Unable to lex source code at line : " + input.getLinePosition()
      + " and column : " + input.getColumnPosition(), e);
  }
}
/**
 * Tokenizes the file named by {@code source} into {@code cpdTokens} for duplicate detection.
 * Comment channels are registered first so comments are consumed before word/literal matching;
 * the BlackHoleChannel swallows any character the other channels do not match.
 */
@Override
public final void tokenize(SourceCode source, Tokens cpdTokens) {
  String fileName = source.getFileName();
  ChannelDispatcher.Builder lexerBuilder = ChannelDispatcher.builder();
  lexerBuilder.addChannel(CommentChannel.JSP_COMMENT);
  lexerBuilder.addChannel(CommentChannel.HTML_COMMENT);
  lexerBuilder.addChannel(CommentChannel.C_COMMENT);
  lexerBuilder.addChannel(CommentChannel.CPP_COMMENT);
  lexerBuilder.addChannel(new WordChannel(fileName));
  lexerBuilder.addChannel(new LiteralChannel(fileName));
  lexerBuilder.addChannel(new BlackHoleChannel());
  ChannelDispatcher<Tokens> lexer = lexerBuilder.build();
  // try-with-resources: the previous version never closed the FileReader, leaking a
  // file handle per tokenized file. Catch is broadened from FileNotFoundException to
  // IOException so failures on close() are logged instead of propagating unchecked.
  // NOTE(review): FileReader uses the platform default charset — confirm that is intended.
  try (FileReader fileReader = new FileReader(new File(fileName))) {
    lexer.consume(new CodeReader(fileReader), cpdTokens);
    cpdTokens.add(TokenEntry.getEOF());
  } catch (java.io.IOException e) {
    LOG.error("Unable to open file : " + fileName, e);
  }
}