/**
 * Build the {@code dependencies} template instance for this grammar.
 * The template is populated with the files ANTLR will read ({@code in}),
 * the files it will generate ({@code out}), and the grammar file name.
 *
 * @return the populated StringTemplate, ready to {@code render()}
 */
public ST getDependencies() {
    loadDependencyTemplates();
    ST st = templates.getInstanceOf("dependencies");
    st.add("in", getDependenciesFileList());
    st.add("out", getGeneratedFileList());
    st.add("grammarFileName", g.fileName);
    return st;
}
/** * Return a list of File objects that name files ANTLR will read * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option * as well as any imported grammar files. */ public List<File> getDependenciesFileList() { // Find all the things other than imported grammars List<File> files = getNonImportDependenciesFileList(); // Handle imported grammars List<Grammar> imports = g.getAllImportedGrammars(); if ( imports!=null ) { for (Grammar g : imports) { String libdir = tool.libDirectory; String fileName = groomQualifiedFileName(libdir, g.fileName); files.add(new File(fileName)); } } if (files.isEmpty()) { return null; } return files; }
/**
 * Process every grammar named on the command line, ordered so that
 * tokenVocab providers are handled before the grammars that import them.
 * When dependency generation is requested ({@code gen_dependencies}),
 * only the build-dependency description is printed; otherwise each grammar
 * is fully processed unless errors were already reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> sortedGrammars = sortGrammarByTokenVocab(grammarFiles);

    for (GrammarRootAST t : sortedGrammars) {
        final Grammar g = createGrammar(t);
        g.fileName = t.fileName;
        if ( gen_dependencies ) {
            // -depend mode: emit makefile-style dependencies instead of code.
            BuildDependencyGenerator dep = new BuildDependencyGenerator(this, g);
            System.out.println(dep.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            process(g, true);
        }
    }
}
// Generated recognizer source and its .tokens vocab file.
files.add(getOutputFile(generator.getRecognizerFileName()));
files.add(getOutputFile(generator.getVocabFileName()));
// Grammar-type suffix (e.g. lexer/parser) + header extension file name.
String suffix = Grammar.getGrammarTypeToFileNameSuffix(g.getType());
String fileName = g.name + suffix + headerExtST.render();
files.add(getOutputFile(fileName));
// `lexer` / `header` presumably computed earlier in this method — not
// visible in this chunk; TODO confirm against the full method.
files.add(getOutputFile(lexer));
// .tokens file for the implicit/companion lexer.
String lexerTokens = g.name + suffix + CodeGenerator.VOCAB_FILE_EXTENSION;
files.add(getOutputFile(lexerTokens));
files.add(getOutputFile(header));
// Listener/visitor support files.
files.add(getOutputFile(generator.getListenerFileName()));
files.add(getOutputFile(generator.getBaseListenerFileName()));
files.add(getOutputFile(generator.getVisitorFileName()));
files.add(getOutputFile(generator.getBaseVisitorFileName()));
// NOTE(review): the grammar file itself is added to this list — looks
// intentional (copied alongside outputs?) but confirm against callers.
files.add(getOutputFile(g.fileName));
/**
 * Process every grammar named on the command line, ordered so that
 * tokenVocab providers are handled before the grammars that import them.
 * When dependency generation is requested ({@code gen_dependencies}),
 * only the build-dependency description is printed; otherwise each grammar
 * is fully processed unless errors were already reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> sortedGrammars = sortGrammarByTokenVocab(grammarFiles);

    for (GrammarRootAST t : sortedGrammars) {
        final Grammar g = createGrammar(t);
        g.fileName = t.fileName;
        if ( gen_dependencies ) {
            // -depend mode: emit makefile-style dependencies instead of code.
            BuildDependencyGenerator dep = new BuildDependencyGenerator(this, g);
            System.out.println(dep.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            process(g, true);
        }
    }
}
// Generated recognizer source and its .tokens vocab file.
files.add(getOutputFile(generator.getRecognizerFileName()));
files.add(getOutputFile(generator.getVocabFileName()));
// Grammar-type suffix (e.g. lexer/parser) + header extension file name.
String suffix = Grammar.getGrammarTypeToFileNameSuffix(g.getType());
String fileName = g.name + suffix + headerExtST.render();
files.add(getOutputFile(fileName));
// `lexer` / `header` presumably computed earlier in this method — not
// visible in this chunk; TODO confirm against the full method.
files.add(getOutputFile(lexer));
// .tokens file for the implicit/companion lexer.
String lexerTokens = g.name + suffix + CodeGenerator.VOCAB_FILE_EXTENSION;
files.add(getOutputFile(lexerTokens));
files.add(getOutputFile(header));
// Listener/visitor support files.
files.add(getOutputFile(generator.getListenerFileName()));
files.add(getOutputFile(generator.getBaseListenerFileName()));
files.add(getOutputFile(generator.getVisitorFileName()));
files.add(getOutputFile(generator.getBaseVisitorFileName()));
// NOTE(review): the grammar file itself is added to this list — looks
// intentional (copied alongside outputs?) but confirm against callers.
files.add(getOutputFile(g.fileName));
/**
 * Build the {@code dependencies} template instance for this grammar.
 * The template is populated with the files ANTLR will read ({@code in}),
 * the files it will generate ({@code out}), and the grammar file name.
 *
 * @return the populated StringTemplate, ready to {@code render()}
 */
public ST getDependencies() {
    loadDependencyTemplates();
    ST st = templates.getInstanceOf("dependencies");
    st.add("in", getDependenciesFileList());
    st.add("out", getGeneratedFileList());
    st.add("grammarFileName", g.fileName);
    return st;
}
/**
 * Process every grammar named on the command line, ordered so that
 * tokenVocab providers are handled before the grammars that import them.
 * When dependency generation is requested ({@code gen_dependencies}),
 * only the build-dependency description is printed; otherwise each grammar
 * is fully processed unless errors were already reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> sortedGrammars = sortGrammarByTokenVocab(grammarFiles);

    for (GrammarRootAST t : sortedGrammars) {
        final Grammar g = createGrammar(t);
        g.fileName = t.fileName;
        if ( gen_dependencies ) {
            // -depend mode: emit makefile-style dependencies instead of code.
            BuildDependencyGenerator dep = new BuildDependencyGenerator(this, g);
            System.out.println(dep.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            process(g, true);
        }
    }
}
/** * Return a list of File objects that name files ANTLR will read * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option * as well as any imported grammar files. */ public List<File> getDependenciesFileList() { // Find all the things other than imported grammars List<File> files = getNonImportDependenciesFileList(); // Handle imported grammars List<Grammar> imports = g.getAllImportedGrammars(); if ( imports!=null ) { for (Grammar g : imports) { String libdir = tool.libDirectory; String fileName = groomQualifiedFileName(libdir, g.fileName); files.add(new File(fileName)); } } if (files.isEmpty()) { return null; } return files; }
// Recognizer files in both variants (the boolean presumably selects
// header vs. implementation, as for targets like C++ — TODO confirm).
files.add(getOutputFile(generator.getRecognizerFileName(true)));
files.add(getOutputFile(generator.getRecognizerFileName(false)));
files.add(getOutputFile(generator.getVocabFileName()));
// Grammar-type suffix + header extension file name.
String suffix = Grammar.getGrammarTypeToFileNameSuffix(g.getType());
String fileName = g.name + suffix + headerExtST.render();
files.add(getOutputFile(fileName));
// `lexer` / `header` presumably computed earlier in this method — not
// visible in this chunk; TODO confirm against the full method.
files.add(getOutputFile(lexer));
// .tokens file for the implicit/companion lexer.
String lexerTokens = g.name + suffix + CodeGenerator.VOCAB_FILE_EXTENSION;
files.add(getOutputFile(lexerTokens));
files.add(getOutputFile(header));
// Listener/visitor support files, both variants of each.
files.add(getOutputFile(generator.getListenerFileName(true)));
files.add(getOutputFile(generator.getListenerFileName(false)));
files.add(getOutputFile(generator.getBaseListenerFileName(true)));
files.add(getOutputFile(generator.getBaseListenerFileName(false)));
files.add(getOutputFile(generator.getVisitorFileName(true)));
files.add(getOutputFile(generator.getVisitorFileName(false)));
files.add(getOutputFile(generator.getBaseVisitorFileName(true)));
// FIX: the false-variant of the base visitor file was missing, breaking
// the true/false symmetry every other file pair follows; without it one
// generated file was never reported as a build output.
files.add(getOutputFile(generator.getBaseVisitorFileName(false)));
/**
 * Build the {@code dependencies} template instance for this grammar.
 * The template is populated with the files ANTLR will read ({@code in}),
 * the files it will generate ({@code out}), and the grammar file name.
 *
 * @return the populated StringTemplate, ready to {@code render()}
 */
public ST getDependencies() {
    loadDependencyTemplates();
    ST st = templates.getInstanceOf("dependencies");
    st.add("in", getDependenciesFileList());
    st.add("out", getGeneratedFileList());
    st.add("grammarFileName", g.fileName);
    return st;
}
/**
 * Process every grammar named on the command line, ordered so that
 * tokenVocab providers are handled before the grammars that import them.
 * When dependency generation is requested ({@code gen_dependencies}),
 * only the build-dependency description is printed; otherwise each grammar
 * is fully processed unless errors were already reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> sortedGrammars = sortGrammarByTokenVocab(grammarFiles);

    for (GrammarRootAST t : sortedGrammars) {
        final Grammar g = createGrammar(t);
        g.fileName = t.fileName;
        if ( gen_dependencies ) {
            // -depend mode: emit makefile-style dependencies instead of code.
            BuildDependencyGenerator dep = new BuildDependencyGenerator(this, g);
            System.out.println(dep.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            process(g, true);
        }
    }
}
/** * Return a list of File objects that name files ANTLR will read * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option * as well as any imported grammar files. */ public List<File> getDependenciesFileList() { // Find all the things other than imported grammars List<File> files = getNonImportDependenciesFileList(); // Handle imported grammars List<Grammar> imports = g.getAllImportedGrammars(); if ( imports!=null ) { for (Grammar g : imports) { String libdir = tool.libDirectory; String fileName = groomQualifiedFileName(libdir, g.fileName); files.add(new File(fileName)); } } if (files.isEmpty()) { return null; } return files; }
// Recognizer files in both variants (the boolean presumably selects
// header vs. implementation, as for targets like C++ — TODO confirm).
files.add(getOutputFile(generator.getRecognizerFileName(true)));
files.add(getOutputFile(generator.getRecognizerFileName(false)));
files.add(getOutputFile(generator.getVocabFileName()));
// Grammar-type suffix + header extension file name.
String suffix = Grammar.getGrammarTypeToFileNameSuffix(g.getType());
String fileName = g.name + suffix + headerExtST.render();
files.add(getOutputFile(fileName));
// `lexer` / `header` presumably computed earlier in this method — not
// visible in this chunk; TODO confirm against the full method.
files.add(getOutputFile(lexer));
// .tokens file for the implicit/companion lexer.
String lexerTokens = g.name + suffix + CodeGenerator.VOCAB_FILE_EXTENSION;
files.add(getOutputFile(lexerTokens));
files.add(getOutputFile(header));
// Listener/visitor support files, both variants of each.
files.add(getOutputFile(generator.getListenerFileName(true)));
files.add(getOutputFile(generator.getListenerFileName(false)));
files.add(getOutputFile(generator.getBaseListenerFileName(true)));
files.add(getOutputFile(generator.getBaseListenerFileName(false)));
files.add(getOutputFile(generator.getVisitorFileName(true)));
files.add(getOutputFile(generator.getVisitorFileName(false)));
files.add(getOutputFile(generator.getBaseVisitorFileName(true)));
// FIX: the false-variant of the base visitor file was missing, breaking
// the true/false symmetry every other file pair follows; without it one
// generated file was never reported as a build output.
files.add(getOutputFile(generator.getBaseVisitorFileName(false)));
/**
 * Build the {@code dependencies} template instance for this grammar.
 * The template is populated with the files ANTLR will read ({@code in}),
 * the files it will generate ({@code out}), and the grammar file name.
 *
 * @return the populated StringTemplate, ready to {@code render()}
 */
public ST getDependencies() {
    loadDependencyTemplates();
    ST st = templates.getInstanceOf("dependencies");
    st.add("in", getDependenciesFileList());
    st.add("out", getGeneratedFileList());
    st.add("grammarFileName", g.fileName);
    return st;
}
/**
 * Process every grammar named on the command line, ordered so that
 * tokenVocab providers are handled before the grammars that import them.
 * When dependency generation is requested ({@code gen_dependencies}),
 * only the build-dependency description is printed; otherwise each grammar
 * is fully processed unless errors were already reported.
 */
public void processGrammarsOnCommandLine() {
    List<GrammarRootAST> sortedGrammars = sortGrammarByTokenVocab(grammarFiles);

    for (GrammarRootAST t : sortedGrammars) {
        final Grammar g = createGrammar(t);
        g.fileName = t.fileName;
        if ( gen_dependencies ) {
            // -depend mode: emit makefile-style dependencies instead of code.
            BuildDependencyGenerator dep = new BuildDependencyGenerator(this, g);
            System.out.println(dep.getDependencies().render());
        }
        else if (errMgr.getNumErrors() == 0) {
            process(g, true);
        }
    }
}
/** * Return a list of File objects that name files ANTLR will read * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option * as well as any imported grammar files. */ public List<File> getDependenciesFileList() { // Find all the things other than imported grammars List<File> files = getNonImportDependenciesFileList(); // Handle imported grammars List<Grammar> imports = g.getAllImportedGrammars(); if ( imports!=null ) { for (Grammar g : imports) { String libdir = tool.libDirectory; String fileName = groomQualifiedFileName(libdir, g.fileName); files.add(new File(fileName)); } } if (files.isEmpty()) { return null; } return files; }
// Recognizer files in both variants (the boolean presumably selects
// header vs. implementation, as for targets like C++ — TODO confirm).
files.add(getOutputFile(generator.getRecognizerFileName(true)));
files.add(getOutputFile(generator.getRecognizerFileName(false)));
files.add(getOutputFile(generator.getVocabFileName()));
// Grammar-type suffix + header extension file name.
String suffix = Grammar.getGrammarTypeToFileNameSuffix(g.getType());
String fileName = g.name + suffix + headerExtST.render();
files.add(getOutputFile(fileName));
// `lexer` / `header` presumably computed earlier in this method — not
// visible in this chunk; TODO confirm against the full method.
files.add(getOutputFile(lexer));
// .tokens file for the implicit/companion lexer.
String lexerTokens = g.name + suffix + CodeGenerator.VOCAB_FILE_EXTENSION;
files.add(getOutputFile(lexerTokens));
files.add(getOutputFile(header));
// Listener/visitor support files, both variants of each.
files.add(getOutputFile(generator.getListenerFileName(true)));
files.add(getOutputFile(generator.getListenerFileName(false)));
files.add(getOutputFile(generator.getBaseListenerFileName(true)));
files.add(getOutputFile(generator.getBaseListenerFileName(false)));
files.add(getOutputFile(generator.getVisitorFileName(true)));
files.add(getOutputFile(generator.getVisitorFileName(false)));
files.add(getOutputFile(generator.getBaseVisitorFileName(true)));
// FIX: the false-variant of the base visitor file was missing, breaking
// the true/false symmetry every other file pair follows; without it one
// generated file was never reported as a build output.
files.add(getOutputFile(generator.getBaseVisitorFileName(false)));
/**
 * Build the {@code dependencies} template instance for this grammar.
 * The template is populated with the files ANTLR will read ({@code in}),
 * the files it will generate ({@code out}), and the grammar file name.
 *
 * @return the populated StringTemplate, ready to {@code render()}
 */
public ST getDependencies() {
    loadDependencyTemplates();
    ST st = templates.getInstanceOf("dependencies");
    st.add("in", getDependenciesFileList());
    st.add("out", getGeneratedFileList());
    st.add("grammarFileName", g.fileName);
    return st;
}
/** * Return a list of File objects that name files ANTLR will read * to process T.g; This can be .tokens files if the grammar uses the tokenVocab option * as well as any imported grammar files. */ public List<File> getDependenciesFileList() { // Find all the things other than imported grammars List<File> files = getNonImportDependenciesFileList(); // Handle imported grammars List<Grammar> imports = g.getAllImportedGrammars(); if ( imports!=null ) { for (Grammar g : imports) { String libdir = tool.libDirectory; String fileName = groomQualifiedFileName(libdir, g.fileName); files.add(new File(fileName)); } } if (files.isEmpty()) { return null; } return files; }