/**
 * Returns the typed dependencies of the sentence with extras (such as control
 * dependencies) included, collapsed, and with relations propagated across
 * coordination ("CC processed"). This is the recommended representation for
 * capturing a sentence's semantic and syntactic relations. The result is in
 * general a directed graph rather than a tree, and may contain small cycles.
 * Selecting the "CCPropagated" option is equivalent to calling this method.
 *
 * @return collapsed dependencies with CC processed
 */
public List<TypedDependency> typedDependenciesCCprocessed() {
  // Request the full set of extra dependencies.
  final Extras extrasMode = Extras.MAXIMAL;
  return typedDependenciesCCprocessed(extrasMode);
}
/**
 * Get a list of the typed dependencies with CC processed, selecting extras
 * via a boolean flag.
 *
 * @param includeExtras if true, behaves as {@code Extras.MAXIMAL};
 *                      if false, as {@code Extras.NONE}
 * @return collapsed dependencies with CC processed
 * @deprecated Use the {@code Extras}-parameterized overload instead.
 * @see edu.stanford.nlp.trees.GrammaticalStructure#typedDependenciesCCprocessed(edu.stanford.nlp.trees.GrammaticalStructure.Extras)
 */
@Deprecated
public List<TypedDependency> typedDependenciesCCprocessed(boolean includeExtras) {
  // Map the legacy boolean onto the Extras enum and delegate.
  return typedDependenciesCCprocessed(includeExtras ? Extras.MAXIMAL : Extras.NONE);
}
/** * demoDP demonstrates turning a file into tokens and then parse * trees. Note that the trees are printed by calling pennPrint on * the Tree object. It is also possible to pass a PrintWriter to * pennPrint if you want to capture the output. * This code will work with any supported language. */ public static void demoDP(LexicalizedParser lp, String filename) { // This option shows loading, sentence-segmenting and tokenizing // a file using DocumentPreprocessor. TreebankLanguagePack tlp = lp.treebankLanguagePack(); // a PennTreebankLanguagePack for English GrammaticalStructureFactory gsf = null; if (tlp.supportsGrammaticalStructures()) { gsf = tlp.grammaticalStructureFactory(); } // You could also create a tokenizer here (as below) and pass it // to DocumentPreprocessor for (List<HasWord> sentence : new DocumentPreprocessor(filename)) { Tree parse = lp.apply(sentence); parse.pennPrint(); System.out.println(); if (gsf != null) { GrammaticalStructure gs = gsf.newGrammaticalStructure(parse); Collection tdl = gs.typedDependenciesCCprocessed(); System.out.println(tdl); System.out.println(); } } }
// Print a blank separator line, then this parse's typed dependencies.
System.out.println();
GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
// CC-processed: collapsed dependencies with relations distributed across coordination.
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
System.out.println(tdl);
System.out.println();
break;
// Collapsed dependencies with coordination propagated (CC processed).
case CCPROCESSED:
  deps = gs.typedDependenciesCCprocessed(includeExtras);
  break;
case BASIC:
System.out.println("---------- CCprocessed dependencies ----------");
List<TypedDependency> deps = gs.typedDependenciesCCprocessed(GrammaticalStructure.Extras.MAXIMAL);
if (checkConnected) {
  if (!GrammaticalStructure.isConnected(deps)) {
    // NOTE(review): when the dependency graph is NOT connected this prints the
    // CCprocessed dependencies, while the connected case below prints the
    // enhanced++ dependencies — confirm the two branches are not swapped.
    printDependencies(gs, gs.typedDependenciesCCprocessed(GrammaticalStructure.Extras.MAXIMAL), tree, conllx, false, opts.convertToUPOS);
  } else {
    printDependencies(gs, gs.typedDependenciesEnhancedPlusPlus(), tree, conllx, false, opts.convertToUPOS);
// NOTE(review): the boolean overload of typedDependenciesCCprocessed is deprecated
// elsewhere in this codebase; true maps to Extras.MAXIMAL — consider migrating.
Collection<TypedDependency> tdls = gs.typedDependenciesCCprocessed(true);
questionStructure.setTdls(tdls);
Map<String, String> map = new HashMap<>();
// CC-processed typed dependencies: collapsed, with relations distributed across coordination.
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
System.out.println(tdl);
System.out.println();
// Emit the CC-processed dependencies in XML form, then in the default text form.
// NOTE(review): each call appears twice in this fragment — confirm the duplication
// is intentional (e.g. snippets from distinct branches) in the enclosing method.
print(gs.typedDependenciesCCprocessed(), "xml", includeTags, pw);
print(gs.typedDependenciesCCprocessed(), "xml", includeTags, pw);
print(gs.typedDependenciesCCprocessed(), includeTags, pw);
print(gs.typedDependenciesCCprocessed(), includeTags, pw);
/**
 * Returns the sentence's typed dependencies with extras (e.g. control
 * dependencies) included, collapsed, and with relations propagated across
 * coordination ("CC processed"). Recommended for best representing a
 * sentence's semantic and syntactic relations. The output is in general a
 * directed graph — not necessarily a tree — and may contain small cycles.
 * The "CCPropagated" option corresponds to calling this method.
 *
 * @return collapsed dependencies with CC processed
 */
public List<TypedDependency> typedDependenciesCCprocessed() {
  // Delegate to the Extras-parameterized overload, requesting all extras.
  final Extras allExtras = Extras.MAXIMAL;
  return typedDependenciesCCprocessed(allExtras);
}
/**
 * Get a list of the typed dependencies, including extras like control
 * dependencies, collapsing them and distributing relations across
 * coordination. This method is generally recommended for best
 * representing the semantic and syntactic relations of a sentence. In
 * general it returns a directed graph (i.e., the output may not be a tree
 * and it may contain (small) cycles).
 * The "CCPropagated" option corresponds to calling this method.
 *
 * @return collapsed dependencies with CC processed
 */
public List<TypedDependency> typedDependenciesCCprocessed() {
  // Call the Extras-parameterized overload directly rather than the
  // deprecated boolean overload; true is equivalent to Extras.MAXIMAL.
  return typedDependenciesCCprocessed(Extras.MAXIMAL);
}
/**
 * Produces the typed dependencies for the sentence with all extras (such as
 * control dependencies) included, collapsed, and with relations distributed
 * across coordination ("CC processed"). This is generally the best choice
 * for representing a sentence's semantic and syntactic relations; note the
 * result is a directed graph that may not be a tree and can contain small
 * cycles. The "CCPropagated" option corresponds to calling this method.
 *
 * @return collapsed dependencies with CC processed
 */
public List<TypedDependency> typedDependenciesCCprocessed() {
  // Equivalent to the original one-liner: request maximal extras.
  Extras mode = Extras.MAXIMAL;
  return typedDependenciesCCprocessed(mode);
}
/**
 * Get a list of the typed dependencies, including extras like control
 * dependencies, collapsing them and distributing relations across
 * coordination. This method is generally recommended for best
 * representing the semantic and syntactic relations of a sentence. In
 * general it returns a directed graph (i.e., the output may not be a tree
 * and it may contain (small) cycles).
 * The "CCPropagated" option corresponds to calling this method.
 *
 * @return collapsed dependencies with CC processed
 */
public List<TypedDependency> typedDependenciesCCprocessed() {
  // Use the Extras-parameterized overload instead of the deprecated
  // boolean overload; passing true was equivalent to Extras.MAXIMAL.
  return typedDependenciesCCprocessed(Extras.MAXIMAL);
}
/**
 * Get a list of the typed dependencies with CC processed, selecting extras
 * via a boolean flag.
 *
 * @param includeExtras if true, behaves as {@code Extras.MAXIMAL};
 *                      if false, as {@code Extras.NONE}
 * @return collapsed dependencies with CC processed
 * @deprecated Use the {@code Extras}-parameterized overload instead.
 * @see edu.stanford.nlp.trees.GrammaticalStructure#typedDependenciesCCprocessed(edu.stanford.nlp.trees.GrammaticalStructure.Extras)
 */
@Deprecated
public List<TypedDependency> typedDependenciesCCprocessed(boolean includeExtras) {
  // Map the legacy boolean onto the Extras enum and delegate.
  return typedDependenciesCCprocessed(includeExtras ? Extras.MAXIMAL : Extras.NONE);
}
/**
 * Get a list of the typed dependencies with CC processed, selecting extras
 * via a boolean flag.
 *
 * @param includeExtras if true, behaves as {@code Extras.MAXIMAL};
 *                      if false, as {@code Extras.NONE}
 * @return collapsed dependencies with CC processed
 * @deprecated Use the {@code Extras}-parameterized overload instead.
 * @see edu.stanford.nlp.trees.GrammaticalStructure#typedDependenciesCCprocessed(edu.stanford.nlp.trees.GrammaticalStructure.Extras)
 */
@Deprecated
public List<TypedDependency> typedDependenciesCCprocessed(boolean includeExtras) {
  // Map the legacy boolean onto the Extras enum and delegate.
  return typedDependenciesCCprocessed(includeExtras ? Extras.MAXIMAL : Extras.NONE);
}
// Parse a fixed example sentence and write the tree to Data/tree.txt.
// NOTE(review): pw is never closed in this fragment — ensure the enclosing
// method closes it (ideally via try-with-resources), or output may be lost.
File f=new File("Data/tree.txt");
PrintWriter pw=new PrintWriter(f);
// This option shows loading and using an explicit tokenizer
String sent2 = "This is another sentence.";
TokenizerFactory<CoreLabel> tokenizerFactory =PTBTokenizer.factory(new CoreLabelTokenFactory(), "");
Tokenizer<CoreLabel> tok =tokenizerFactory.getTokenizer(new StringReader(sent2));
List<CoreLabel> rawWords2 = tok.tokenize();
Tree parse = lp.apply(rawWords2);
TreebankLanguagePack tlp = new PennTreebankLanguagePack();
GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
// Collapsed typed dependencies with coordination propagated (CC processed).
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
System.out.println(tdl);
System.out.println();
// You can also use a TreePrint object to print trees and dependencies
TreePrint tp = new TreePrint("penn,typedDependenciesCollapsed");
tp.printTree(parse,pw);
/** * demoDP demonstrates turning a file into tokens and then parse * trees. Note that the trees are printed by calling pennPrint on * the Tree object. It is also possible to pass a PrintWriter to * pennPrint if you want to capture the output. * This code will work with any supported language. */ public static void demoDP(LexicalizedParser lp, String filename) { // This option shows loading, sentence-segmenting and tokenizing // a file using DocumentPreprocessor. TreebankLanguagePack tlp = lp.treebankLanguagePack(); // a PennTreebankLanguagePack for English GrammaticalStructureFactory gsf = null; if (tlp.supportsGrammaticalStructures()) { gsf = tlp.grammaticalStructureFactory(); } // You could also create a tokenizer here (as below) and pass it // to DocumentPreprocessor for (List<HasWord> sentence : new DocumentPreprocessor(filename)) { Tree parse = lp.apply(sentence); parse.pennPrint(); System.out.println(); if (gsf != null) { GrammaticalStructure gs = gsf.newGrammaticalStructure(parse); Collection tdl = gs.typedDependenciesCCprocessed(); System.out.println(tdl); System.out.println(); } } }
/** * demoDP demonstrates turning a file into tokens and then parse * trees. Note that the trees are printed by calling pennPrint on * the Tree object. It is also possible to pass a PrintWriter to * pennPrint if you want to capture the output. * This code will work with any supported language. */ public static void demoDP(LexicalizedParser lp, String filename) { // This option shows loading, sentence-segmenting and tokenizing // a file using DocumentPreprocessor. TreebankLanguagePack tlp = lp.treebankLanguagePack(); // a PennTreebankLanguagePack for English GrammaticalStructureFactory gsf = null; if (tlp.supportsGrammaticalStructures()) { gsf = tlp.grammaticalStructureFactory(); } // You could also create a tokenizer here (as below) and pass it // to DocumentPreprocessor for (List<HasWord> sentence : new DocumentPreprocessor(filename)) { Tree parse = lp.apply(sentence); parse.pennPrint(); System.out.println(); if (gsf != null) { GrammaticalStructure gs = gsf.newGrammaticalStructure(parse); Collection tdl = gs.typedDependenciesCCprocessed(); System.out.println(tdl); System.out.println(); } } }
// CC-processed typed dependencies: collapsed, with relations distributed across coordination.
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
System.out.println(tdl);
System.out.println();
// CC-processed typed dependencies: collapsed, with relations distributed across coordination.
List<TypedDependency> tdl = gs.typedDependenciesCCprocessed();
System.out.println(tdl);
System.out.println();