/**
 * Command-line entry point. Two modes:
 * <ul>
 *   <li>{@code -loadFile f}: deserialize an Annotation from {@code f} and dump it to stdout;</li>
 *   <li>{@code -file f}: annotate the text in {@code f} with a pipeline built from the
 *       remaining properties and serialize the result to {@code f.ser}.</li>
 * </ul>
 * With neither flag, a usage message is logged.
 *
 * @param args command-line flags, parsed into a Properties object
 * @throws Exception on any I/O or pipeline failure
 */
public static void main(String[] args) throws Exception {
  Properties props = StringUtils.argsToProperties(args);
  String file = props.getProperty("file");
  String loadFile = props.getProperty("loadFile");
  if (loadFile != null && ! loadFile.isEmpty()) {
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // try-with-resources guarantees the input stream is closed even if read() throws
    // (the original closed it only on the success path).
    try (InputStream is = new FileInputStream(loadFile)) {
      Pair<Annotation, InputStream> pair = ser.read(is);
      pair.second.close();
      Annotation anno = pair.first;
      System.out.println(anno.toShorterString(StringUtils.EMPTY_STRING_ARRAY));
    }
  } else if (file != null && ! file.isEmpty()) {
    // Build the (expensive) pipeline only in the branch that actually annotates;
    // the load-only path above never used it.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    String text = edu.stanford.nlp.io.IOUtils.slurpFile(file);
    Annotation doc = new Annotation(text);
    pipeline.annotate(doc);
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // write() returns the (possibly wrapped) stream; close that, and os itself
    // is closed by the resource block even on failure.
    try (PrintStream os = new PrintStream(new FileOutputStream(file + ".ser"))) {
      ser.write(doc, os).close();
    }
    log.info("Serialized annotation saved in " + file + ".ser");
  } else {
    log.info("usage: CustomAnnotationSerializer [-file file] [-loadFile file]");
  }
}
// Fragment of a document-loading routine (enclosing method not visible in this view).
// Reads the serialized sections in their fixed on-disk order: coref chains, three
// dependency graphs, then one token per line until a blank line ends the sentence.
Map<Integer, CorefChain> chains = loadCorefChains(reader);
// Only attach the annotation when chains were actually present in the stream.
if(chains != null) doc.set(CorefCoreAnnotations.CorefChainAnnotation.class, chains);
// Graph order on disk: collapsed, basic (uncollapsed), CC-processed.
IntermediateSemanticGraph intermCollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermUncollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermCcDeps = loadDependencyGraph(reader);
// An empty line terminates the token list for this sentence.
while((line = reader.readLine()) != null){
    if(line.length() == 0) break;
    CoreLabel token = loadToken(line, haveExplicitAntecedent);
    tokens.add(token);
// Fragment of a sentence-serialization routine (enclosing method not visible here).
// Writes sections in the same fixed order the loader expects: coref chains, then the
// collapsed / basic / CC-processed dependency graphs, then one token per line.
saveCorefChains(chains, pw);
saveDependencyGraph(collapsedDeps, pw);
SemanticGraph uncollapsedDeps = sent.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
saveDependencyGraph(uncollapsedDeps, pw);
SemanticGraph ccDeps = sent.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
saveDependencyGraph(ccDeps, pw);
// Tokens may be absent; in that case no token lines are written.
if(tokens != null){
    for(CoreLabel token: tokens){
        saveToken(token, haveExplicitAntecedent, pw);
        pw.println();
// Fragment of coref-chain serialization (enclosing method/loop not visible here).
// Header line: cluster id followed by the mention count for that cluster.
pw.println(cid + " " + countMentions(cluster));
// Emit each coordinate of the mention's position tuple, space-separated.
for(int i = 0; i < mention.position.length(); i ++) pw.print(" " + mention.position.get(i));
// The mention text may itself contain spaces; escape them so the record stays
// whitespace-delimited and the loader can split it back apart.
pw.print(" " + escapeSpace(mention.mentionSpan));
pw.println();
// Fragment: parse one serialized CorefMention from its whitespace-split fields
// (enclosing method not visible here).
// bits[2] is "1" iff this mention is the representative mention of its chain.
boolean rep = bits[2].equals("1");
Dictionaries.MentionType mentionType = parseMentionType(bits[3]);
Dictionaries.Number number = parseNumber(bits[4]);
Dictionaries.Gender gender = parseGender(bits[5]);
Dictionaries.Animacy animacy = parseAnimacy(bits[6]);
int startIndex = Integer.parseInt(bits[7]);
int endIndex = Integer.parseInt(bits[8]);
// The span text sits after the variable-length position block (posLen entries);
// spaces were escaped at save time, so undo that here.
String span = unescapeSpace(bits[14 + posLen]);
CorefChain.CorefMention mention = new CorefChain.CorefMention(
    mentionType,
// Fragment of a document-loading routine (enclosing method not visible in this view).
// Reads coref chains, the three dependency graphs, the sentence tokens, and finally
// converts each intermediate graph into a SemanticGraph over those tokens.
Map<Integer, CorefChain> chains = loadCorefChains(reader);
// Only attach the annotation when chains were actually present in the stream.
if(chains != null) doc.set(CorefCoreAnnotations.CorefChainAnnotation.class, chains);
// Graph order on disk: collapsed, basic (uncollapsed), CC-processed.
IntermediateSemanticGraph intermCollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermUncollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermCcDeps = loadDependencyGraph(reader);
// An empty line terminates the token list for this sentence.
while((line = reader.readLine()) != null){
    if(line.length() == 0) break;
    CoreLabel token = loadToken(line, haveExplicitAntecedent);
    tokens.add(token);
// Intermediate graphs reference tokens by index, so conversion must wait until
// the full token list is available.
SemanticGraph collapsedDeps = convertIntermediateGraph(intermCollapsedDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, collapsedDeps);
SemanticGraph uncollapsedDeps = convertIntermediateGraph(intermUncollapsedDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class, uncollapsedDeps);
SemanticGraph ccDeps = convertIntermediateGraph(intermCcDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
// Fragment of a document-loading routine (enclosing method not visible in this view).
// Graph order on disk: collapsed, basic (uncollapsed), CC-processed.
IntermediateSemanticGraph intermCollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermUncollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermCcDeps = loadDependencyGraph(reader);
// An empty line terminates the token list for this sentence.
while((line = reader.readLine()) != null){
    if(line.length() == 0) break;
    CoreLabel token = loadToken(line, haveExplicitAntecedent);
    tokens.add(token);
// Intermediate graphs reference tokens by index, so conversion must wait until
// the full token list is available.
SemanticGraph collapsedDeps = convertIntermediateGraph(intermCollapsedDeps, tokens);
sentence.set(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class, collapsedDeps);
SemanticGraph uncollapsedDeps = convertIntermediateGraph(intermUncollapsedDeps, tokens);
sentence.set(BasicDependenciesAnnotation.class, uncollapsedDeps);
SemanticGraph ccDeps = convertIntermediateGraph(intermCcDeps, tokens);
sentence.set(CollapsedCCProcessedDependenciesAnnotation.class, ccDeps);
// Fragment of a sentence-serialization routine (enclosing method not visible here).
// Writes the collapsed / basic / CC-processed dependency graphs in that fixed
// order, then one token per line.
saveDependencyGraph(collapsedDeps, pw);
SemanticGraph uncollapsedDeps = sent.get(BasicDependenciesAnnotation.class);
saveDependencyGraph(uncollapsedDeps, pw);
SemanticGraph ccDeps = sent.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
saveDependencyGraph(ccDeps, pw);
// Tokens may be absent; in that case no token lines are written.
if(tokens != null){
    for(CoreLabel token: tokens){
        saveToken(token, haveExplicitAntecedent, pw);
        pw.println();
/**
 * Command-line entry point: annotates the text in the {@code -file} argument with a
 * pipeline built from the remaining properties, and saves the serialized annotation
 * next to it as {@code <file>.ser}.
 *
 * @param args command-line flags, parsed into a Properties object
 * @throws Exception on any I/O or pipeline failure
 */
public static void main(String[] args) throws Exception {
  Properties props = StringUtils.argsToProperties(args);
  String file = props.getProperty("file");
  // Fail with a clear usage message instead of an NPE when -file is missing.
  if (file == null || file.isEmpty()) {
    System.err.println("usage: CustomAnnotationSerializer -file <file>");
    return;
  }
  StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
  String text = edu.stanford.nlp.io.IOUtils.slurpFile(file);
  Annotation doc = new Annotation(text);
  pipeline.annotate(doc);
  CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
  // try-with-resources closes the output stream even if save() throws
  // (the original closed it only on the success path).
  try (PrintStream os = new PrintStream(new FileOutputStream(file + ".ser"))) {
    ser.save(doc, os);
  }
  System.out.println("Serialized annotation saved in " + file + ".ser");
}
}
// Fragment: parse one serialized CorefMention from its whitespace-split fields
// (enclosing method not visible here).
// bits[2] is "1" iff this mention is the representative mention of its chain.
boolean rep = bits[2].equals("1");
Dictionaries.MentionType mentionType = parseMentionType(bits[3]);
Dictionaries.Number number = parseNumber(bits[4]);
Dictionaries.Gender gender = parseGender(bits[5]);
Dictionaries.Animacy animacy = parseAnimacy(bits[6]);
int startIndex = Integer.parseInt(bits[7]);
int endIndex = Integer.parseInt(bits[8]);
// The span text sits after the variable-length position block (posLen entries);
// spaces were escaped at save time, so undo that here.
String span = unescapeSpace(bits[14 + posLen]);
CorefChain.CorefMention mention = new CorefChain.CorefMention(
    mentionType,
// Fragment of coref-chain serialization (enclosing method/loop not visible here).
// Header line: cluster id followed by the mention count for that cluster.
pw.println(cid + " " + countMentions(cluster));
// Emit each coordinate of the mention's position tuple, space-separated.
for(int i = 0; i < mention.position.length(); i ++) pw.print(" " + mention.position.get(i));
// The mention text may itself contain spaces; escape them so the record stays
// whitespace-delimited and the loader can split it back apart.
pw.print(" " + escapeSpace(mention.mentionSpan));
pw.println();
// Fragment: parse one serialized CorefMention from its whitespace-split fields
// (enclosing method not visible here).
// bits[2] is "1" iff this mention is the representative mention of its chain.
boolean rep = bits[2].equals("1");
Dictionaries.MentionType mentionType = parseMentionType(bits[3]);
Dictionaries.Number number = parseNumber(bits[4]);
Dictionaries.Gender gender = parseGender(bits[5]);
Dictionaries.Animacy animacy = parseAnimacy(bits[6]);
// NOTE(review): Integer.valueOf boxes and immediately unboxes here;
// Integer.parseInt (used in sibling copies of this code) avoids the box.
int startIndex = Integer.valueOf(bits[7]);
int endIndex = Integer.valueOf(bits[8]);
// The span text sits after the variable-length position block (posLen entries);
// spaces were escaped at save time, so undo that here.
String span = unescapeSpace(bits[14 + posLen]);
CorefChain.CorefMention mention = new CorefChain.CorefMention(
    mentionType,
// Fragment of a sentence-serialization routine (enclosing method not visible here).
// Writes sections in the same fixed order the loader expects: coref chains, then the
// collapsed / basic / CC-processed dependency graphs, then one token per line.
saveCorefChains(chains, pw);
saveDependencyGraph(collapsedDeps, pw);
SemanticGraph uncollapsedDeps = sent.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
saveDependencyGraph(uncollapsedDeps, pw);
SemanticGraph ccDeps = sent.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
saveDependencyGraph(ccDeps, pw);
// Tokens may be absent; in that case no token lines are written.
if(tokens != null){
    for(CoreLabel token: tokens){
        saveToken(token, haveExplicitAntecedent, pw);
        pw.println();
/**
 * Command-line entry point. Two modes:
 * <ul>
 *   <li>{@code -loadFile f}: deserialize an Annotation from {@code f} and dump it to stdout;</li>
 *   <li>{@code -file f}: annotate the text in {@code f} with a pipeline built from the
 *       remaining properties and serialize the result to {@code f.ser}.</li>
 * </ul>
 * With neither flag, a usage message is printed to stderr.
 *
 * @param args command-line flags, parsed into a Properties object
 * @throws Exception on any I/O or pipeline failure
 */
public static void main(String[] args) throws Exception {
  Properties props = StringUtils.argsToProperties(args);
  StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
  String file = props.getProperty("file");
  String loadFile = props.getProperty("loadFile");
  if (loadFile != null && ! loadFile.equals("")) {
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // try-with-resources guarantees the input stream is closed even if read() throws
    // (the original closed it only on the success path).
    try (InputStream is = new FileInputStream(loadFile)) {
      Pair<Annotation, InputStream> pair = ser.read(is);
      pair.second.close();
      Annotation anno = pair.first;
      System.out.println(anno.toShorterString(new String[0]));
    }
  } else if (file != null && ! file.equals("")) {
    String text = edu.stanford.nlp.io.IOUtils.slurpFile(file);
    Annotation doc = new Annotation(text);
    pipeline.annotate(doc);
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // write() returns the (possibly wrapped) stream; close that, and os itself
    // is closed by the resource block even on failure.
    try (PrintStream os = new PrintStream(new FileOutputStream(file + ".ser"))) {
      ser.write(doc, os).close();
    }
    System.err.println("Serialized annotation saved in " + file + ".ser");
  } else {
    System.err.println("usage: CustomAnnotationSerializer [-file file] [-loadFile file]");
  }
}
// Fragment of a document-loading routine (enclosing method not visible in this view).
// Reads the serialized sections in their fixed on-disk order: coref chains, three
// dependency graphs, then one token per line until a blank line ends the sentence.
Map<Integer, CorefChain> chains = loadCorefChains(reader);
// Only attach the annotation when chains were actually present in the stream.
if(chains != null) doc.set(CorefCoreAnnotations.CorefChainAnnotation.class, chains);
// Graph order on disk: collapsed, basic (uncollapsed), CC-processed.
IntermediateSemanticGraph intermCollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermUncollapsedDeps = loadDependencyGraph(reader);
IntermediateSemanticGraph intermCcDeps = loadDependencyGraph(reader);
// An empty line terminates the token list for this sentence.
while((line = reader.readLine()) != null){
    if(line.length() == 0) break;
    CoreLabel token = loadToken(line, haveExplicitAntecedent);
    tokens.add(token);
// Fragment of coref-chain serialization (enclosing method/loop not visible here).
// Header line: cluster id followed by the mention count for that cluster.
pw.println(cid + " " + countMentions(cluster));
// Emit each coordinate of the mention's position tuple, space-separated.
for(int i = 0; i < mention.position.length(); i ++) pw.print(" " + mention.position.get(i));
// The mention text may itself contain spaces; escape them so the record stays
// whitespace-delimited and the loader can split it back apart.
pw.print(" " + escapeSpace(mention.mentionSpan));
pw.println();
// Fragment of a sentence-serialization routine (enclosing method not visible here).
// Writes sections in the same fixed order the loader expects: coref chains, then the
// collapsed / basic / CC-processed dependency graphs, then one token per line.
saveCorefChains(chains, pw);
saveDependencyGraph(collapsedDeps, pw);
SemanticGraph uncollapsedDeps = sent.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
saveDependencyGraph(uncollapsedDeps, pw);
SemanticGraph ccDeps = sent.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
saveDependencyGraph(ccDeps, pw);
// Tokens may be absent; in that case no token lines are written.
if(tokens != null){
    for(CoreLabel token: tokens){
        saveToken(token, haveExplicitAntecedent, pw);
        pw.println();
/**
 * Command-line entry point. Two modes:
 * <ul>
 *   <li>{@code -loadFile f}: deserialize an Annotation from {@code f} and dump it to stdout;</li>
 *   <li>{@code -file f}: annotate the text in {@code f} with a pipeline built from the
 *       remaining properties and serialize the result to {@code f.ser}.</li>
 * </ul>
 * With neither flag, a usage message is logged.
 *
 * @param args command-line flags, parsed into a Properties object
 * @throws Exception on any I/O or pipeline failure
 */
public static void main(String[] args) throws Exception {
  Properties props = StringUtils.argsToProperties(args);
  String file = props.getProperty("file");
  String loadFile = props.getProperty("loadFile");
  if (loadFile != null && ! loadFile.isEmpty()) {
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // try-with-resources guarantees the input stream is closed even if read() throws
    // (the original closed it only on the success path).
    try (InputStream is = new FileInputStream(loadFile)) {
      Pair<Annotation, InputStream> pair = ser.read(is);
      pair.second.close();
      Annotation anno = pair.first;
      System.out.println(anno.toShorterString(StringUtils.EMPTY_STRING_ARRAY));
    }
  } else if (file != null && ! file.isEmpty()) {
    // Build the (expensive) pipeline only in the branch that actually annotates;
    // the load-only path above never used it.
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    String text = edu.stanford.nlp.io.IOUtils.slurpFile(file);
    Annotation doc = new Annotation(text);
    pipeline.annotate(doc);
    CustomAnnotationSerializer ser = new CustomAnnotationSerializer(false, false);
    // write() returns the (possibly wrapped) stream; close that, and os itself
    // is closed by the resource block even on failure.
    try (PrintStream os = new PrintStream(new FileOutputStream(file + ".ser"))) {
      ser.write(doc, os).close();
    }
    log.info("Serialized annotation saved in " + file + ".ser");
  } else {
    log.info("usage: CustomAnnotationSerializer [-file file] [-loadFile file]");
  }
}