"ixa-pipe-nerc-" + Files.getNameWithoutExtension(model), version + "-" + commit); newLp.setBeginTimestamp(); annotator.annotateNEs(kaf); newLp.setEndTimestamp();
/** Adds a linguistic processor to the document header. The timestamp is added implicitly. */ public LinguisticProcessor addLinguisticProcessor(String layer, String name) { LinguisticProcessor lp = new LinguisticProcessor(name, layer); //lp.setBeginTimestamp(timestamp); // no default timestamp List<LinguisticProcessor> layerLps = lps.get(layer); if (layerLps == null) { layerLps = new ArrayList<LinguisticProcessor>(); lps.put(layer, layerLps); } layerLps.add(lp); return lp; }
/**
 * Sets the begin timestamp of this processor. As a side effect, when no
 * hostname has been recorded yet, the local machine's host name is filled in.
 *
 * @param timestamp the begin timestamp value to record
 */
public void setBeginTimestamp(String timestamp) {
  this.beginTimestamp = timestamp;
  if (!this.hasHostname()) {
    try {
      this.setHostname(InetAddress.getLocalHost().getHostName());
    } catch (UnknownHostException ignored) {
      // Best effort: hostname is optional metadata, so leave it unset when
      // the local host name cannot be resolved rather than failing the call.
    }
  }
}
String timestamp = getOptAttribute("timestamp", lpElem); if (timestamp != null) { newLp.setTimestamp(timestamp); newLp.setEndTimestamp(endTimestamp); newLp.setVersion(version); newLp.setHostname(hostname);
Element lpElem = new Element("lp"); lpElem.setAttribute("name", lp.name); if (lp.hasTimestamp()) { lpElem.setAttribute("timestamp", lp.timestamp); if (lp.hasBeginTimestamp()) { lpElem.setAttribute("beginTimestamp", lp.beginTimestamp); if (lp.hasEndTimestamp()) { lpElem.setAttribute("endTimestamp", lp.endTimestamp); if (lp.hasVersion()) { lpElem.setAttribute("version", lp.version); if (lp.hasHostname()) { lpElem.setAttribute("hostname", lp.hostname);
"terms", "ixa-pipe-pos-" + Files.getNameWithoutExtension(model), this.version + "-" + this.commit); newLp.setBeginTimestamp(); } else { annotator.getAllTagsLemmasToNAF(kaf); newLp.setEndTimestamp(); bwriter.write(kaf.toString()); } else { annotator.annotatePOSToKAF(kaf); newLp.setEndTimestamp(); bwriter.write(kaf.toString());
KAFDocument.LinguisticProcessor newLp = kaf.addLinguisticProcessor( "opinions", "ixa-pipe-nerc-" + Files.getNameWithoutExtension(model), version + "-" + commit); newLp.setBeginTimestamp(); OpinionTargetExtractor oteExtractor = new OpinionTargetExtractor(properties); oteExtractor.annotateOTE(kaf); newLp.setEndTimestamp(); String kafToString = null; if (outputFormat.equalsIgnoreCase("opennlp")) {
KAFDocument.LinguisticProcessor newLp = kaf.addLinguisticProcessor( "entities", "ixa-pipe-nerc-" + Files.getNameWithoutExtension(model), version + "-" + commit); newLp.setBeginTimestamp(); Annotate annotator = new Annotate(properties); annotator.annotateNEs(kaf); newLp.setEndTimestamp(); String kafToString = null; if (outputFormat.equalsIgnoreCase("conll03")) {
"terms", "ixa-pipe-pos-" + Files.getNameWithoutExtension(model), this.version + "-" + this.commit); newLp.setBeginTimestamp(); String kafToString = null; if (allMorphology) { } else { annotator.getAllTagsLemmasToNAF(kaf); newLp.setEndTimestamp(); kafToString = kaf.toString(); } else { annotator.annotatePOSToKAF(kaf); newLp.setEndTimestamp(); kafToString = kaf.toString();
"entities", "ixa-pipe-nerc-" + Files.getNameWithoutExtension(model), version + "-" + commit); newLp.setBeginTimestamp(); annotator.annotateOTE(kaf); newLp.setEndTimestamp();
/**
 * Sets the begin timestamp to the current time; the single-argument overload
 * it delegates to also backfills the hostname when none is set.
 */
public void setBeginTimestamp() {
  this.setBeginTimestamp(createTimestamp());
}
/**
 * Merges the given linguistic processors, grouped by layer, into this
 * document's header. All metadata present on each source processor
 * (timestamp, begin/end timestamps, version, hostname) is copied onto the
 * newly registered copy.
 *
 * @param lps map from layer name to the processors to add for that layer
 */
public void addLinguisticProcessors(Map<String, List<LinguisticProcessor>> lps) {
  for (Map.Entry<String, List<LinguisticProcessor>> entry : lps.entrySet()) {
    List<LinguisticProcessor> layerLps = entry.getValue();
    for (LinguisticProcessor lp : layerLps) {
      LinguisticProcessor newLp = this.addLinguisticProcessor(entry.getKey(), lp.name);
      if (lp.hasTimestamp()) newLp.setTimestamp(lp.getTimestamp());
      // Direct field copy on purpose: setBeginTimestamp(String) would also
      // backfill the local hostname, clobbering the source processor's data.
      if (lp.hasBeginTimestamp()) newLp.beginTimestamp = lp.beginTimestamp;
      if (lp.hasEndTimestamp()) newLp.setEndTimestamp(lp.getEndTimestamp());
      if (lp.hasVersion()) newLp.setVersion(lp.getVersion());
      // Bug fix: hostname was previously dropped when copying processors,
      // even though it is parsed and serialized alongside the other fields.
      if (lp.hasHostname()) newLp.setHostname(lp.hostname);
    }
  }
}