/** Exposes the Lucene documents backing this parsed document, delegating to the parse context. */
public List<Document> docs() {
    final List<Document> parsedDocs = this.doc.docs();
    return parsedDocs;
}
/**
 * Turns the document currently being processed into a tombstone document instead of a
 * regular one. Tombstone documents are stored in the Lucene index to represent delete
 * operations or no-ops.
 */
ParsedDocument toTombstone() {
    final List<Document> parsedDocs = docs();
    // A tombstone stands for a single operation, so it must expand to exactly one Lucene doc.
    assert parsedDocs.size() == 1 : "Tombstone should have a single doc [" + parsedDocs + "]";
    this.seqID.tombstoneField.setLongValue(1);
    rootDoc().add(this.seqID.tombstoneField);
    return this;
}
/** Exposes the Lucene documents backing this parsed document, delegating to the parse context. */
public List<Document> docs() {
    final List<Document> parsedDocs = this.doc.docs();
    return parsedDocs;
}
/** Exposes the Lucene documents backing this parsed document, delegating to the parse context. */
public List<Document> docs() {
    final List<Document> parsedDocs = this.doc.docs();
    return parsedDocs;
}
/** Exposes the Lucene documents backing this parsed document, delegating to the parse context. */
public List<Document> docs() {
    final List<Document> parsedDocs = this.doc.docs();
    return parsedDocs;
}
/** Exposes the Lucene documents backing this parsed document, delegating to the parse context. */
public List<Document> docs() {
    final List<Document> parsedDocs = this.doc.docs();
    return parsedDocs;
}
// With soft deletes enabled, a delete is recorded by indexing a "tombstone" document supplied
// by the engine's tombstone-doc supplier, stamped with the plan's seqNo/primaryTerm and version.
// The asserts check the tombstone expands to exactly one Lucene doc and carries the _tombstone field.
// NOTE(review): fragment is truncated here — the enclosing if-block continues beyond this view.
if (softDeleteEnabled) { final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; tombstone.updateSeqID(plan.seqNoOfDeletion, delete.primaryTerm()); tombstone.version().setLongValue(plan.versionOfDeletion); final ParseContext.Document doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + doc + " ]";
/**
 * Turns the document currently being processed into a tombstone document instead of a
 * regular one. Tombstone documents are stored in the Lucene index to represent delete
 * operations or no-ops.
 */
ParsedDocument toTombstone() {
    final List<Document> parsedDocs = docs();
    // A tombstone stands for a single operation, so it must expand to exactly one Lucene doc.
    assert parsedDocs.size() == 1 : "Tombstone should have a single doc [" + parsedDocs + "]";
    this.seqID.tombstoneField.setLongValue(1);
    rootDoc().add(this.seqID.tombstoneField);
    return this;
}
/**
 * Indexes the given parsed documents into a fresh in-memory Lucene {@link Directory}.
 * Each {@link ParsedDocument} may expand to several Lucene documents (e.g. nested docs);
 * all of them are added using the parsed document's own analyzer.
 *
 * @param parsedDocuments the parsed documents to index
 * @return a RAM-backed directory containing the indexed documents
 * @throws ElasticsearchException if writing to the in-memory index fails
 */
public Directory indexDocuments(List<ParsedDocument> parsedDocuments) {
    Directory directory = new RAMDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_4_10_4,
            mapperService.analysisService().defaultIndexAnalyzer());
    // try-with-resources guarantees the writer is closed even if addDocument throws;
    // the original code only closed the writer on the success path, leaking it on failure.
    try (IndexWriter iwriter = new IndexWriter(directory, conf)) {
        for (ParsedDocument document : parsedDocuments) {
            for (ParseContext.Document doc : document.docs()) {
                iwriter.addDocument(doc, document.analyzer());
            }
        }
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to write documents to RAMDirectory", e);
    }
    return directory;
}
}
// Builds a throwaway in-memory Lucene index containing a single document parsed from the
// request (via the index's sole mapping type), then invokes the handler against the index's
// first (and only) leaf reader within a QueryShardContext. All resources — RAMDirectory,
// IndexWriter, IndexReader — are released via nested try-with-resources in reverse order.
// The current time is captured once into absoluteStartMillis so the context's "now" is stable
// for the duration of the handler call.
// NOTE(review): the trailing assert relies on the index having exactly one mapping type.
private static Response prepareRamIndex(Request request, CheckedBiFunction<QueryShardContext, LeafReaderContext, Response, IOException> handler, IndexService indexService) throws IOException { Analyzer defaultAnalyzer = indexService.getIndexAnalyzers().getDefaultIndexAnalyzer(); try (RAMDirectory ramDirectory = new RAMDirectory()) { try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(defaultAnalyzer))) { String index = indexService.index().getName(); assert indexService.mapperService().types().size() == 1; String type = indexService.mapperService().types().iterator().next(); BytesReference document = request.contextSetup.document; XContentType xContentType = request.contextSetup.xContentType; SourceToParse sourceToParse = SourceToParse.source(index, type, "_id", document, xContentType); ParsedDocument parsedDocument = indexService.mapperService().documentMapper(type).parse(sourceToParse); indexWriter.addDocuments(parsedDocument.docs()); try (IndexReader indexReader = DirectoryReader.open(indexWriter)) { final long absoluteStartMillis = System.currentTimeMillis(); QueryShardContext context = indexService.newQueryShardContext(0, indexReader, () -> absoluteStartMillis, null); return handler.apply(context, indexReader.leaves().get(0)); } } } } }
// If there is more than one parsed document, or a single parsed document expanded into
// multiple Lucene docs, a multi-document searcher is needed. The assert encodes the
// invariant that a single parsed document can only expand when the mapping has nested objects.
// NOTE(review): fragment is truncated here — the enclosing if-block continues beyond this view.
if (docs.size() > 1 || docs.get(0).docs().size() > 1) { assert docs.size() != 1 || docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(analyzer, docs);
// If there is more than one parsed document, or a single parsed document expanded into
// multiple Lucene docs, a multi-document searcher is needed. The assert encodes the
// invariant that a single parsed document can only expand when the mapping has nested objects.
// NOTE(review): fragment is truncated here — the enclosing if-block continues beyond this view.
if (docs.size() > 1 || docs.get(0).docs().size() > 1) { assert docs.size() != 1 || docMapper.hasNestedObjects(); docSearcher = createMultiDocumentSearcher(analyzer, docs);
// With soft deletes enabled, a delete is recorded by indexing a "tombstone" document supplied
// by the engine's tombstone-doc supplier, stamped with the plan's seqNo/primaryTerm and version.
// The asserts check the tombstone expands to exactly one Lucene doc and carries the _tombstone field.
// NOTE(review): fragment is truncated here — the enclosing if-block continues beyond this view.
if (softDeleteEnabled) { final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id()); assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]"; tombstone.updateSeqID(plan.seqNoOfDeletion, delete.primaryTerm()); tombstone.version().setLongValue(plan.versionOfDeletion); final ParseContext.Document doc = tombstone.docs().get(0); assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null : "Delete tombstone document but _tombstone field is not set [" + doc + " ]";
// Trace-level diagnostics for the parsed document: index name plus the doc's id, type,
// routing, and the expanded Lucene documents (parameterized logging, evaluated lazily).
logger.trace("index={} id={} type={} routing={} docs={}", context.indexInfo.name, parsedDoc.id(), parsedDoc.type(), parsedDoc.routing(), parsedDoc.docs());
// A parsed document that expanded into multiple Lucene docs must come from a nested mapping
// (asserted), and is routed to the multi-document percolator index.
// NOTE(review): fragment is truncated here — the enclosing if-block continues beyond this view.
if (parsedDocument.docs().size() > 1) { assert isNested; percolatorIndex = multi;