/**
 * Marks the document being processed as a tombstone document rather than a regular document.
 * Tombstone documents are stored in the Lucene index to represent delete operations or no-ops.
 */
ParsedDocument toTombstone() {
    assert docs().size() == 1 : "Tombstone should have a single doc [" + docs() + "]";
    this.seqID.tombstoneField.setLongValue(1);
    rootDoc().add(this.seqID.tombstoneField);
    return this;
}
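At the Lucene level the tombstone flag is nothing more than an extra numeric doc-values field on an otherwise ordinary document. A minimal sketch of that pattern, assuming the field name "_tombstone" (the value Elasticsearch exposes as SeqNoFieldMapper.TOMBSTONE_NAME); this is an illustration, not the Elasticsearch implementation:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;

public final class TombstoneSketch {
    // Mark a plain Lucene document as a tombstone by attaching a
    // doc-values flag set to 1 ("_tombstone" is an assumed field name).
    public static Document markAsTombstone(Document doc) {
        doc.add(new NumericDocValuesField("_tombstone", 1));
        return doc;
    }
}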
sb.append("took[").append(TimeValue.timeValueNanos(tookInNanos)).append("], "); sb.append("took_millis[").append(TimeUnit.NANOSECONDS.toMillis(tookInNanos)).append("], "); sb.append("type[").append(doc.type()).append("], "); sb.append("id[").append(doc.id()).append("], "); if (doc.routing() == null) { sb.append("routing[]"); } else { sb.append("routing[").append(doc.routing()).append("]"); if (maxSourceCharsToLog == 0 || doc.source() == null || doc.source().length() == 0) { return sb.toString(); String source = XContentHelper.convertToJson(doc.source(), reformat, doc.getXContentType()); sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog)).append("]"); } catch (IOException e) {
private static ParsedDocument parsedDocument(SourceToParse source, ParseContext.InternalParseContext context, Mapping update) {
    return new ParsedDocument(
        context.version(),
        context.seqID(),
        context.sourceToParse().id(),
        context.sourceToParse().type(),
        source.routing(),
        context.docs(),
        context.sourceToParse().source(),
        context.sourceToParse().getXContentType(),
        update
    ).parent(source.parent());
}
public static Engine.Index prepareIndex(DocumentMapperForType docMapper, Version indexCreatedVersion, SourceToParse source,
                                         long seqNo, long primaryTerm, long version, VersionType versionType,
                                         Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry,
                                         long ifSeqNo, long ifPrimaryTerm) {
    long startTime = System.nanoTime();
    ParsedDocument doc = docMapper.getDocumentMapper().parse(source);
    if (docMapper.getMapping() != null) {
        doc.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    Term uid;
    if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {
        uid = new Term(IdFieldMapper.NAME, Uid.encodeId(doc.id()));
    } else if (docMapper.getDocumentMapper().idFieldMapper().fieldType().indexOptions() != IndexOptions.NONE) {
        uid = new Term(IdFieldMapper.NAME, doc.id());
    } else {
        uid = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(doc.type(), doc.id()));
    }
    return new Engine.Index(uid, doc, seqNo, primaryTerm, version, versionType, origin, startTime,
        autoGeneratedIdTimestamp, isRetry, ifSeqNo, ifPrimaryTerm);
}
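The three uid branches above encode the document key differently depending on when the index was created. A hedged illustration with placeholder values ("doc-1" and "my_type" are made up; Term, Uid, IdFieldMapper, and UidFieldMapper are the same classes used in prepareIndex):

// Indices created on or after 6.0: the _id term holds the binary-encoded id.
Term post60 = new Term(IdFieldMapper.NAME, Uid.encodeId("doc-1"));
// Older indices where _id is indexed: a plain string term on _id.
Term indexedId = new Term(IdFieldMapper.NAME, "doc-1");
// Legacy indices: a single _uid term combining type and id.
Term legacyUid = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes("my_type", "doc-1"));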
final ParsedDocument parsedDoc = new ParsedDocument(
    context.version(),
    (isStatic()) ? partitionKey : id,
    // ... remaining constructor arguments elided in this fragment ...
);
parsedDoc.parent(context.parent());
logger.trace("index={} id={} type={} routing={} docs={}",
    context.indexInfo.name, parsedDoc.id(), parsedDoc.type(), parsedDoc.routing(), parsedDoc.docs());
// ... fragment continues with a call taking these arguments:
//     baseCfs.metadata.ksName, baseCfs.metadata.cfName, context.indexInfo.name,
//     typeName, parsedDoc.id(), operation.version(), result.isCreated(),
//     isStatic(), ttl, context.indexInfo.refresh
if (softDeleteEnabled) {
    final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newDeleteTombstoneDoc(delete.type(), delete.id());
    assert tombstone.docs().size() == 1 : "Tombstone doc should have single doc [" + tombstone + "]";
    tombstone.updateSeqID(plan.seqNoOfDeletion, delete.primaryTerm());
    tombstone.version().setLongValue(plan.versionOfDeletion);
    final ParseContext.Document doc = tombstone.docs().get(0);
    assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null :
        "Delete tombstone document but _tombstone field is not set [" + doc + " ]";
    // ...
}
public List<Document> docs() {
    return this.doc.docs();
}
private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException {
    // parse the document; for now this updates the mapping, just like percolate does
    ParsedDocument parsedDocument = parseDocument(indexShard, indexShard.shardId().getIndexName(), request.type(),
        request.doc(), request.xContentType());
    // select the right fields and generate term vectors
    ParseContext.Document doc = parsedDocument.rootDoc();
    Set<String> seenFields = new HashSet<>();
    Collection<DocumentField> documentFields = new HashSet<>();
    for (IndexableField field : doc.getFields()) {
        MappedFieldType fieldType = indexShard.mapperService().fullName(field.name());
        if (!isValidField(fieldType)) {
            continue;
        }
        if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {
            continue;
        }
        if (seenFields.contains(field.name())) {
            continue;
        } else {
            seenFields.add(field.name());
        }
        String[] values = doc.getValues(field.name());
        documentFields.add(new DocumentField(field.name(), Arrays.asList((Object[]) values)));
    }
    return generateTermVectors(indexShard,
        XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(),
        documentFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
}
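The underlying trick is deriving term statistics for a document that was never written to disk by running its fields through an in-memory index. A standalone sketch of that idea, assuming a reasonably recent Lucene (5.x+ API; the class and field names are made up for illustration):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.util.BytesRef;

public final class ArtificialTermVectorsSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        MemoryIndex index = new MemoryIndex(true); // true: store offsets as well
        index.addField("body", "the quick brown fox", analyzer);
        // A MemoryIndex always has exactly one leaf holding the single document.
        LeafReader leaf = index.createSearcher().getIndexReader().leaves().get(0).reader();
        Terms terms = leaf.terms("body");
        TermsEnum termsEnum = terms.iterator();
        for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
            System.out.println(term.utf8ToString() + " freq=" + termsEnum.totalTermFreq());
        }
    }
}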
public void initialize(Engine.Searcher docSearcher, ParsedDocument parsedDocument) {
    this.docSearcher = docSearcher;
    IndexReader indexReader = docSearcher.reader();
    LeafReaderContext atomicReaderContext = indexReader.leaves().get(0);
    LeafSearchLookup leafLookup = lookup().getLeafSearchLookup(atomicReaderContext);
    leafLookup.setDocument(0);
    leafLookup.source().setSource(parsedDocument.source());
    Map<String, SearchHitField> fields = new HashMap<>();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList()));
    }
    hitContext().reset(
        new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields),
        atomicReaderContext, 0, docSearcher.searcher()
    );
}
@Override
public String id() {
    return this.doc.id();
}
private Engine.IndexResult applyIndexOperation(Engine engine, long seqNo, long opPrimaryTerm, long version,
                                               VersionType versionType, long ifSeqNo, long ifPrimaryTerm,
                                               long autoGeneratedTimeStamp, boolean isRetry,
                                               Engine.Operation.Origin origin, SourceToParse sourceToParse) throws IOException {
    assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [" + opPrimaryTerm + "] > shard term [" + this.operationPrimaryTerm + "]";
    assert versionType.validateVersionForWrites(version);
    ensureWriteAllowed(origin);
    Engine.Index operation;
    try {
        operation = prepareIndex(docMapper(sourceToParse.type()), indexSettings.getIndexVersionCreated(), sourceToParse,
            seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry, ifSeqNo, ifPrimaryTerm);
        Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
        if (update != null) {
            return new Engine.IndexResult(update);
        }
    } catch (Exception e) {
        // We treat any exception during parsing and/or mapping update as a document-level failure,
        // with the exception of side effects that close the shard. Since we don't have the shard
        // here, we cannot raise an exception that might block replication of previous operations
        // to the replicas.
        verifyNotClosed(e);
        return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
    }
    return index(engine, operation);
}
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    IndexReader[] memoryIndices = new IndexReader[parsedDocument.docs().size()];
    List<ParseContext.Document> docs = parsedDocument.docs();
    int rootDocIndex = docs.size() - 1;
    assert rootDocIndex > 0;
    for (int i = 0; i < docs.size(); i++) {
        ParseContext.Document d = docs.get(i);
        MemoryIndex memoryIndex = new MemoryIndex(true);
        Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
        memoryIndices[i] = indexDoc(d, analyzer, memoryIndex).createSearcher().getIndexReader();
    }
    // ...
}
@Override
public void prepare(PercolateContext context, ParsedDocument parsedDocument) {
    MemoryIndex memoryIndex = cache.get();
    for (IndexableField field : parsedDocument.rootDoc().getFields()) {
        if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) {
            continue;
        }
        try {
            Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
            // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
            // like the indexer does
            try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
                if (tokenStream != null) {
                    memoryIndex.addField(field.name(), tokenStream, field.boost());
                }
            }
        } catch (Exception e) {
            throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e);
        }
    }
    context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument);
}
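The MemoryIndex call used above can be exercised on its own. A minimal sketch, assuming a Lucene version where addField still accepts a boost (4.x-6.x); the field name and text are placeholders:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.TermQuery;

public final class MemoryIndexFieldSketch {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        MemoryIndex memoryIndex = new MemoryIndex(true);
        // Feed one field in through a TokenStream, mirroring addField(name, stream, boost) above.
        try (TokenStream tokenStream = analyzer.tokenStream("body", "quick brown fox")) {
            memoryIndex.addField("body", tokenStream, 1.0f);
        }
        // MemoryIndex can score a query directly against its single in-memory document.
        float score = memoryIndex.search(new TermQuery(new Term("body", "fox")));
        System.out.println("score=" + score);
    }
}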
public Directory indexDocuments(List<ParsedDocument> parsedDocuments) {
    try {
        Directory directory = new RAMDirectory();
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_4_10_4, mapperService.analysisService().defaultIndexAnalyzer());
        IndexWriter iwriter = new IndexWriter(directory, conf);
        for (ParsedDocument document : parsedDocuments) {
            for (ParseContext.Document doc : document.docs()) {
                iwriter.addDocument(doc, document.analyzer());
            }
        }
        iwriter.close();
        return directory;
    } catch (IOException e) {
        throw new ElasticsearchException("Failed to write documents to RAMDirectory", e);
    }
}
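A hedged usage sketch for the method above: open a reader over the returned in-memory Directory and run a query against it. This would live in the same class (it needs DirectoryReader, IndexSearcher, TermQuery, TopDocs, and Term imports); "field" and "value" are placeholder names:

// Usage sketch: query the Directory produced by indexDocuments().
public long countMatches(List<ParsedDocument> parsedDocuments) throws IOException {
    Directory directory = indexDocuments(parsedDocuments);
    try (DirectoryReader reader = DirectoryReader.open(directory)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs topDocs = searcher.search(new TermQuery(new Term("field", "value")), 10);
        return topDocs.totalHits;
    }
}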
public ParsedDocument createNoopTombstoneDoc(String index, String reason) throws MapperParsingException {
    final String id = ""; // _id won't be used.
    final SourceToParse sourceToParse = SourceToParse.source(index, type, id, new BytesArray("{}"), XContentType.JSON);
    final ParsedDocument parsedDoc = documentParser.parseDocument(sourceToParse, noopTombstoneMetadataFieldMappers).toTombstone();
    // Store the reason for the noop as a raw string in the _source field
    final BytesRef byteRef = new BytesRef(reason);
    parsedDoc.rootDoc().add(new StoredField(SourceFieldMapper.NAME, byteRef.bytes, byteRef.offset, byteRef.length));
    return parsedDoc;
}
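The noop reason round-trips through that stored field. A small sketch of reading it back from the ParsedDocument returned above, using the same getField access seen in the delete-tombstone assertion earlier (IndexableField is org.apache.lucene.index.IndexableField):

// Sketch: recover the noop reason from the stored _source field.
IndexableField sourceField = parsedDoc.rootDoc().getField(SourceFieldMapper.NAME);
String recoveredReason = sourceField.binaryValue().utf8ToString();
assert recoveredReason.equals(reason);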
private ParsedDocument parseDocument(String index, String type, BytesReference doc) throws Throwable {
    MapperService mapperService = indexShard.mapperService();
    // TODO: make parsing not dynamically create fields not in the original mapping
    DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument = docMapper.getDocumentMapper().parse(source(doc).index(index).type(type).flyweight(true));
    if (docMapper.getMapping() != null) {
        parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    if (parsedDocument.dynamicMappingsUpdate() != null) {
        mappingUpdatedAction.updateMappingOnMasterSynchronously(index, type, parsedDocument.dynamicMappingsUpdate());
    }
    return parsedDocument;
}
doc = docMapper.getDocumentMapper().parse(source(parser).index(index).type(request.documentType()).flyweight(true));
if (docMapper.getMapping() != null) {
    doc.addDynamicMappingsUpdate(docMapper.getMapping());
}
if (doc.dynamicMappingsUpdate() != null) {
    mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(),
        doc.dynamicMappingsUpdate());
}
// re-serialize the source consumed by the parser so it can be set on the parsed document
builder.copyCurrentStructure(parser);
builder.close();
doc.setSource(bStream.bytes());
break;
// ... fragment continues in an else branch of the enclosing token loop ...
@Override
public String type() {
    return this.doc.type();
}
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc,
                                            XContentType xContentType) {
    MapperService mapperService = indexShard.mapperService();
    DocumentMapperForType docMapper = mapperService.documentMapperWithAutoCreate(type);
    ParsedDocument parsedDocument = docMapper.getDocumentMapper().parse(source(index, type, "_id_for_tv_api", doc, xContentType));
    if (docMapper.getMapping() != null) {
        parsedDocument.addDynamicMappingsUpdate(docMapper.getMapping());
    }
    return parsedDocument;
}