/**
 * Dispatches the numeric value of the given field to the matching
 * type-specific method on the {@link Serializer}.
 *
 * @param serializer  receiver of the serialized numeric field
 * @param fieldable   the Lucene field whose numeric value is serialized
 * @param fieldType   the Lucene field type wrapped into the context
 * @param numericType decides which primitive accessor of the numeric value is used
 * @throws RuntimeException (via {@code log.unknownNumericFieldType}) for an unhandled numeric type
 */
private void serializeNumericField(Serializer serializer, IndexableField fieldable, FieldType fieldType, NumericType numericType) {
    LuceneNumericFieldContext numericContext =
            new LuceneNumericFieldContext( fieldType, fieldable.name(), fieldable.boost() );
    if ( numericType == NumericType.INT ) {
        serializer.addIntNumericField( fieldable.numericValue().intValue(), numericContext );
    }
    else if ( numericType == NumericType.LONG ) {
        serializer.addLongNumericField( fieldable.numericValue().longValue(), numericContext );
    }
    else if ( numericType == NumericType.FLOAT ) {
        serializer.addFloatNumericField( fieldable.numericValue().floatValue(), numericContext );
    }
    else if ( numericType == NumericType.DOUBLE ) {
        serializer.addDoubleNumericField( fieldable.numericValue().doubleValue(), numericContext );
    }
    else {
        // Any other constant is unsupported by the serialization protocol.
        String dataType = numericType.toString();
        throw log.unknownNumericFieldType( dataType );
    }
}
/**
 * Serializes a numeric field's value through the {@link Serializer},
 * choosing the primitive-typed variant that matches {@code numericType}.
 *
 * @param serializer  target serializer
 * @param fieldable   field providing name, boost, and numeric value
 * @param fieldType   field type carried along in the field context
 * @param numericType selects the int/long/float/double serialization path
 * @throws RuntimeException (via {@code log.unknownNumericFieldType}) when the type is not handled
 */
private void serializeNumericField(Serializer serializer, IndexableField fieldable, FieldType fieldType, NumericType numericType) {
    LuceneNumericFieldContext fieldCtx = new LuceneNumericFieldContext(
            fieldType,
            fieldable.name(),
            fieldable.boost()
    );
    switch ( numericType ) {
        case INT:
            serializer.addIntNumericField( fieldable.numericValue().intValue(), fieldCtx );
            break;
        case LONG:
            serializer.addLongNumericField( fieldable.numericValue().longValue(), fieldCtx );
            break;
        case FLOAT:
            serializer.addFloatNumericField( fieldable.numericValue().floatValue(), fieldCtx );
            break;
        case DOUBLE:
            serializer.addDoubleNumericField( fieldable.numericValue().doubleValue(), fieldCtx );
            break;
        default:
            // Unreachable for the four supported constants; anything else is a protocol error.
            String dataType = numericType.toString();
            throw log.unknownNumericFieldType( dataType );
    }
}
MemoryIndex indexDoc(ParseContext.Document d, Analyzer analyzer, MemoryIndex memoryIndex) { for (IndexableField field : d.getFields()) { if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { continue; } try { // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous, // like the indexer does try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { if (tokenStream != null) { memoryIndex.addField(field.name(), tokenStream, field.boost()); } } } catch (IOException e) { throw new ElasticsearchException("Failed to create token stream", e); } } return memoryIndex; }
invertState.boost *= field.boost();
if (fieldType.omitNorms() && field.boost() != 1.0f) { throw new UnsupportedOperationException("You cannot set an index-time boost: norms are omitted for field '" + field.name() + "'");
if (fieldType.omitNorms() && field.boost() != 1.0f) { throw new UnsupportedOperationException("You cannot set an index-time boost: norms are omitted for field '" + field.name() + "'");
invertState.boost *= field.boost();
@Override public void prepare(PercolateContext context, ParsedDocument parsedDocument) { MemoryIndex memoryIndex = cache.get(); for (IndexableField field : parsedDocument.rootDoc().getFields()) { if (field.fieldType().indexOptions() == IndexOptions.NONE && field.name().equals(UidFieldMapper.NAME)) { continue; } try { Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer(); // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous, // like the indexer does try (TokenStream tokenStream = field.tokenStream(analyzer, null)) { if (tokenStream != null) { memoryIndex.addField(field.name(), tokenStream, field.boost()); } } } catch (Exception e) { throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e); } } context.initialize(new DocEngineSearcher(memoryIndex), parsedDocument); }
builder.field("storeTermVectorPayloads", field.fieldType().storeTermVectorPayloads()); builder.field("storeTermVectorPositions", field.fieldType().storeTermVectorPositions()); builder.field("boost", field.boost()); builder.endObject(); return builder;
if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) { if (!encounteredFields.contains(field.name())) { ((Field) field).setBoost(context.docBoost() * field.boost()); encounteredFields.add(field.name());