Refine search
if (field.isIndexed() && field.isTokenized()) TokenStream tokens = field.tokenStreamValue(); Reader tokReader = field.readerValue(); tokReader = new StringReader(field.stringValue()); tokens = analyzer.reusableTokenStream(field.name(), tokReader); position += analyzer.getPositionIncrementGap(field.name()); if (field.isStoreOffsetWithTermVector()) offsetAttribute = (OffsetAttribute) tokens.addAttribute(OffsetAttribute.class); Term term = new Term(field.name(), termAttribute.toString()); if (field.isStoreOffsetWithTermVector()) if (!field.getOmitNorms()) invertState.setBoost(doc.getBoost() * field.getBoost()); invertState.setLength(tokensInField); final float norm = similarity.computeNorm(field.name(), invertState); if (!field.getOmitNorms()) if (field.isIndexed() && !field.isTokenized())
/**
 * Builds the column name for a field by delegating to the (name, value)
 * overload and wrapping the resulting bytes in a {@link ByteBuffer}.
 */
public static ByteBuffer createColumnName(Fieldable field) {
    byte[] rawName = createColumnName(field.name(), field.stringValue());
    return ByteBuffer.wrap(rawName);
}
Document copy = new Document(); copy.add(new Field(FieldNames.REINDEXING_REQUIRED, false, "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO)); for (Fieldable f : doc.getFields()) { Fieldable field = null; Field.TermVector tv = getTermVectorParameter(f); Field.Store stored = f.isStored() ? Field.Store.YES : Field.Store.NO; Field.Index indexed = getIndexParameter(f); if (f instanceof LazyTextExtractorField || f.readerValue() != null) { field = new Field(f.name(), new StringReader(""), tv); } else if (f.stringValue() != null) { field = new Field(f.name(), false, f.stringValue(), stored, indexed, tv); } else if (f.isBinary()) { field = new Field(f.name(), f.getBinaryValue(), stored); } else if (f.tokenStreamValue() != null && f.tokenStreamValue() instanceof SingletonTokenStream) { TokenStream tokenStream = f.tokenStreamValue(); TermAttribute termAttribute = tokenStream.addAttribute(TermAttribute.class); PayloadAttribute payloadAttribute = tokenStream.addAttribute(PayloadAttribute.class); String value = new String(termAttribute.termBuffer(), 0, termAttribute.termLength()); tokenStream.reset(); field = new Field(f.name(), new SingletonTokenStream(value, (Payload) payloadAttribute.getPayload().clone())); field.setOmitNorms(f.getOmitNorms()); copy.add(field);
/**
 * Loads the document via the superclass, then rewrites every PROPERTIES
 * field value by replacing the legacy separator character with '[' and
 * re-adding the fields with the same storage/index/term-vector settings.
 */
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    Document result = super.document(n, fieldSelector);
    Fieldable[] propertyFields = result.getFieldables(FieldNames.PROPERTIES);
    if (propertyFields == null) {
        // Nothing to migrate; hand the document back untouched.
        return result;
    }
    result.removeFields(FieldNames.PROPERTIES);
    for (Fieldable propertyField : propertyFields) {
        String migrated = propertyField.stringValue().replace(oldSepChar, '[');
        result.add(new Field(FieldNames.PROPERTIES, false, migrated,
                Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
    }
    return result;
}
if (aggregates != null) Fieldable[] fulltext = doc.getFieldables(FieldNames.FULLTEXT); if (fulltext != null) if (!fulltext[k].isStored()) doc.removeField(fulltext[k].name()); createDocument(aggregates[j], getNamespaceMappings(), index.getIndexFormatVersion(), loadAllProperties); Fieldable[] fulltextFields = aDoc.getFieldables(FieldNames.FULLTEXT); if (fulltextFields != null) if (fulltextFields[k].isStored()) doc.add(fulltextFields[k]); field.tokenStreamValue().incrementToken(); CharTermAttribute term = field.tokenStreamValue().getAttribute(CharTermAttribute.class); PayloadAttribute payload = field.tokenStreamValue().getAttribute(PayloadAttribute.class); doc.add(new Field(field.name(), new SingletonTokenStream(term.toString(), payload .getPayload()))); doc.add(new Field(FieldNames.AGGREGATED_NODE_UUID, parent.getIdentifier(),
/**
 * Adds field info for a Document: each field's name, indexed flag,
 * term-vector flags, and norm-omission flag are forwarded to the
 * per-field {@code add} overload.
 */
synchronized public void add(Document doc) {
    // Enhanced for loop replaces the manual Iterator; getFields() returns a
    // raw List in this Lucene version, so the element cast is still needed.
    for (Object o : doc.getFields()) {
        Fieldable field = (Fieldable) o;
        add(field.name(), field.isIndexed(), field.isTermVectorStored(),
            field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(),
            field.getOmitNorms());
    }
}
/**
 * Flattens the document's stored fields into a name -> string-value map.
 * Unstored fields are skipped; if a name occurs more than once, the value
 * added last wins.
 */
private Map<String, String> toMap(final Document d) {
    final HashMap<String, String> stored = new HashMap<String, String>();
    for (final Object raw : d.getFields()) {
        final Fieldable candidate = (Fieldable) raw;
        if (!candidate.isStored()) {
            continue;
        }
        stored.put(candidate.name(), candidate.stringValue());
    }
    return stored;
}
/**
 * Builds a resource view over a Lucene document. The alias field is either
 * synthesized from the explicit {@code alias} argument or recovered from the
 * document itself; all remaining fields are wrapped as LuceneProperty
 * instances with their resource-property mappings attached.
 */
public LuceneResource(String alias, Document document, int docNum, LuceneSearchEngineFactory searchEngineFactory) {
    this.searchEngineFactory = searchEngineFactory;
    String aliasProperty = searchEngineFactory.getAliasProperty();
    this.docNum = docNum;
    if (alias != null) {
        // Alias supplied explicitly: synthesize the alias field ourselves.
        this.alias = alias;
        Field aliasField = new Field(aliasProperty, alias, Field.Store.YES, Field.Index.NOT_ANALYZED);
        aliasField.setOmitNorms(true);
        properties.add(new LuceneProperty(aliasField));
    } else {
        // Otherwise recover the alias from the document, if present.
        Fieldable aliasField = document.getField(aliasProperty);
        if (aliasField != null) {
            properties.add(new LuceneProperty(aliasField));
            this.alias = aliasField.stringValue();
        }
    }
    verifyResourceMapping();
    // Enhanced for loop replaces the raw Iterator; skip the alias field,
    // which was already handled above.
    for (Object o : document.getFields()) {
        Fieldable field = (Fieldable) o;
        if (field.name().equals(aliasProperty)) {
            continue;
        }
        LuceneProperty lProperty = new LuceneProperty(field);
        lProperty.setPropertyMapping(resourceMapping.getResourcePropertyMapping(field.name()));
        properties.add(lProperty);
    }
}
/**
 * Appends a document's stored fields to the fields stream and records the
 * starting file pointer in the index stream. The stream format requires the
 * stored-field count before the field data, hence the two passes.
 */
final void addDocument(Document doc) throws IOException {
    indexStream.writeLong(fieldsStream.getFilePointer());

    // First pass: count stored fields (the count precedes the data on disk).
    int storedCount = 0;
    for (Object o : doc.getFields()) {
        if (((Fieldable) o).isStored())
            storedCount++;
    }
    fieldsStream.writeVInt(storedCount);

    // Second pass: write each stored field in document order.
    for (Object o : doc.getFields()) {
        Fieldable field = (Fieldable) o;
        if (field.isStored())
            writeField(fieldInfos.fieldInfo(field.name()), field);
    }
}
public SolrDocument getDoc(Document doc) { SolrDocument solrDoc = new SolrDocument(); for (Fieldable f : (List<Fieldable>) doc.getFields()) { String fieldName = f.name(); if (returnFields != null && !returnFields.contains(fieldName)) continue; FieldType ft = schema.getFieldTypeNoEx(fieldName); Object val; if (ft == null) { // handle fields not in the schema if (f.isBinary()) val = f.binaryValue(); else val = f.stringValue(); } else { try { if (useFieldObjects && KNOWN_TYPES.contains(ft.getClass())) { val = ft.toObject(f); } else { val = ft.toExternal(f); } } catch (Exception e) { // There is a chance of the underlying field not really matching the // actual field type . So ,it can throw exception LOG.warn("Error reading a field from document : " + solrDoc, e); //if it happens log it and continue continue; } } solrDoc.addField(fieldName, val); } return solrDoc; }
/**
 * Materializes the stored search document into a SearchItem. The version
 * field is optional and defaults to -1 when absent; type, path, and date
 * are read unconditionally.
 */
public SearchItem toResult(int documentId) throws IOException {
    Document document = searcher.doc(documentId);
    String type = document.getFieldable(FieldNames.TYPE).stringValue();
    NumericField date = (NumericField) document.getFieldable(FieldNames.DATE);
    Fieldable path = document.getFieldable(FieldNames.PATH);
    NumericField version = (NumericField) document.getFieldable(FieldNames.VERSION);
    int versionNumber = (version != null) ? version.getNumericValue().intValue() : -1;
    Date modified = new Date(date.getNumericValue().longValue());
    return new SearchItem(Integer.parseInt(type), path.stringValue(), versionNumber, modified);
}
/**
 * Builds an InteractionOntologyTerm from the stored ontology document,
 * optionally running the label through the highlighter.
 */
private InteractionOntologyTerm createOntologyTerm(Document document, Highlighter highlighter) throws IOException {
    String identifier = document.getFieldable("identifier").stringValue();
    String label = document.getFieldable("label").stringValue();
    String databaseLabel = document.getFieldable("databaseLabel").stringValue();
    String fieldName = document.getFieldable("fieldName").stringValue();
    // Use getFieldable for consistency with the lookups above; unlike
    // getField it does not assume the stored entry is a plain Field instance.
    int count = Integer.parseInt(document.getFieldable("count").stringValue());
    if (isHighlight()) {
        label = highlightText("label", label, highlighter);
    }
    InteractionOntologyTerm term = new InteractionOntologyTerm(label, identifier);
    term.setResults(new InteractionOntologyTermResults(databaseLabel, fieldName, count));
    return term;
}
for (Fieldable f : zio.getFields()) { ret.put(f.name(), f.stringValue());
private List<String> getNonEmptyFieldNames() { final List<Fieldable> fields = doc.getFields(); final List<String> names = new ArrayList<String>(fields.size()); for (final Fieldable field : fields) { // NOTE: we do not store the field value since we are never interested in reading the value out of the // document, we are just interested in searching it. This will keep us from adding to the size of the issue // document. if (field.isIndexed()) { names.add(field.name()); } } return names; } }
/** Given the stored field, return the indexed form */ public String storedToIndexed(Fieldable f) { // right now, the transformation of single valued fields like SortableInt // is done when the Field is created, not at analysis time... this means // that the indexed form is the same as the stored field form. return f.stringValue(); }
/** Returns the string value of the first non-binary field with the given
 * name, or null when none exists. With multiple same-named fields, the one
 * added first wins; a document holding only binary fields under this name
 * also yields null. */
public final String get(String name) {
    for (Object o : fields) {
        Fieldable candidate = (Fieldable) o;
        if (candidate.name().equals(name) && !candidate.isBinary()) {
            return candidate.stringValue();
        }
    }
    return null;
}
if (field.isStored()) if (field.isIndexed()) if (field.isTokenized()) if (field.getOmitNorms()) if (field.isBinary()) if (field.isTermVectorStored()) if (field.isStorePositionWithTermVector()) if (field.isStoreOffsetWithTermVector()) if (field.isLazy()) if (field.getBoost() != 1.0f)
public void processFields(final Fieldable[] fields, final int count) throws IOException { fieldState.reset(docState.doc.getBoost()); if (field.isIndexed() && doInvert) { if (!field.isTokenized()) { // un-tokenized field String stringValue = field.stringValue(); final int valueLength = stringValue.length(); Token token = perThread.localToken.reinit(stringValue, 0, valueLength); } else { // tokenized field final TokenStream stream; final TokenStream streamValue = field.tokenStreamValue(); final Reader readerValue = field.readerValue(); String stringValue = field.stringValue(); if (stringValue == null) throw new IllegalArgumentException("field must have either TokenStream, String or Reader value"); fieldState.boost *= field.getBoost();