/**
 * Creates a cache-entry key for a custom comparator/parser.
 *
 * @param field  field name; interned so it can later be compared cheaply
 * @param custom the custom comparator or parser associated with this entry
 */
Entry(String field, Object custom) {
    this.custom = custom;
    this.field = StringHelper.intern(field);
}
// NOTE(review): this fragment is truncated and not compilable as shown — the '{'
// after "throws IOException" is missing and the method body is cut off mid-stream.
// From the visible declarations it presumably builds a per-document ordinal array
// (retArray) alongside a term table (mterms, sized maxDoc()+1, likely to leave a
// slot for a leading null/sentinel) — TODO confirm against the complete source.
@Override protected Object createValue(IndexReader reader, Entry entryKey) throws IOException String field = StringHelper.intern(entryKey.field); final int[] retArray = new int[reader.maxDoc()]; String[] mterms = new String[reader.maxDoc() + 1];
/**
 * Builds the per-document string-value array for {@code entryKey.field} by
 * scanning the field-cache columns stored for this reader.
 *
 * @param reader   index reader whose documents are covered; sizes the result
 * @param entryKey cache key carrying the field name to load
 * @return a {@code String[]} indexed by document id holding each doc's value
 *         (entries stay {@code null} for docs with no column)
 * @throws IOException if a column value cannot be decoded
 */
@Override
protected Object createValue(IndexReader reader, Entry entryKey) throws IOException {
    String fieldName = StringHelper.intern(entryKey.field);
    final String[] values = new String[reader.maxDoc()];
    for (IColumn column : getFieldCacheEntries(reader, fieldName)) {
        if (column instanceof DeletedColumn) {
            continue; // tombstoned entry — carries no usable value
        }
        // the column name encodes the document id as a variable-length int
        int docId = CassandraUtils.readVInt(column.name());
        values[docId] = ByteBufferUtil.string(column.value());
    }
    return values;
}
}
/**
 * Builds a dictionary over the terms of one field of an index.
 *
 * @param reader index supplying the terms
 * @param field  field name; interned before being stored
 */
public LuceneDictionary(IndexReader reader, String field) {
    this.field = StringHelper.intern(field);
    this.reader = reader;
}
/**
 * Creates a dictionary restricted to sufficiently frequent terms of a field.
 *
 * @param reader index supplying the terms
 * @param field  field name; interned before being stored
 * @param thresh document-frequency threshold for admitting a term
 *               (exact semantics defined where {@code thresh} is consumed —
 *               TODO confirm fraction vs. absolute count)
 */
public HighFrequencyDictionary(IndexReader reader, String field, float thresh) {
    this.thresh = thresh;
    this.field = StringHelper.intern(field);
    this.reader = reader;
}
/**
 * Constructs a dictionary over one field, keeping only terms whose frequency
 * passes {@code thresh}.
 *
 * @param reader source index
 * @param field  field to enumerate; the name is interned on construction
 * @param thresh frequency cutoff applied elsewhere when terms are read
 */
public HighFrequencyDictionary(IndexReader reader, String field, float thresh) {
    this.reader = reader;
    // intern once so later field-name comparisons can rely on a canonical instance
    this.field = StringHelper.intern(field);
    this.thresh = thresh;
}
/**
 * Populates {@code mapper} with the terms of {@code fieldName} for one
 * document: first tries the term vector created at index time, and only when
 * none was stored rebuilds the terms from the inverted index.
 *
 * @param indexReader    index source
 * @param documentNumber source document to access
 * @param fieldName      field to resolve; interned before use
 * @param mapper         mapper to be populated with term data
 * @throws IOException on index access failure
 */
public void accept(IndexReader indexReader, int documentNumber, String fieldName,
                   TermVectorMapper mapper) throws IOException {
    String interned = StringHelper.intern(fieldName);
    decoratedMapper.decorated = mapper;
    decoratedMapper.termVectorStored = false;
    indexReader.getTermFreqVector(documentNumber, interned, decoratedMapper);
    if (decoratedMapper.termVectorStored) {
        return; // a stored vector was found; the mapper is already populated
    }
    // no stored vector — fall back to resolving terms via the inverted index
    mapper.setDocumentNumber(documentNumber);
    build(indexReader, interned, mapper, documentNumber);
}
/**
 * Creates a writer that serializes a Solr response as JSON.
 *
 * @param writer destination for the serialized output
 * @param req    request supplying the JSON formatting parameters
 * @param rsp    response to serialize
 */
public JSONWriter(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) {
    super(writer, req, rsp);
    wrapperFunction = req.getParams().get(JSON_WRAPPER_FUNCTION);
    // interned; presumably compared by identity where the style is consumed —
    // TODO confirm against the callers
    namedListStyle = StringHelper.intern(req.getParams().get(JSON_NL_STYLE, JSON_NL_FLAT));
}
// NOTE(review): fragment is truncated — the visible body ends mid-method with no
// closing brace. Also note: internedFieldName is computed but unused in the
// visible code; the Term is built from the uninterned fieldName. Presumably the
// interned name is used further down for identity comparison while enumerating
// terms — TODO confirm against the complete source.
public int addStopWords(IndexReader reader, String fieldName, int maxDocFreq) throws IOException { HashSet<String> stopWords = new HashSet<String>(); String internedFieldName = StringHelper.intern(fieldName); TermEnum te = reader.terms(new Term(fieldName)); Term term = te.term();
// NOTE(review): mid-method fragment, surrounding method not visible. Interns the
// field name and positions a TermEnum at the first term of the field; te.term()
// here yields the current (first) term. Presumably a term-enumeration loop
// follows, using internedFieldName for identity comparison — TODO confirm.
String internedFieldName = StringHelper.intern(field); TermEnum te = indexReader.terms(new Term(field)); Term term = te.term();
/**
 * Loads all values of {@code field} from {@code reader} through the supplied
 * type-specific loader and returns the assembled field data.
 *
 * <p>The field name is interned before use; the suppressed StringEquality
 * warning suggests downstream code compares it by identity — TODO confirm.
 *
 * @param reader index to read terms and documents from
 * @param field  field whose data is loaded
 * @param loader accumulates the data and builds the final result
 * @return field data produced by {@code loader.buildFieldData()}
 * @throws IOException on index access failure
 */
@SuppressWarnings({"StringEquality"})
public static <T extends FieldData> T load(final IndexReader reader, String field,
                                           final TypeLoader<T> loader) throws IOException {
    logger.info("Loading field {}", field);
    final String internedField = StringHelper.intern(field);
    loader.init(internedField, reader.maxDoc());
    // keep sweeping until the loader reports that no further pass is needed
    while (true) {
        readTermsAndDocs(reader, internedField, loader);
        if (!loader.finalizeSweep()) {
            break;
        }
    }
    return loader.buildFieldData();
}
// NOTE(review): mid-method fragment, enclosing method not visible. Captures the
// interned name and the type of the key field from ffs; the sorted flag starts
// optimistic and is presumably cleared later if out-of-order values are seen —
// TODO confirm against the complete source.
String idName = StringHelper.intern(ffs.keyField.getName()); FieldType idType = ffs.keyField.getType(); boolean sorted=true; // assume sorted until we discover it's not
// NOTE(review): fragment starts mid-expression — the string literal is the tail
// of an error-message argument (presumably a thrown exception when the schema
// lacks a unique key field — TODO confirm). The interned unique-key field name
// is then cached in idField.
"QueryElevationComponent requires the schema to have a uniqueKeyField" ); idField = StringHelper.intern(sf.getName());
// NOTE(review): mid-method fragment. Interns the field name and starts with an
// empty counts array; termCounts is presumably reallocated/grown as terms are
// processed further down — TODO confirm against the complete source.
String fieldName = StringHelper.intern(field); int[] termCounts = new int[0];
// NOTE(review): fragment is truncated — the for-loop body is cut off before its
// closing brace. Per requested field: intern the name, then resolve its schema
// FieldType unless raw output was requested; fields unknown to the schema (or in
// raw mode) fall back to a plain StrField so term text passes through untyped.
boolean raw = params.getBool(TermsParams.TERMS_RAW, false); for (int j = 0; j < fields.length; j++) { String field = StringHelper.intern(fields[j]); FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field); if (ft==null) ft = new StrField();