// NOTE(review): this line looks like several fragments fused together during
// extraction: the `throw` immediately after `return false` is unreachable as
// written, and `reader` is declared twice with near-identical initializers —
// verify against the original source file(s) before relying on this.
if (!(((SolrIndexReader) rb.req.getSearcher().getIndexReader()).getWrappedReader() instanceof lucandra.IndexReader))
    return false;
throw new IOException("Missing core name");
// Unwrap the Solr reader to get at the underlying lucandra IndexReader.
IndexReader reader = (IndexReader) ((SolrIndexReader) rb.req.getSearcher().getIndexReader()).getWrappedReader();
IndexReader reader = (IndexReader) ((SolrIndexReader) rb.req.getSearcher().getIndexReader()) .getWrappedReader();
/**
 * Builds this component from the given searcher's index.
 *
 * Currently only captures the searcher's IndexReader into the {@code reader}
 * field; the dictionary construction is sketched in the commented-out code
 * below and not yet implemented.
 *
 * @param core     the owning Solr core (currently unused here)
 * @param searcher searcher whose IndexReader is captured
 * @throws IOException declared for future index access (not thrown today)
 */
public void build(SolrCore core, SolrIndexSearcher searcher) throws IOException {
    LOG.info("build()");
    reader = searcher.getIndexReader();
    //dictionary = new HighFrequencyDictionary(reader, field, threshold);
    // first, use class above to get all terms above the frequency
    // then execute a pivot facet
}
/**
 * Fetches the stored {@link Document} with docid {@code n}, loading only the
 * fields chosen by the supplied {@link org.apache.lucene.document.FieldSelector}.
 * This method does not currently use the Solr document cache.
 *
 * @see IndexReader#document(int, FieldSelector)
 */
public Document doc(int n, FieldSelector fieldSelector) throws IOException {
    return getIndexReader().document(n, fieldSelector);
}
/** Returns the version number of the index the given searcher has open. */
private long getIndexVersion(SolrIndexSearcher searcher) {
    return searcher.getIndexReader().getVersion();
}
/**
 * Builds and returns a new TaggerFstCorpus from the captured reader,
 * tagged with the searcher's current index version.
 *
 * NOTE(review): the trailing `});` closes an enclosing anonymous class whose
 * declaration is outside this view. The meaning of the trailing literals
 * 1 and 100 is not visible here — confirm against the TaggerFstCorpus
 * constructor.
 */
public TaggerFstCorpus run() throws IOException {
    return new TaggerFstCorpus(reader, searcher.getIndexReader().getVersion(),
        null, fstInfo.indexedField, fstInfo.storedField, fstInfo.analyzer,
        fstInfo.partialMatches,1,100);
} });
private void doSearch1(SolrDocumentList results, SolrIndexSearcher searcher, String q, Filter filter, int ndocs, SolrQueryRequest req, Map<String,SchemaField> fields, Set<Integer> alreadyFound) throws IOException { // build custom query and extra fields Query query = null; //buildCustomQuery1(q); Map<String,Object> extraFields = new HashMap<String,Object>(); extraFields.put("search_type", "search1"); boolean includeScore = req.getParams().get(CommonParams.FL).contains("score"); int maxDocsPerSearcherType = 0; float maprelScoreCutoff = 2.0f; append(results, searcher.search( query, filter, maxDocsPerSearcherType).scoreDocs, alreadyFound, fields, extraFields, maprelScoreCutoff , searcher.getIndexReader(), includeScore); }
private void doSearch1(SolrDocumentList results, SolrIndexSearcher searcher, String q, Filter filter, int ndocs, SolrQueryRequest req, Map<String,SchemaField> fields, Set<Integer> alreadyFound) throws IOException { // build custom query and extra fields Query query = null; //buildCustomQuery1(q); Map<String,Object> extraFields = new HashMap<String,Object>(); extraFields.put("search_type", "search1"); boolean includeScore = req.getParams().get(CommonParams.FL).contains("score"); int maxDocsPerSearcherType = 0; float maprelScoreCutoff = 2.0f; append(results, searcher.search( query, filter, maxDocsPerSearcherType).scoreDocs, alreadyFound, fields, extraFields, maprelScoreCutoff , searcher.getIndexReader(), includeScore); }
/**
 * Returns the stored {@link Document} for docid {@code i}.
 *
 * The result is served from — and written back to — the document cache when
 * one is configured. With lazy field loading enabled and a non-null field
 * filter, only the listed fields are loaded eagerly; the remainder are
 * available lazily.
 */
public Document doc(int i, Set<String> fields) throws IOException {
    if (documentCache != null) {
        Document cached = (Document) documentCache.get(i);
        if (cached != null) {
            return cached;
        }
    }

    final Document d;
    if (enableLazyFieldLoading && fields != null) {
        d = getIndexReader().document(i, new SetNonLazyFieldSelector(fields));
    } else {
        d = getIndexReader().document(i);
    }

    if (documentCache != null) {
        documentCache.put(i, d);
    }
    return d;
}
// NOTE(review): fragment — the enclosing method and the matching else-branch
// are outside this view.
LOG.info("build()");
if (sourceLocation == null) {
    // No external dictionary file configured: derive the dictionary from the
    // live index, keeping terms at or above the configured frequency threshold.
    reader = searcher.getIndexReader();
    dictionary = new HighFrequencyDictionary(reader, field, threshold);
} else {
// NOTE(review): fragment — the enclosing method and the matching else-branch
// are outside this view.
LOG.info("build()");
if (sourceLocation == null) {
    // No external dictionary file configured: derive the dictionary from the
    // live index, keeping terms at or above the configured frequency threshold.
    reader = searcher.getIndexReader();
    dictionary = new HighFrequencyDictionary(reader, field, threshold);
} else {
// Rebuild the FST corpus from the current reader, stamped with the current
// index version so staleness can be detected later.
// NOTE(review): the meaning of the trailing literals 1 and 200 is not visible
// here — confirm against the TaggerFstCorpus constructor signature.
corpus = new TaggerFstCorpus(reader, searcher.getIndexReader().getVersion(), null,
    corpusInfo.indexedField, corpusInfo.storedField, corpusInfo.analyzer,
    corpusInfo.partialMatches,1,200);
@Override public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) { if (currentSearcher == null) { // firstSearcher event try { LOG.info("Loading spell index for spellchecker: " + checker.getDictionaryName()); checker.reload(core, newSearcher); } catch (IOException e) { LOG.error( "Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e); } } else { // newSearcher event if (buildOnCommit) { buildSpellIndex(newSearcher); } else if (buildOnOptimize) { if (newSearcher.getIndexReader().leaves().size() == 1) { buildSpellIndex(newSearcher); } else { LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName()); } } } }
// Rebuild the FST corpus from the current reader, stamped with the current
// index version so staleness can be detected later.
// NOTE(review): the meaning of the trailing literals 1 and 200 is not visible
// here — confirm against the TaggerFstCorpus constructor signature.
corpus = new TaggerFstCorpus(reader, searcher.getIndexReader().getVersion(), null,
    corpusInfo.indexedField, corpusInfo.storedField, corpusInfo.analyzer,
    corpusInfo.partialMatches,1,200);
/**
 * If this {@link IndexConfiguration} is still in sync with the version
 * of the {@link #getIndex() SolrCore}. Non-runtime configurations never go
 * stale, so this will return true if
 * <code>{@link #isRuntimeGeneration()} == false </code>
 * @return <code>true</code> if still active. Otherwise <code>false</code>
 */
public boolean isCurrent() {
    if (!runtimeGeneration) {
        return true;
    }
    // Runtime-generated: compare our recorded version against the live index,
    // making sure to release the ref-counted searcher in all cases.
    RefCounted<SolrIndexSearcher> searcherRef = index.getSearcher();
    try {
        return indexVersion == searcherRef.get().getIndexReader().getVersion();
    } finally {
        searcherRef.decref();
    }
}
/**
 * {@inheritDoc}
 */
@Override
protected void doQuery(Query mainQuery, List<Query> filterQueries, Sort sort) throws IOException {
    final int maxDoc = searcher.maxDoc();
    if (containsSortOnScore(sort)) {
        // Score participates in the sort: gather per-document scores while
        // building the uncollapsed doc set so the comparator can use them.
        DocSetScoreCollector docSetCollector = new DocSetScoreCollector(searcher.maxDoc());
        long startTime = System.currentTimeMillis();
        DocSet filter = searcher.getDocSet(filterQueries);
        uncollapsedDocSet = searcher.getDocSet(mainQuery, filter, docSetCollector);
        timeCreateUncollapedDocset = System.currentTimeMillis() - startTime;
        if (sort == null) {
            // No explicit sort given: default to descending score.
            sort = new Sort(new SortField("score", SortField.SCORE, true));
        }
        float[] scores;
        if (uncollapsedDocSet instanceof DocSetScoreCollector.DelegateDocSet) {
            // The doc set itself carries the scores captured during collection.
            scores = ((DocSetScoreCollector.DelegateDocSet) uncollapsedDocSet).getScores();
        } else {
            scores = docSetCollector.getScores();
        }
        documentComparator = new DocumentComparator(sort, maxDoc, searcher.getIndexReader(), scores);
    } else {
        // No score sort: a plain doc set without score collection suffices.
        long startTime = System.currentTimeMillis();
        DocSet filter = searcher.getDocSet(filterQueries);
        uncollapsedDocSet = searcher.getDocSet(mainQuery, filter);
        timeCreateUncollapedDocset = System.currentTimeMillis() - startTime;
        documentComparator = new DocumentComparator(sort, maxDoc, searcher.getIndexReader());
    }
}
/**
 * Returns the number of segments (leaf readers) in the currently open index.
 *
 * FIX: the previous one-liner called {@code getCore().getSearcher().get()}
 * without ever releasing the reference; every ref-counted searcher obtained
 * from {@code SolrCore.getSearcher()} must be {@code decref()}'d, otherwise a
 * searcher reference leaks on each call (cf. the decref pattern used
 * elsewhere in this codebase).
 */
public int getNumberOfSegments() {
    RefCounted<SolrIndexSearcher> searcherRef = this.getCore().getSearcher();
    try {
        return searcherRef.get().getIndexReader().leaves().size();
    } finally {
        searcherRef.decref();
    }
}
/**
 * Returns the number of segments (leaf readers) in the currently open index.
 *
 * FIX: the previous one-liner called {@code getCore().getSearcher().get()}
 * without ever releasing the reference; every ref-counted searcher obtained
 * from {@code SolrCore.getSearcher()} must be {@code decref()}'d, otherwise a
 * searcher reference leaks on each call (cf. the decref pattern used
 * elsewhere in this codebase).
 */
public int getNumberOfSegments() {
    RefCounted<SolrIndexSearcher> searcherRef = this.getCore().getSearcher();
    try {
        return searcherRef.get().getIndexReader().leaves().size();
    } finally {
        searcherRef.decref();
    }
}
/**
 * Builds a WordBreakCompoundRewriterFactory from the rewriter configuration
 * in {@code args}, wiring in a per-request IndexReader supplier.
 */
@Override
public RewriterFactory createRewriterFactory(final NamedList<?> args, ResourceLoader resourceLoader) {
    // the minimum frequency of the term in the index' dictionary field to be
    // considered a valid compound or constituent
    final Integer minSuggestionFreq = getOrDefault(args, "minSuggestionFrequency", DEFAULT_MIN_SUGGESTION_FREQ);
    // the maximum length of a combined term
    final Integer maxCombineLength = getOrDefault(args, "maxCombineWordLength", DEFAULT_MAX_COMBINE_LENGTH);
    // the minimum break term length
    final Integer minBreakLength = getOrDefault(args, "minBreakLength", DEFAULT_MIN_BREAK_LENGTH);
    // the index "dictionary" field used to verify compounds / constituents
    final String indexField = (String) args.get("dictionaryField");
    // terms triggering a reversal of the surrounding compound,
    // e.g. "tasche AUS samt" -> samttasche
    final List<String> reverseCompoundTriggerWords = (List<String>) args.get("reverseCompoundTriggerWords");
    final Integer maxDecompoundExpansions = getOrDefault(args, "decompound.maxExpansions",
        DEFAULT_MAX_DECOMPOUND_EXPANSIONS);
    if (maxDecompoundExpansions < 0) {
        throw new IllegalArgumentException("decompound.maxExpansions >= 0 expected. Found: "
            + maxDecompoundExpansions);
    }
    // whether decompound candidates must be verified against the collation
    final boolean verifyDecompoundCollation = getOrDefault(args, "decompound.verifyCollation", Boolean.FALSE);
    // define whether we should always try to add a reverse compound
    final boolean alwaysAddReverseCompounds = getOrDefault(args, "alwaysAddReverseCompounds", Boolean.FALSE);
    // the indexReader has to be supplied on a per-request basis from a
    // request thread-local, hence the Supplier indirection
    final Supplier<IndexReader> indexReaderSupplier = () ->
        SolrRequestInfo.getRequestInfo().getReq().getSearcher().getIndexReader();
    return new querqy.lucene.contrib.rewrite.WordBreakCompoundRewriterFactory(indexReaderSupplier,
        indexField, minSuggestionFreq, maxCombineLength, minBreakLength, reverseCompoundTriggerWords,
        alwaysAddReverseCompounds, maxDecompoundExpansions, verifyDecompoundCollation);
}
/**
 * Computes the top-N term predictions for every document in the current
 * result list and attaches them to the response under this component's
 * prefix, together with the elapsed time in milliseconds.
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (!isEnabled(rb)) {
        return;
    }
    final long startMillis = System.currentTimeMillis();

    // Prediction knobs from the request parameters.
    final SolrParams params = rb.req.getParams();
    final int topN = getTopN(params);
    final boolean binary = getBinary(params);
    final boolean logTfs = getLogTfs(params);
    final boolean includeExisting = getIncludeExisting(params);

    final SolrIndexSearcher searcher = rb.req.getSearcher();
    final IndexReader indexReader = searcher.getIndexReader();
    final Analyzer analyzer = searcher.getSchema().getIndexAnalyzer();
    final String uniqueKeyField = searcher.getSchema().getUniqueKeyField().getName();

    final NamedList<NamedList<Double>> topPredictions = new NamedList<NamedList<Double>>();
    final DocListAndSet docs = rb.getResults();
    final DocIterator docIt = docs.docList.iterator();
    while (docIt.hasNext()) {
        final int docNum = docIt.nextDoc();
        // Per-field term frequencies feed the prediction model.
        final Map<String, Map<String,Integer>> tf =
            getFieldTermFrequencyCounts(fields, indexReader, analyzer, docNum);
        final NamedList<Double> predictions = predict(tf, topN, binary, logTfs, includeExisting);
        final String uniqueFieldValue = getUniqueKeyFieldValue(indexReader, analyzer, uniqueKeyField, docNum);
        topPredictions.add(String.format("%s:%s", uniqueKeyField, uniqueFieldValue), predictions);
    }

    final NamedList<Object> results = new NamedList<Object>();
    results.add("Time", System.currentTimeMillis() - startMillis);
    results.add("values", topPredictions);
    rb.rsp.add(getPrefix(), results);
}
// NOTE(review): fragment of a constructor/initializer — the enclosing
// definition is outside this view.
// Hold on to the searcher's reader for the lifetime of this object.
this.reader = searcher.getIndexReader();
this.uniqueKeyField = uniqueKeyField;
// Whether faceting was requested on this request — presumably used later to
// decide if a DocSet must be computed; verify against the consuming code.
this.needDocSet = params.getBool(FacetParams.FACET,false);