rb.req.getSearcher().getReader().document(docIds.get(0), selector);
// Collector callback: deletes each matched document directly through the
// searcher's IndexReader and tallies the count in the enclosing `deleted` field.
public void collect(int doc, float score) {
  try {
    searcher.getReader().deleteDocument(doc);
    deleted++;
  } catch (IOException e) {
    // don't try to close the searcher on failure for now...
    // try { closeSearcher(); } catch (Exception ee) { SolrException.log(log,ee); }
    // NOTE(review): the trailing `false` flag on this SolrException constructor
    // presumably suppresses logging of this error — confirm against the
    // SolrException constructor in this codebase.
    throw new SolrException(
        SolrException.ErrorCode.SERVER_ERROR, "Error deleting doc# " + doc, e, false);
  }
}
}
/**
 * Builds a collector that evaluates the given aggregate functions during collapsing.
 * For every aggregated field this pre-loads the FieldCache string index and the
 * schema field type, keyed by field name.
 *
 * @param functions aggregate function per aggregate field
 * @param searcher  searcher whose reader backs the field caches
 * @throws RuntimeException wrapping any IOException raised while loading a field cache
 */
private AggregateCollapseCollector(Map<AggregateField, AggregateFunction> functions, SolrIndexSearcher searcher) {
  super(searcher);
  this.functions = functions;
  for (AggregateField aggregated : functions.keySet()) {
    String name = aggregated.getFieldName();
    try {
      fieldCaches.put(name, FieldCache.DEFAULT.getStringIndex(searcher.getReader(), name));
      fieldTypes.put(name, searcher.getSchema().getFieldType(name));
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
}
/**
 * {@inheritDoc}
 *
 * Runs the main query (with filters and sort), records how long producing the
 * uncollapsed DocSet took, loads the FieldCache values for the collapse field,
 * performs the collapse, and packages the result.
 */
public DocumentCollapseResult collapse(Query mainQuery, List<Query> filterQueries, Sort sort) throws IOException {
  long startTime = System.currentTimeMillis();
  // Side effect: doQuery(...) populates `uncollapsedDocSet` used below.
  doQuery(mainQuery, filterQueries, sort);
  timeCreateUncollapedDocset = System.currentTimeMillis() - startTime;
  // One indexed string value per document for the collapse field.
  fieldValues = FieldCache.DEFAULT.getStringIndex(searcher.getReader(), collapseField);
  doCollapsing(uncollapsedDocSet, fieldValues);
  return createDocumentCollapseResult();
}
/**
 * Searches documents of "type II", restricted to the user's permission level,
 * and appends up to 50 hits to {@code results}, skipping ids already in
 * {@code alreadyFound}.
 *
 * @param results      accumulator for matched documents
 * @param searcher     index searcher to run the query against
 * @param q            raw user query string
 * @param up           caller's permissions; used to build the permission filter
 * @param ndocs        requested doc count (NOTE(review): currently unused — 50 is hard-coded below; confirm)
 * @param req          current Solr request (needed by QParser)
 * @param fields       schema fields consulted by append(...)
 * @param alreadyFound ids to exclude from the result list
 * @throws IOException    on index access failure
 * @throws ParseException if {@code q} cannot be parsed
 */
private void SearchDocumentsTypeII(SolrDocumentList results, SolrIndexSearcher searcher,
    String q, UserPermissions up, int ndocs, SolrQueryRequest req,
    Map<String, SchemaField> fields, Set<Integer> alreadyFound)
    throws IOException, ParseException {
  // Permission filter: only documents at the caller's permission level.
  BooleanQuery bq = new BooleanQuery();
  String permLvl = "PermissionLevel:" + up.getPermissionLevel();
  QParser parser = QParser.getParser(permLvl, null, req);
  bq.add(parser.getQuery(), Occur.MUST);
  // FIX: the original read `CachingWrapperFilter(...)` without `new`, which is a
  // compile error — CachingWrapperFilter is a class, not a method.
  Filter filter = new CachingWrapperFilter(new QueryWrapperFilter(bq));
  // NOTE(review): QueryParser's first argument is the default field name; passing
  // the query string `q` as the default field looks wrong — confirm intent.
  QueryParser qp = new QueryParser(q, new StandardAnalyzer());
  Query query = qp.parse(q);
  append(results, searcher.search(query, filter, 50).scoreDocs,
      alreadyFound, fields, new HashMap<String, Object>(), 0,
      searcher.getReader(), true);
}
/** Returns a dictionary to be used when building the spell-checker index.
 * Override the method for custom dictionary.
 *
 * @param req current request; THRESHOLD is read from its params
 * @return a HighFrequencyDictionary over {@code termSourceField}
 * @throws RuntimeException if the threshold param is not a valid float
 */
protected Dictionary getDictionary(SolrQueryRequest req) {
  float threshold;
  try {
    // THRESHOLD request parameter, falling back to the configured default when absent.
    threshold = req.getParams().getFloat(THRESHOLD, DEFAULT_DICTIONARY_THRESHOLD);
  } catch (NumberFormatException e) {
    throw new RuntimeException("Threshold must be a valid positive float", e);
  }
  IndexReader indexReader = req.getSearcher().getReader();
  // Terms from termSourceField; threshold presumably is the minimum fraction of
  // docs a term must appear in to be included — confirm against HighFrequencyDictionary docs.
  return new HighFrequencyDictionary(indexReader, termSourceField, threshold);
}
/**
 * (Re)builds the spell-check index: picks a source IndexReader, constructs a
 * HighFrequencyDictionary from {@code field}, clears the spell checker's index,
 * and re-indexes the dictionary.
 *
 * @throws RuntimeException wrapping any IOException from the spell checker
 */
public void build(SolrCore core, SolrIndexSearcher searcher) {
  IndexReader reader = null;
  try {
    if (sourceLocation == null) {
      // Load from Solr's index
      reader = searcher.getReader();
    } else {
      // Load from Lucene index at given sourceLocation
      // NOTE(review): this just reuses the `reader` field rather than opening
      // anything at sourceLocation here — presumably the field was opened from
      // sourceLocation during init; verify, since the comment implies an open.
      reader = this.reader;
    }
    // Create the dictionary
    dictionary = new HighFrequencyDictionary(reader, field, threshold);
    spellChecker.clearIndex();
    spellChecker.indexDictionary(dictionary);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Returns {version, generation} of the current index commit, or {0, 0} if the
 * commit point cannot be read (the failure is logged, not rethrown).
 */
private long[] getIndexVersion() {
  long[] version = new long[2];
  RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
  try {
    // FIX: fetch the commit once. The original called getIndexCommit() twice,
    // so version and generation could come from two different commits if a
    // commit landed between the calls.
    IndexCommit commit = searcher.get().getReader().getIndexCommit();
    version[0] = commit.getVersion();
    version[1] = commit.getGeneration();
  } catch (IOException e) {
    LOG.warn("Unable to get index version : ", e);
  } finally {
    searcher.decref();
  }
  return version;
}
/**
 * Deletes all documents whose unique-key field matches {@code indexedId}.
 * Closes the writer and (re)opens the searcher first so the delete goes
 * through the reader.
 *
 * @param indexedId the indexed (internal) form of the unique key value
 * @return number of documents deleted
 * @throws IOException   on index access failure
 * @throws SolrException (BAD_REQUEST) if the schema has no unique key field
 */
protected int deleteInIndex(String indexedId) throws IOException {
  if (idField == null)
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Operation requires schema to have a unique key field");
  closeWriter();
  openSearcher();
  IndexReader ir = searcher.getReader();
  // FIX: removed the dead `TermDocs tdocs` local — it was declared, never
  // assigned, and the finally block closing it could never run (always null).
  Term term = new Term(idField.getName(), indexedId);
  int num = ir.deleteDocuments(term);
  if (core.log.isTraceEnabled()) {
    core.log.trace(core.getLogId() + "deleted " + num + " docs matching id "
        + idFieldType.indexedToReadable(indexedId));
  }
  return num;
}
/**
 * Searcher lifecycle hook. On the firstSearcher event (currentSearcher == null)
 * it reloads the existing spell index; on newSearcher events it optionally
 * rebuilds the spell index, either on every commit or only once the index is
 * optimized.
 */
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
  if (currentSearcher == null) {
    // firstSearcher event
    try {
      LOG.info("Loading spell index for spellchecker: " + checker.getDictionaryName());
      checker.reload();
    } catch (IOException e) {
      // NOTE(review): this uses `log` while the rest of the method uses `LOG` —
      // two logger fields appear to be in scope; confirm which is intended.
      log.error("Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e);
    }
  } else {
    // newSearcher event
    if (buildOnCommit) {
      buildSpellIndex(newSearcher);
    } else if (buildOnOptimize) {
      if (newSearcher.getReader().isOptimized()) {
        buildSpellIndex(newSearcher);
      } else {
        LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
      }
    }
  }
}
/**
 * Calculates a tag for the ETag header.
 *
 * @param solrReq current request; its searcher's index version seeds the tag
 * @return a tag
 */
public static String calcEtag(final SolrQueryRequest solrReq) {
  final SolrCore core = solrReq.getCore();
  // Index version changes on commit, so the ETag changes whenever the index does.
  final long currentIndexVersion = solrReq.getSearcher().getReader().getVersion();
  EtagCacheVal etagCache = etagCoreCache.get(core);
  if (null == etagCache) {
    final String etagSeed = core.getSolrConfig().getHttpCachingConfig().getEtagSeed();
    // NOTE(review): get-then-put is a check-then-act race if etagCoreCache is
    // shared across request threads — two threads can briefly install separate
    // EtagCacheVal instances. Harmless only if construction is cheap and
    // idempotent; confirm, or use putIfAbsent on a concurrent map.
    etagCache = new EtagCacheVal(etagSeed);
    etagCoreCache.put(core, etagCache);
  }
  return etagCache.calcEtag(currentIndexVersion);
}
/**
 * Return a QueryScorer suitable for this Query and field.
 *
 * When the per-field {@code hl.requireFieldMatch} flag is set, the scorer is
 * bound to the field (terms must match in that field); otherwise a plain
 * term scorer over the whole query is returned.
 *
 * @param query     The current query
 * @param fieldName The name of the field
 * @param request   The SolrQueryRequest
 */
private Scorer getQueryScorer(Query query, String fieldName, SolrQueryRequest request) {
  boolean fieldMatchRequired =
      request.getParams().getFieldBool(fieldName, HighlightParams.FIELD_MATCH, false);
  return fieldMatchRequired
      ? new QueryTermScorer(query, request.getSearcher().getReader(), fieldName)
      : new QueryTermScorer(query);
}
/**
 * Checks whether a document with the given unique-key value exists in the index.
 * Flushes the writer and (re)opens the searcher first so the check sees all
 * pending updates.
 *
 * @param indexedId the indexed (internal) form of the unique key value
 * @return true if at least one matching document exists
 * @throws IOException   on index access failure
 * @throws SolrException (BAD_REQUEST) if the schema has no unique key field
 */
protected boolean existsInIndex(String indexedId) throws IOException {
  if (idField == null)
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
        "Operation requires schema to have a unique key field");
  closeWriter();
  openSearcher();
  IndexReader reader = searcher.getReader();
  TermDocs termDocs = null;
  try {
    termDocs = reader.termDocs(idTerm(indexedId));
    // One posting is enough to prove existence.
    return termDocs.next();
  } finally {
    if (termDocs != null) {
      try {
        termDocs.close();
      } catch (Exception ignored) {
        // best-effort close; the lookup result is already determined
      }
    }
  }
}
private void doSnapShoot(SolrParams params, SolrQueryResponse rsp, SolrQueryRequest req) { try { IndexDeletionPolicyWrapper delPolicy = core.getDeletionPolicy(); IndexCommit indexCommit = delPolicy.getLatestCommit(); // race? delPolicy.setReserveDuration(indexCommit.getVersion(), reserveCommitDuration); if(indexCommit == null) { indexCommit = req.getSearcher().getReader().getIndexCommit(); } if (indexCommit != null) { new SnapShooter(core, params.get("location")).createSnapAsync(indexCommit, this); } } catch (Exception e) { LOG.warn("Exception during creating a snapshot", e); rsp.add("exception", e); } }
/** * Calculate the appropriate last-modified time for Solr relative the current request. * * @param solrReq * @return the timestamp to use as a last modified time. */ public static long calcLastModified(final SolrQueryRequest solrReq) { final SolrCore core = solrReq.getCore(); final SolrIndexSearcher searcher = solrReq.getSearcher(); final LastModFrom lastModFrom = core.getSolrConfig().getHttpCachingConfig().getLastModFrom(); long lastMod; try { // assume default, change if needed (getOpenTime() should be fast) lastMod = LastModFrom.DIRLASTMOD == lastModFrom ? IndexReader.lastModified(searcher.getReader().directory()) : searcher.getOpenTime(); } catch (IOException e) { // we're pretty freaking screwed if this happens throw new SolrException(ErrorCode.SERVER_ERROR, e); } // Get the time where the searcher has been opened // We get rid of the milliseconds because the HTTP header has only // second granularity return lastMod - (lastMod % 1000L); }
/**
 * Collects status information for one core: identity, paths, uptime, and a
 * summary of its index. Returns an empty list when the named core is unknown.
 *
 * @param cores the container to look the core up in
 * @param cname the core's name
 * @return status entries (empty if no such core)
 * @throws IOException on index access failure
 */
protected NamedList<Object> getCoreStatus(CoreContainer cores, String cname) throws IOException {
  NamedList<Object> info = new SimpleOrderedMap<Object>();
  SolrCore core = cores.getCore(cname);
  if (core == null) {
    return info;
  }
  try {
    info.add("name", core.getName());
    info.add("instanceDir", normalizePath(core.getResourceLoader().getInstanceDir()));
    info.add("dataDir", normalizePath(core.getDataDir()));
    info.add("startTime", new Date(core.getStartTime()));
    info.add("uptime", System.currentTimeMillis() - core.getStartTime());
    RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
    try {
      info.add("index", LukeRequestHandler.getIndexInfo(searcherRef.get().getReader(), false));
    } finally {
      // Always release the ref-counted searcher.
      searcherRef.decref();
    }
  } finally {
    // getCore() increments the core's ref count; balance it here.
    core.close();
  }
  return info;
}
false); NamedList response = new SimpleOrderedMap(); IndexReader reader = rb.req.getSearcher().getReader(); boolean collate = params.getBool(SPELLCHECK_COLLATE, false); SpellingResult spellingResult = spellChecker.getSuggestions(tokens,
highlightQuery = rewrite ? highlightQuery.rewrite(req.getSearcher().getReader()) : highlightQuery;
/**
 * Builds a MoreLikeThis helper configured from request params: resolves the
 * similarity fields (required), then transcribes the MLT tuning parameters
 * (term/doc frequency floors, word length bounds, query-term caps, boosting)
 * onto a MoreLikeThis instance backed by the searcher's reader.
 *
 * @param params   request parameters; MoreLikeThisParams.SIMILARITY_FIELDS is required
 * @param searcher searcher providing the reader, schema and analyzer
 * @throws SolrException (BAD_REQUEST) when no similarity field is given
 */
public MoreLikeThisHelper( SolrParams params, SolrIndexSearcher searcher )
{
  this.searcher = searcher;
  this.reader = searcher.getReader();
  this.uniqueKeyField = searcher.getSchema().getUniqueKeyField();
  // A DocSet is only needed when faceting is requested alongside MLT.
  this.needDocSet = params.getBool(FacetParams.FACET,false);

  SolrParams required = params.required();
  String[] fields = splitList.split( required.get(MoreLikeThisParams.SIMILARITY_FIELDS) );
  if( fields.length < 1 ) {
    throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
        "MoreLikeThis requires at least one similarity field: "+MoreLikeThisParams.SIMILARITY_FIELDS );
  }

  this.mlt = new MoreLikeThis( reader ); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );
  mlt.setFieldNames(fields);
  mlt.setAnalyzer( searcher.getSchema().getAnalyzer() );

  // configurable params — each falls back to the MoreLikeThis default when absent
  mlt.setMinTermFreq(       params.getInt(MoreLikeThisParams.MIN_TERM_FREQ,         MoreLikeThis.DEFAULT_MIN_TERM_FREQ));
  mlt.setMinDocFreq(        params.getInt(MoreLikeThisParams.MIN_DOC_FREQ,          MoreLikeThis.DEFAULT_MIN_DOC_FREQ));
  mlt.setMinWordLen(        params.getInt(MoreLikeThisParams.MIN_WORD_LEN,          MoreLikeThis.DEFAULT_MIN_WORD_LENGTH));
  mlt.setMaxWordLen(        params.getInt(MoreLikeThisParams.MAX_WORD_LEN,          MoreLikeThis.DEFAULT_MAX_WORD_LENGTH));
  mlt.setMaxQueryTerms(     params.getInt(MoreLikeThisParams.MAX_QUERY_TERMS,       MoreLikeThis.DEFAULT_MAX_QUERY_TERMS));
  mlt.setMaxNumTokensParsed(params.getInt(MoreLikeThisParams.MAX_NUM_TOKENS_PARSED, MoreLikeThis.DEFAULT_MAX_NUM_TOKENS_PARSED));
  mlt.setBoost(            params.getBool(MoreLikeThisParams.BOOST, false ) );
  // Optional per-field boosts in "field^boost" form (qf-style).
  boostFields = SolrPluginUtils.parseFieldBoosts(params.getParams(MoreLikeThisParams.QF));
}