IndexReader.maxDoc

How to use the maxDoc method in org.apache.lucene.index.IndexReader

Best Java code snippets using org.apache.lucene.index.IndexReader.maxDoc (showing top results out of 918)
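Before the project snippets, a minimal self-contained sketch of the basic call. It is not taken from the index above; the index path is a placeholder and it assumes the Lucene 5+ DirectoryReader/FSDirectory API:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.FSDirectory;

public class MaxDocExample {
  public static void main(String[] args) throws Exception {
    // "/path/to/index" is a placeholder for an existing Lucene index directory
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      System.out.println("maxDoc  = " + reader.maxDoc());  // one greater than the largest document number
      System.out.println("numDocs = " + reader.numDocs()); // live (non-deleted) documents only
    }
  }
}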

origin: neo4j/neo4j

@Override
public long maxCount()
{
  return reader.maxDoc();
}
origin: querydsl/querydsl

  private int maxDoc() throws IOException {
    return searcher.getIndexReader().maxDoc();
  }
}
origin: org.apache.lucene/lucene-core

/** Returns the number of deleted documents. */
public final int numDeletedDocs() {
 return maxDoc() - numDocs();
}
origin: neo4j/neo4j

  private DocIdSetIterator iterateAllDocs()
  {
    Bits liveDocs = MultiFields.getLiveDocs( reader );
    DocIdSetIterator allDocs = DocIdSetIterator.all( reader.maxDoc() );
    if ( liveDocs == null )
    {
      return allDocs;
    }

    return new FilteredDocIdSetIterator( allDocs )
    {
      @Override
      protected boolean match( int doc )
      {
        return liveDocs.get( doc );
      }
    };
  }
}
origin: org.apache.lucene/lucene-core

private TopFieldDocs searchAfter(FieldDoc after, Query query, int numHits, Sort sort,
  boolean doDocScores, boolean doMaxScore) throws IOException {
 final int limit = Math.max(1, reader.maxDoc());
 if (after != null && after.doc >= limit) {
  throw new IllegalArgumentException("after.doc exceeds the number of documents in the reader: after.doc="
    + after.doc + " limit=" + limit);
 }
 final int cappedNumHits = Math.min(numHits, limit);
 final Sort rewrittenSort = sort.rewrite(this);
 final CollectorManager<TopFieldCollector, TopFieldDocs> manager = new CollectorManager<TopFieldCollector, TopFieldDocs>() {
  @Override
  public TopFieldCollector newCollector() throws IOException {
   final boolean fillFields = true;
   // TODO: don't pay the price for accurate hit counts by default
   return TopFieldCollector.create(rewrittenSort, cappedNumHits, after, fillFields, doDocScores, doMaxScore, true);
  }
  @Override
  public TopFieldDocs reduce(Collection<TopFieldCollector> collectors) throws IOException {
   final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()];
   int i = 0;
   for (TopFieldCollector collector : collectors) {
    topDocs[i++] = collector.topDocs();
   }
   return TopDocs.merge(rewrittenSort, 0, cappedNumHits, topDocs, true);
  }
 };
 return search(query, manager);
}
origin: org.apache.lucene/lucene-core

/** Check whether this segment is eligible for caching, regardless of the query. */
private boolean shouldCache(LeafReaderContext context) throws IOException {
 return cacheEntryHasReasonableWorstCaseSize(ReaderUtil.getTopLevelContext(context).reader().maxDoc())
   && leavesToCache.test(context);
}
origin: org.apache.lucene/lucene-core

/** This method may return null if the field does not exist or if it has no terms. */
public static Terms getTerms(IndexReader r, String field) throws IOException {
 final List<LeafReaderContext> leaves = r.leaves();
 if (leaves.size() == 1) {
  return leaves.get(0).reader().terms(field);
 }
 final List<Terms> termsPerLeaf = new ArrayList<>(leaves.size());
 final List<ReaderSlice> slicePerLeaf = new ArrayList<>(leaves.size());
 for (int leafIdx = 0; leafIdx < leaves.size(); leafIdx++) {
  LeafReaderContext ctx = leaves.get(leafIdx);
  Terms subTerms = ctx.reader().terms(field);
  if (subTerms != null) {
   termsPerLeaf.add(subTerms);
   slicePerLeaf.add(new ReaderSlice(ctx.docBase, r.maxDoc(), leafIdx - 1));
  }
 }
 if (termsPerLeaf.size() == 0) {
  return null;
 } else {
  return new MultiTerms(termsPerLeaf.toArray(Terms.EMPTY_ARRAY),
    slicePerLeaf.toArray(ReaderSlice.EMPTY_ARRAY));
 }
}

origin: org.apache.lucene/lucene-core

 private IndexReaderContext build(CompositeReaderContext parent, IndexReader reader, int ord, int docBase) {
  if (reader instanceof LeafReader) {
   final LeafReader ar = (LeafReader) reader;
   final LeafReaderContext atomic = new LeafReaderContext(parent, ar, ord, docBase, leaves.size(), leafDocBase);
   leaves.add(atomic);
   leafDocBase += reader.maxDoc();
   return atomic;
  } else {
   final CompositeReader cr = (CompositeReader) reader;
   final List<? extends IndexReader> sequentialSubReaders = cr.getSequentialSubReaders();
   final List<IndexReaderContext> children = Arrays.asList(new IndexReaderContext[sequentialSubReaders.size()]);
   final CompositeReaderContext newParent;
   if (parent == null) {
    newParent = new CompositeReaderContext(cr, children, leaves);
   } else {
    newParent = new CompositeReaderContext(parent, cr, ord, docBase, children);
   }
   int newDocBase = 0;
   for (int i = 0, c = sequentialSubReaders.size(); i < c; i++) {
    final IndexReader r = sequentialSubReaders.get(i);
    children.set(i, build(newParent, r, i, newDocBase));
    newDocBase += r.maxDoc();
   }
   assert newDocBase == cr.maxDoc();
   return newParent;
  }
 }
}
origin: neo4j/neo4j

private static PartitionSearcher createPartitionSearcher( int maxDoc, int partition, int maxSize )
    throws IOException
{
  PartitionSearcher partitionSearcher = mock( PartitionSearcher.class );
  IndexSearcher indexSearcher = mock( IndexSearcher.class );
  IndexReader indexReader = mock( IndexReader.class );
  when(partitionSearcher.getIndexSearcher()).thenReturn( indexSearcher );
  when( indexSearcher.getIndexReader() ).thenReturn( indexReader );
  when( indexReader.maxDoc() ).thenReturn( maxDoc );
  when( indexSearcher.doc( 0 ) ).thenReturn( createDocument( uniqueDocValue( 1, partition, maxSize ) ) );
  when( indexSearcher.doc( 1 ) ).thenReturn( createDocument( uniqueDocValue( 2, partition, maxSize ) ) );
  when( indexSearcher.doc( 2 ) ).thenReturn( createDocument( uniqueDocValue( 3, partition, maxSize ) ) );
  return partitionSearcher;
}
origin: apache/nifi

long getMaxEventId(final String partitionName) {
  final List<File> allDirectories = getDirectoryManager().getDirectories(0L, Long.MAX_VALUE, partitionName);
  if (allDirectories.isEmpty()) {
    return -1L;
  }
  Collections.sort(allDirectories, DirectoryUtils.NEWEST_INDEX_FIRST);
  for (final File directory : allDirectories) {
    final EventIndexSearcher searcher;
    try {
      searcher = indexManager.borrowIndexSearcher(directory);
    } catch (final IOException ioe) {
      logger.warn("Unable to read from Index Directory {}. Will assume that the index is incomplete and not consider this index when determining max event ID", directory);
      continue;
    }
    try {
      final IndexReader reader = searcher.getIndexSearcher().getIndexReader();
      final int maxDocId = reader.maxDoc() - 1;
      final Document document = reader.document(maxDocId);
      final long eventId = document.getField(SearchableFields.Identifier.getSearchableFieldName()).numericValue().longValue();
      logger.info("Determined that Max Event ID indexed for Partition {} is approximately {} based on index {}", partitionName, eventId, directory);
      return eventId;
    } catch (final IOException ioe) {
      logger.warn("Unable to search Index Directory {}. Will assume that the index is incomplete and not consider this index when determining max event ID", directory, ioe);
    } finally {
      indexManager.returnIndexSearcher(searcher);
    }
  }
  return -1L;
}
origin: querydsl/querydsl

private long innerCount() {
  try {
    final int maxDoc = searcher.getIndexReader().maxDoc();
    if (maxDoc == 0) {
      return 0;
    }
    TotalHitCountCollector collector = new TotalHitCountCollector();
    searcher.search(createQuery(), getFilter(), collector);
    return collector.getTotalHits();
  } catch (IOException e) {
    throw new QueryException(e);
  } catch (IllegalArgumentException e) {
    throw new QueryException(e);
  }
}
origin: org.apache.lucene/lucene-core

 // trimmed excerpt from MultiFields.getLiveDocs: record each leaf's live docs and doc base
 for (int i = 0; i < size; i++) {
  final LeafReaderContext ctx = leaves.get(i);
  liveDocs[i] = ctx.reader().getLiveDocs();
  starts[i] = ctx.docBase;
 }
 starts[size] = reader.maxDoc();
 return new MultiBits(liveDocs, starts, true);
origin: tjake/Solandra

 @Override
 protected Object createValue(IndexReader reader, Entry entryKey)
 throws IOException {
  Entry entry = entryKey;
  String field = entry.field;
  if (reader.maxDoc() == reader.docFreq(new Term(field))) {
   return DocIdSet.EMPTY_DOCIDSET;
  }
  OpenBitSet res = new OpenBitSet(reader.maxDoc());
  TermDocs termDocs = reader.termDocs();
  TermEnum termEnum = reader.terms (new Term (field));
  try {
   do {
    Term term = termEnum.term();
    if (term==null || term.field() != field) break;
    termDocs.seek (termEnum);
    while (termDocs.next()) {
     res.fastSet(termDocs.doc());
    }
   } while (termEnum.next());
  } finally {
   termDocs.close();
   termEnum.close();
  }
  res.flip(0, reader.maxDoc());
  return res;
 }
}
origin: org.apache.lucene/lucene-core

 @Override
 public boolean test(LeafReaderContext context) {
  final int maxDoc = context.reader().maxDoc();
  if (maxDoc < minSize) {
   return false;
  }
  final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
  final float sizeRatio = (float) context.reader().maxDoc() / topLevelContext.reader().maxDoc();
  return sizeRatio >= minSizeRatio;
 }
}
origin: tjake/Solandra

public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
  OpenBitSet result = new OpenBitSet(reader.maxDoc());
  // ... rest of the filter body is not included in this snippet
}
origin: tjake/Solandra

  @Override
  protected Object createValue(IndexReader reader, Entry entryKey) throws IOException
  {
    String field = StringHelper.intern(entryKey.field);
    final String[] retArray = new String[reader.maxDoc()];
    Collection<IColumn> fcEntries = getFieldCacheEntries(reader, field);
    for (IColumn col : fcEntries)
    {
      if (col instanceof DeletedColumn)
        continue;
      int docId = CassandraUtils.readVInt(col.name());
      String val = ByteBufferUtil.string(col.value());
      retArray[docId] = val;
    }
    return retArray;
  }
}
origin: org.apache.lucene/lucene-core

 /**
  * Returns {@link CollectionStatistics} for a field.
  * 
  * This can be overridden for example, to return a field's statistics
  * across a distributed collection.
  * @lucene.experimental
  */
 public CollectionStatistics collectionStatistics(String field) throws IOException {
  final int docCount;
  final long sumTotalTermFreq;
  final long sumDocFreq;

  assert field != null;
  
  Terms terms = MultiFields.getTerms(reader, field);
  if (terms == null) {
   docCount = 0;
   sumTotalTermFreq = 0;
   sumDocFreq = 0;
  } else {
   docCount = terms.getDocCount();
   sumTotalTermFreq = terms.getSumTotalTermFreq();
   sumDocFreq = terms.getSumDocFreq();
  }

  return new CollectionStatistics(field, reader.maxDoc(), docCount, sumTotalTermFreq, sumDocFreq);
 }
}
origin: org.apache.lucene/lucene-core

public TermWeight(IndexSearcher searcher, boolean needsScores,
  float boost, TermContext termStates) throws IOException {
 super(TermQuery.this);
 if (needsScores && termStates == null) {
  throw new IllegalStateException("termStates are required when scores are needed");
 }
 this.needsScores = needsScores;
 this.termStates = termStates;
 this.similarity = searcher.getSimilarity(needsScores);
 final CollectionStatistics collectionStats;
 final TermStatistics termStats;
 if (needsScores) {
  collectionStats = searcher.collectionStatistics(term.field());
  termStats = searcher.termStatistics(term, termStates);
 } else {
  // we do not need the actual stats, use fake stats with docFreq=maxDoc and ttf=-1
  final int maxDoc = searcher.getIndexReader().maxDoc();
  collectionStats = new CollectionStatistics(term.field(), maxDoc, -1, -1, -1);
  termStats = new TermStatistics(term.bytes(), maxDoc, -1);
 }
  this.stats = similarity.computeWeight(boost, collectionStats, termStats);
}
org.apache.lucene.index.IndexReader.maxDoc

Javadoc

Returns one greater than the largest possible document number. This may be used to, e.g., determine how big to allocate an array which will have an element for every document number in an index.
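As the Javadoc suggests, maxDoc() is the value to use when sizing per-document arrays, because deleted documents still occupy document numbers below it. The sketch below is not from the index above; it assumes the Lucene 7.x-era MultiFields.getLiveDocs API that several snippets on this page already use (newer releases expose the same data through MultiBits.getLiveDocs):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.Bits;

public final class LiveDocFlags {
  // Marks, for every document number in [0, maxDoc()), whether that document is still live.
  public static boolean[] liveFlags(IndexReader reader) {
    boolean[] live = new boolean[reader.maxDoc()];   // maxDoc() sizes per-document arrays
    Bits liveDocs = MultiFields.getLiveDocs(reader); // null when the index has no deletions
    for (int docId = 0; docId < live.length; docId++) {
      live[docId] = (liveDocs == null) || liveDocs.get(docId);
    }
    return live;
  }
}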

Popular methods of IndexReader

  • close
    Closes files associated with this index. Also saves any new deletions to disk. No other methods should be called after this has been called.
  • numDocs
    Returns the number of documents in this index.
  • document
  • open
  • docFreq
    Returns the number of documents containing the term. This method returns 0 if the term or field does not exist.
  • leaves
    Returns the reader's leaves, or itself if this reader is atomic. This is a convenience method calling this.getContext().leaves().
  • terms
    Returns an enumeration of all terms starting at a given term. If the given term does not exist, the enumeration is positioned at the first term greater than the supplied term.
  • termDocs
    Returns an enumeration of all the documents which contain term. For each document, the document number and the frequency of the term in that document are provided, for use in search scoring.
  • indexExists
    Returns true if an index exists at the specified directory. If the directory does not exist or if there is no index in it, false is returned.
  • hasDeletions
    Returns true if any documents have been deleted. Implementers should consider overriding this method if maxDoc() or numDocs() are not constant-time operations.
  • isDeleted
    Returns true if document n has been deleted
  • decRef
    Expert: decreases the refCount of this IndexReader instance. If the refCount drops to 0, then this reader is closed.
  • Also: getTermFreqVector, numDeletedDocs, getContext, getRefCount, getTermVector, getTermVectors, incRef (see the combined sketch after this list)
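For reference, a combined sketch that touches several of the methods listed above (maxDoc, numDocs, numDeletedDocs, hasDeletions, docFreq, leaves). It is not taken from the code index; the index path and the title:lucene term are illustrative placeholders, and it targets the current IndexReader API, so the older 3.x-era methods listed above (termDocs, isDeleted, indexExists) are not shown:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.FSDirectory;

public class ReaderStats {
  public static void main(String[] args) throws Exception {
    // "/path/to/index" is a placeholder for an existing Lucene index directory
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      System.out.println("maxDoc         = " + reader.maxDoc());
      System.out.println("numDocs        = " + reader.numDocs());
      System.out.println("numDeletedDocs = " + reader.numDeletedDocs());
      System.out.println("hasDeletions   = " + reader.hasDeletions());
      System.out.println("docFreq(title:lucene) = " + reader.docFreq(new Term("title", "lucene")));
      for (LeafReaderContext leaf : reader.leaves()) { // one entry per index segment
        System.out.println("leaf docBase=" + leaf.docBase + " maxDoc=" + leaf.reader().maxDoc());
      }
    }
  }
}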
