/**
 * Sorts the entire array according to the given {@link Comparator}, using the
 * Tim sort algorithm (binary sort is used internally for small arrays).
 *
 * @param a    the array to sort in place
 * @param comp the comparator that defines the sort order
 */
public static <T> void timSort(T[] a, Comparator<? super T> comp) {
  timSort(a, 0, a.length, comp);
}
/**
 * Sorts the entire array into natural order, using the Tim sort algorithm
 * (binary sort is used internally for small arrays).
 *
 * @param a the array to sort in place; elements must be mutually comparable
 */
public static <T extends Comparable<? super T>> void timSort(T[] a) {
  timSort(a, 0, a.length);
}
public void sort() { // Tim sort performs well on already sorted arrays: if (count > 1) ArrayUtil.timSort(points, 0, count); }
/**
 * Sorts the given array slice in natural order. This method uses the Tim sort
 * algorithm, but falls back to binary sort for small arrays.
 *
 * <p>Range checks follow the {@code java.util.Arrays.sort(T[], int, int)}
 * convention: the original code silently returned for {@code fromIndex > toIndex}
 * (the slice length was non-positive), masking caller bugs.
 *
 * @param fromIndex start index (inclusive)
 * @param toIndex   end index (exclusive)
 * @throws IllegalArgumentException       if {@code fromIndex > toIndex}
 * @throws ArrayIndexOutOfBoundsException if {@code fromIndex < 0} or
 *                                        {@code toIndex > a.length}
 */
public static <T extends Comparable<? super T>> void timSort(T[] a, int fromIndex, int toIndex) {
  if (fromIndex > toIndex) {
    throw new IllegalArgumentException("fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")");
  }
  if (fromIndex < 0 || toIndex > a.length) {
    throw new ArrayIndexOutOfBoundsException(
        "range [" + fromIndex + ", " + toIndex + ") out of bounds for length " + a.length);
  }
  if (toIndex - fromIndex <= 1) {
    return; // zero or one element: already sorted
  }
  timSort(a, fromIndex, toIndex, Comparator.naturalOrder());
}
BitSetConjunctionDISI(DocIdSetIterator lead, Collection<BitSetIterator> bitSetIterators) { this.lead = lead; assert bitSetIterators.size() > 0; this.bitSetIterators = bitSetIterators.toArray(new BitSetIterator[0]); // Put the least costly iterators first so that we exit as soon as possible ArrayUtil.timSort(this.bitSetIterators, (a, b) -> Long.compare(a.cost(), b.cost())); this.bitSets = new BitSet[this.bitSetIterators.length]; int minLen = Integer.MAX_VALUE; for (int i = 0; i < this.bitSetIterators.length; ++i) { BitSet bitSet = this.bitSetIterators[i].getBitSet(); this.bitSets[i] = bitSet; minLen = Math.min(minLen, bitSet.length()); } this.minLength = minLen; }
assert privateSegment == null || updates.deleteTerms.isEmpty() : "segment private packet should only have del queries"; Term termsArray[] = updates.deleteTerms.keySet().toArray(new Term[updates.deleteTerms.size()]); ArrayUtil.timSort(termsArray); PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder(); for (Term term : termsArray) {
/** * Creates a new {@link TermInSetQuery} from the given collection of terms. */ public TermInSetQuery(String field, Collection<BytesRef> terms) { BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]); // already sorted if we are a SortedSet with natural order boolean sorted = terms instanceof SortedSet && ((SortedSet<BytesRef>)terms).comparator() == null; if (!sorted) { ArrayUtil.timSort(sortedTerms); } PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder(); BytesRefBuilder previous = null; for (BytesRef term : sortedTerms) { if (previous == null) { previous = new BytesRefBuilder(); } else if (previous.get().equals(term)) { continue; // deduplicate } builder.add(field, term); previous.copyBytes(term); } this.field = field; termData = builder.finish(); termDataHashCode = termData.hashCode(); }
ArrayUtil.timSort(scoreTerms, scoreTermSortByTermComp);
@Override public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { MultiPostingsEnum docsEnum; // Can only reuse if incoming enum is also a MultiDocsEnum if (reuse != null && reuse instanceof MultiPostingsEnum) { docsEnum = (MultiPostingsEnum) reuse; // ... and was previously created w/ this MultiTermsEnum: if (!docsEnum.canReuse(this)) { docsEnum = new MultiPostingsEnum(this, subs.length); } } else { docsEnum = new MultiPostingsEnum(this, subs.length); } int upto = 0; ArrayUtil.timSort(top, 0, numTop, INDEX_COMPARATOR); for(int i=0;i<numTop;i++) { final TermsEnumWithSlice entry = top[i]; assert entry.index < docsEnum.subPostingsEnums.length: entry.index + " vs " + docsEnum.subPostingsEnums.length + "; " + subs.length; final PostingsEnum subPostingsEnum = entry.terms.postings(docsEnum.subPostingsEnums[entry.index], flags); assert subPostingsEnum != null; docsEnum.subPostingsEnums[entry.index] = subPostingsEnum; subDocs[upto].postingsEnum = subPostingsEnum; subDocs[upto].slice = entry.subSlice; upto++; } return docsEnum.reset(subDocs, upto); }
ArrayUtil.timSort(postingsFreqs); return new ExactPhraseMatcher(postingsFreqs, totalMatchCost);
ArrayUtil.timSort(postingsFreqs);
private Term[] equalsTerms() { if (terms.length == 1) { return terms; } if (equalTerms == null) { // sort the terms to make sure equals and hashCode are consistent // this should be a very small cost and equivalent to a HashSet but less object creation final Term[] t = new Term[terms.length]; System.arraycopy(terms, 0, t, 0, terms.length); ArrayUtil.timSort(t); equalTerms = t; } return equalTerms; }
ArrayUtil.timSort(postingsFreqs); return new ExactPhraseMatcher(postingsFreqs, totalMatchCost);
ArrayUtil.timSort(entries, (o1, o2) -> { String left = o1.getValue(); String right = o2.getValue();
void sendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) throws Exception { store.incRef(); try { ArrayUtil.timSort(files, Comparator.comparingLong(StoreFileMetaData::length)); // send smallest first for (int i = 0; i < files.length; i++) { final StoreFileMetaData md = files[i];
/**
 * Sorts {@code a} in place using the supplied {@link Comparator}. Backed by the
 * Tim sort algorithm, with a binary-sort fallback for small arrays.
 *
 * @param a    array to sort
 * @param comp order to sort by
 */
public static <T> void timSort(T[] a, Comparator<? super T> comp) {
  timSort(a, 0, a.length, comp);
}
/**
 * Sorts {@code a} in place into natural order. Backed by the Tim sort
 * algorithm, with a binary-sort fallback for small arrays.
 *
 * @param a array to sort; elements must be mutually comparable
 */
public static <T extends Comparable<? super T>> void timSort(T[] a) {
  timSort(a, 0, a.length);
}
StoreFileMetaData[] metadata = StreamSupport.stream(recoverySourceMetadata1.spliterator(), false).toArray(StoreFileMetaData[]::new); ArrayUtil.timSort(metadata, Comparator.comparingLong(StoreFileMetaData::length)); // check small files first for (StoreFileMetaData md : metadata) { cancellableThreads.checkForCancel();
/**
 * Flushes all surface forms collected for the current analyzed form into the
 * FST builder, then resets the per-term state.
 *
 * <p>Because multiple surface forms can share one analyzed form, a trailing
 * disambiguator byte is appended to the analyzed bytes and incremented per
 * surface form so every FST input is unique.
 *
 * @param defaultWeight weight to encode for surface forms whose weight is -1
 * @throws IOException if the FST builder fails to accept an entry
 */
public void finishTerm(long defaultWeight) throws IOException {
  // Sort the collected surface forms; the ordering is defined by
  // SurfaceFormAndPayload's comparison logic (not visible here —
  // NOTE(review): confirm intended priority order in that class).
  ArrayUtil.timSort(surfaceFormsAndPayload, 0, count);
  int deduplicator = 0;
  // Extend the analyzed bytes by one slot to hold the disambiguator byte;
  // grow() ensures capacity for the new length before setByteAt below.
  analyzed.append((byte) 0);
  analyzed.setLength(analyzed.length() + 1);
  analyzed.grow(analyzed.length());
  for (int i = 0; i < count; i++) {
    // Overwrite the last byte with a fresh disambiguator for this surface form.
    analyzed.setByteAt(analyzed.length() - 1, (byte) deduplicator++);
    Util.toIntsRef(analyzed.get(), scratchInts);
    SurfaceFormAndPayload candiate = surfaceFormsAndPayload[i];
    // -1 means "no explicit weight": fall back to the (clamped) default weight.
    long cost = candiate.weight == -1 ? encodeWeight(Math.min(Integer.MAX_VALUE, defaultWeight)) : candiate.weight;
    builder.add(scratchInts.get(), outputs.newPair(cost, candiate.payload));
  }
  // Reset per-term state for the next analyzed form.
  seenSurfaceForms.clear();
  count = 0;
}
int secondaryLength = 0; int primaryLength = primary.length; ArrayUtil.timSort(primary, comparator); final Set<ModelNode> throttledNodes = Collections.newSetFromMap(new IdentityHashMap<>()); do {