/**
 * Registers a data fix, unless it targets a data version newer than the
 * game's current {@code dataVersion} (such fixes can never apply and are
 * logged and dropped).
 *
 * @param fix the fix to register.
 */
public void addFixer(final DataFix fix) {
    final int versionKey = fix.getVersionKey();
    final int fixVersion = DataFixUtils.getVersion(versionKey);
    // A fix for a future data version is unusable by this game build.
    if (fixVersion > dataVersion) {
        LOGGER.warn("Ignored fix registered for version: {} as the DataVersion of the game is: {}", fixVersion, dataVersion);
        return;
    }
    globalList.add(fix);
    fixerVersions.add(versionKey);
}
/**
 * Performs the cache operations for the given key.
 *
 * <p>NOTE(review): this looks like a clairvoyant (Belady-style) policy where
 * {@code data} holds the scheduled next-access time of each resident entry and
 * {@code accessTimes} maps a key to its remaining future access times —
 * TODO confirm against the fields' declarations, which are outside this view.
 */
private void process(long key) {
    IntPriorityQueue times = accessTimes.get(key);
    // The head of the queue is the access happening right now.
    int lastAccess = times.dequeueInt();
    // If that timestamp was resident, this access is a hit.
    boolean found = data.remove(lastAccess);
    if (times.isEmpty()) {
        // No future accesses remain: park the entry on a sentinel timestamp
        // (decremented so each sentinel stays unique) and drop its schedule.
        data.add(infiniteTimestamp--);
        accessTimes.remove(key);
    } else {
        // Re-insert the entry keyed by its next future access time.
        data.add(times.firstInt());
    }
    if (found) {
        policyStats.recordHit();
    } else {
        policyStats.recordMiss();
        // Only a miss can grow the resident set past capacity.
        if (data.size() > maximumSize) {
            evict();
        }
    }
}
// NOTE(review): this is a mid-method fragment — the opening `if` brace is never
// closed in view and the loop declaring `i` for the first statement is missing;
// the enclosing method begins and ends outside this excerpt. Code left untouched.
// If no fields were explicitly selected, index every field — except virtual
// fields that have no registered document resolver (presumably they cannot be
// resolved at indexing time; confirm against the enclosing method).
if ( indexedFields.isEmpty() ) { if ( factory.fieldType( i ) != FieldType.VIRTUAL || virtualDocumentResolvers.containsKey( i ) ) indexedFields.add( i );
// Snapshot the selection and derive one basename per indexed field
// by appending "-<fieldName>" to the index basename.
final int[] indexedField = indexedFields.toIntArray();
final String[] basenameField = new String[ indexedField.length ];
for( int i = 0; i < indexedField.length; i++ ) basenameField[ i ] = basename + "-" + factory.fieldName( indexedField[ i ] );
/** Sets the indexed fields to those provided (default: all fields, but see {@link #indexedFields}).
 *
 * <p>This is a utility method that provides a way to set {@link #indexedFields} in a chainable way.
 *
 * @param field a list of fields to be indexed, that will <em>replace</em> the current values in {@link #indexedFields}.
 * @return this index builder.
 * @see IndexBuilder#indexedFields
 */
public IndexBuilder indexedFields( int... field ) {
    // Discard the previous selection entirely, then record the new one in order.
    indexedFields.clear();
    for( int i = 0; i < field.length; i++ ) indexedFields.add( field[ i ] );
    return this;
}
/**
 * Returns the number of the current cached document, after resolving its
 * virtual-field fragments into successor document numbers (stored in
 * {@code cachedSuccessors}) and advancing the underlying document iterator.
 *
 * <p>NOTE(review): this looks like a successor-list iterator over a document
 * sequence — each call consumes one document and pre-resolves its outgoing
 * "links"; confirm against the enclosing class contract.
 *
 * @throws NoSuchElementException if there is no next document.
 * @throws RuntimeException wrapping any {@link IOException} from document access.
 */
@SuppressWarnings("unchecked")
public int nextInt() {
    if ( !hasNext() ) throw new NoSuchElementException();
    ObjectList<Scan.VirtualDocumentFragment> vdf;
    try {
        // The virtual field's content is a list of document fragments.
        vdf = (ObjectList<VirtualDocumentFragment>)cachedDocument.content( virtualField );
    }
    catch ( IOException exc1 ) {
        throw new RuntimeException( exc1 );
    }
    succ.clear();
    // Resolution is performed in the context of the current document.
    resolver.context( cachedDocument );
    ObjectIterator<VirtualDocumentFragment> it = vdf.iterator();
    while ( it.hasNext() ) {
        int successor = resolver.resolve( it.next().documentSpecifier() );
        // Negative results mean the specifier could not be resolved; skip them.
        if ( successor >= 0 ) succ.add( successor );
    }
    cachedSuccessors = succ.toIntArray();
    // Get ready for the next request
    try {
        cachedDocument.close();
        cachedDocument = documentIterator.nextDocument();
    }
    catch ( IOException e ) {
        throw new RuntimeException( e );
    }
    return cachedDocumentNumber++;
}
/** Checks whether this interval is equal to another set of integers. * * @param o an object. * @return true if <code>o</code> is an ordered set of integer containing * the same element of this interval in the same order, or if <code>o</code> * is a set of integers containing the same elements of this interval. */ public boolean equals( final Object o ) { if ( o instanceof Interval ) return ((Interval)o).left == left && ((Interval)o).right == right; else if ( o instanceof IntSortedSet ) { // For sorted sets, we require the same order IntSortedSet s = (IntSortedSet) o; if ( s.size() != length() ) return false; int n = length(); IntIterator i = iterator(), j = s.iterator(); while( n-- != 0 ) if ( i.nextInt() != j.nextInt() ) return false; return true; } else if ( o instanceof IntSet ) { // For sets, we just require the same elements IntSet s = (IntSet) o; if ( s.size() != length() ) return false; int n = length(); IntIterator i = iterator(); while( n-- != 0 ) if ( ! s.contains( i.nextInt() ) ) return false; return true; } else return false; } }
/**
 * Builds the final {@code DataFixer} from the registered schemas and fixes,
 * pre-building (and thereby warming) every schema type's rewrite rules
 * asynchronously on the given executor.
 *
 * @param executor executor on which the per-type rule building runs.
 * @return the assembled fixer (returned immediately; rule building may still
 *         be in flight on the executor).
 */
public DataFixer build(final Executor executor) {
    // Defensive copies: the builder's collections stay mutable after build().
    final DataFixerUpper fixerUpper = new DataFixerUpper(new Int2ObjectAVLTreeMap<>(schemas), new ArrayList<>(globalList), new IntAVLTreeSet(fixerVersions));
    final IntBidirectionalIterator iterator = fixerUpper.fixerVersions().iterator();
    while (iterator.hasNext()) {
        final int versionKey = iterator.nextInt();
        final Schema schema = schemas.get(versionKey);
        for (final String typeName : schema.types()) {
            // One async task per (schema, type): computing the rewrite rule is
            // the expensive step being parallelized here.
            CompletableFuture.runAsync(() -> {
                final Type<?> dataType = schema.getType(() -> typeName);
                final TypeRewriteRule rule = fixerUpper.getRule(DataFixUtils.getVersion(versionKey), dataVersion);
                dataType.rewrite(rule, DataFixerUpper.OPTIMIZATION_RULE);
            }, executor).exceptionally(e -> {
                // A broken datafixer is unrecoverable: log and hard-exit the JVM.
                LOGGER.error("Unable to build datafixers", e);
                Runtime.getRuntime().exit(1);
                return null;
            });
        }
    }
    return fixerUpper;
}
}
/**
 * Writes the model's view store to the given output.
 *
 * <p>The fields are written in a fixed order — length, views, then the
 * garbage queue (as an int array) — which the matching deserializer
 * presumably reads back in the same order.
 *
 * @param out destination stream.
 * @throws IOException if writing fails.
 */
private void serializeViewStore(final DataOutput out) throws IOException {
    final GraphViewStore store = model.store.viewStore;
    serialize(out, store.length);
    serialize(out, store.views);
    serialize(out, store.garbageQueue.toIntArray());
}
/**
 * Reduces the per-segment collapse results into a single query: a filtered
 * doc-id set per leaf (group leaders only) plus a map from global doc id to
 * the number of documents collapsed under it.
 */
@Override
public CollapseCollector.Query getResult() {
    // Fill the priority queue with the results of each segment
    final GroupQueue groupQueue = new GroupQueue(maxRows);
    leafCollectors.forEach(leaf -> leaf.reduce(groupQueue));
    // Stores for each doc the number of collapsed documents
    final Int2IntLinkedOpenHashMap collapsedMap = new Int2IntLinkedOpenHashMap(groupQueue.groupLeaders.size());
    // The DocID must be sorted and grouped by segment
    final Map<LeafReaderContext, IntSortedSet> sortedInts = new HashMap<>();
    long collapsedCount = 0;
    for (final GroupLeader groupLeader : groupQueue.groupLeaders.values()) {
        // AVL tree set keeps the per-segment doc ids sorted, as required by
        // RoaringDocIdSet.Builder below.
        sortedInts.computeIfAbsent(groupLeader.context, ctx -> new IntAVLTreeSet()).add(groupLeader.doc);
        // Keyed by the global (docBase-adjusted) doc id.
        collapsedMap.addTo(groupLeader.context.docBase + groupLeader.doc, groupLeader.collapsedCount);
        collapsedCount += groupLeader.collapsedCount;
    }
    // Now we can build the bitsets
    final Map<LeafReaderContext, RoaringDocIdSet> docIdMaps = new HashMap<>();
    sortedInts.forEach((ctx, sortedInt) -> {
        final RoaringDocIdSet.Builder builder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc());
        sortedInt.forEach((IntConsumer) builder::add);
        docIdMaps.put(ctx, builder.build());
    });
    // Add empty bitset for unassigned leaf
    leafCollectors.forEach(leaf -> docIdMaps.putIfAbsent(leaf.context, new RoaringDocIdSet.Builder(leaf.context.reader().maxDoc()).build()));
    return new Query(new FilteredQuery(docIdMaps), collapsedMap, collapsedCount);
}
/** Returns the number of enabled entries: capacity minus the free slots. */
@Override
public int enabledCount() {
    final int freeSlots = this.free.size();
    return this.max_size - freeSlots;
}
/**
 * Returns the minimal range that
 * {@linkplain IntRange#contains(int) contains} all of the given values.
 * The returned range is {@linkplain BoundType#CLOSED closed} on both ends.
 *
 * @throws ClassCastException if the parameters are not <i>mutually
 *     comparable</i>
 * @throws NoSuchElementException if {@code values} is empty
 * @throws NullPointerException if {@code values} is null
 * @since 14.0
 */
public static IntRange encloseAll(IntCollection values) {
    checkNotNull(values);
    // Sorted sets already know their extremes.
    if (values instanceof IntSortedSet) {
        IntSortedSet setValues = (IntSortedSet) values;
        return closed(setValues.firstInt(), setValues.lastInt());
    }
    IntIterator valueIterator = values.iterator();
    // nextInt() avoids the boxing of the deprecated IntIterator.next(), and a
    // null-check on a primitive is meaningless; an empty collection still
    // throws NoSuchElementException here, as documented.
    int min = valueIterator.nextInt();
    int max = min;
    while (valueIterator.hasNext()) {
        int value = valueIterator.nextInt();
        min = Math.min(min, value);
        max = Math.max(max, value);
    }
    return closed(min, max);
}
/**
 * Splits the nodes of the given path at every start position that falls
 * strictly inside a node, returning the resulting (finer-grained) node list.
 *
 * <p>NOTE(review): assumes the single-node {@code lengthSplit(node, length)}
 * overload detaches a prefix of the node and leaves the node itself holding
 * the remainder (so repeated splits against the original firstStart compose)
 * — TODO confirm against that overload's body, which is outside this view.
 */
private List<KmerPathNode> lengthSplit(IntSortedSet startPositions, List<KmerPathNode> path) {
    List<KmerPathNode> result = new ArrayList<KmerPathNode>(startPositions.size());
    for (int i = 0; i < path.size(); i++) {
        KmerPathNode pn = path.get(i);
        // break node internally
        // subSet(firstStart+1, firstStart+length) = split positions strictly
        // inside pn (both node boundaries are excluded).
        for (int breakStartPosition : startPositions.subSet(pn.firstStart() + 1, pn.firstStart() + pn.length())) {
            int breakLength = breakStartPosition - pn.firstStart();
            KmerPathNode split = lengthSplit(pn, breakLength);
            result.add(split);
        }
        // The (possibly trimmed) node itself goes last.
        result.add(pn);
    }
    return result;
}
// NOTE(review): the body of this overload continues past this excerpt.
private KmerPathNode lengthSplit(KmerPathNode node, int length) {
/** Returns the smallest element, delegating to the wrapped set under the shared lock. */
@Override
public int firstInt() {
    synchronized (sync) {
        final int first = sortedSet.firstInt();
        return first;
    }
}

@Override
// Returns the largest element, delegating to the wrapped set under the shared lock.
@Override
public int lastInt() {
    synchronized (sync) {
        return sortedSet.lastInt();
    }
}

/**
/**
 * Empties the tree: clears every occupied leaf, drops the backing array,
 * and resets all bookkeeping counters and queues.
 */
public void clear() {
    // Only the first `length` slots are in use; clear each occupied octant.
    for (int idx = 0; idx < length; idx++) {
        final Octant occupant = leaves[idx];
        if (occupant != null) {
            occupant.clear();
        }
    }
    // Drop the storage and reset all state to empty.
    leaves = new Octant[0];
    leavesCount = 0;
    length = 0;
    garbageQueue.clear();
    selectedLeaves.clear();
    visibleLeaves = 0;
}