/**
 * Shrinks this collection's backing table; simply delegates to {@link #compact compact} and
 * exists for symmetry with other collection classes. The name is slightly misleading (which
 * is why <tt>compact</tt> is preferred): the load factor may force a capacity above and
 * beyond the size of this collection.
 *
 * @see #compact
 */
public final void trimToSize() {
    compact();
}
/** * Removes the last entry returned by the iterator. Invoking this method more than once for a * single entry will leave the underlying data structure in a confused state. */ public void remove() { if (_expectedSize != _hash.size()) { throw new ConcurrentModificationException(); } // Disable auto compaction during the remove. This is a workaround for bug 1642768. try { _hash.tempDisableAutoCompaction(); _hash.removeAt(_index); } finally { _hash.reenableAutoCompaction(false); } _expectedSize--; }
/**
 * Ensures that this hashtable can hold <tt>desiredCapacity</tt> <b>additional</b> elements
 * without triggering a rehash. A tuning hook worth calling before a large batch of inserts.
 *
 * @param desiredCapacity an <code>int</code> value; the number of extra elements expected
 */
public void ensureCapacity(int desiredCapacity) {
    if (desiredCapacity <= (_maxSize - size())) {
        return; // already enough headroom
    }
    // Size the table so (current + desired) elements fit under the load factor;
    // +1 keeps at least one free slot for open addressing.
    int neededCapacity = (int) Math.ceil((desiredCapacity + size()) / _loadFactor) + 1;
    rehash(PrimeFinder.nextPrime(neededCapacity));
    computeMaxSize(capacity());
}
/** * After an insert, this hook is called to adjust the size/free values of the set and to perform * rehashing if necessary. */ protected final void postInsertHook(boolean usedFreeSlot) { if (usedFreeSlot) { _free--; } // rehash whenever we exhaust the available space in the table if (++_size > _maxSize || _free == 0) { // choose a new capacity suited to the new state of the table // if we've grown beyond our maximum size, double capacity; // if we've exhausted the free spots, rehash to the same capacity, // which will free up any stale removed slots for reuse. int newCapacity = _size > _maxSize ? PrimeFinder.nextPrime(capacity() << 1) : capacity(); rehash(newCapacity); computeMaxSize(capacity()); } }
/**
 * Initializes the hashtable to a prime capacity which is at least
 * <tt>initialCapacity + 1</tt>.
 *
 * @param initialCapacity an <code>int</code> value
 * @return the actual capacity chosen
 */
protected int setUp(int initialCapacity) {
    final int capacity = PrimeFinder.nextPrime(initialCapacity);
    computeMaxSize(capacity);
    computeNextAutoCompactionAmount(initialCapacity);
    return capacity;
}
/**
 * Initializes the hashtable to a prime capacity which is at least
 * <tt>initialCapacity + 1</tt>, allocating the per-slot state array alongside it.
 *
 * @param initialCapacity an <code>int</code> value
 * @return the actual capacity chosen
 */
protected int setUp(int initialCapacity) {
    final int capacity = super.setUp(initialCapacity);
    _states = new byte[capacity];
    return capacity;
}
} // TPrimitiveHash
/**
 * Returns a copy of this hash whose slot-state array is cloned so the copy's state is
 * independent of the original's.
 *
 * @return a clone of this instance
 */
public Object clone() {
    TPrimitiveHash copy = (TPrimitiveHash) super.clone();
    copy._states = (byte[]) _states.clone();
    return copy;
}
/**
 * Empties the collection: resets the element count to zero and marks every slot free.
 */
public void clear() {
    _free = capacity();
    _size = 0;
}
/**
 * Deletes the record at <tt>index</tt>, marking its slot REMOVED before delegating the
 * size/compaction bookkeeping to the superclass.
 *
 * @param index an <code>int</code> value
 */
protected void removeAt(int index) {
    _states[index] = REMOVED;
    super.removeAt(index);
}
@Override
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    // Restore superclass state first; field order below must mirror writeExternal.
    super.readExternal(in);

    // VERSION (currently unused; read to advance the stream)
    in.readByte();

    // HASHING STRATEGY
    //noinspection unchecked
    _hashingStrategy = (com.gigaspaces.internal.gnu.trove.TObjectHashingStrategy<T>) in.readObject();
}
} // TObjectHash
/** * Compresses the hashtable to the minimum prime size (as defined by PrimeFinder) that will hold * all of the elements currently in the table. If you have done a lot of <tt>remove</tt> * operations and plan to do a lot of queries or insertions or iteration, it is a good idea to * invoke this method. Doing so will accomplish two things: * * <ol> <li> You'll free memory allocated to the table but no longer needed because of the * remove()s.</li> * * <li> You'll get better query/insert/iterator performance because there won't be any * <tt>REMOVED</tt> slots to skip over when probing for indices in the table.</li> </ol> */ public void compact() { // need at least one free spot for open addressing rehash(PrimeFinder.nextPrime((int) Math.ceil(size() / _loadFactor) + 1)); computeMaxSize(capacity()); // If auto-compaction is enabled, re-determine the compaction interval if (_autoCompactionFactor != 0) { computeNextAutoCompactionAmount(size()); } }
/**
 * Initializes the Object set of this hash table, filling every slot with the FREE sentinel.
 *
 * @param initialCapacity an <code>int</code> value
 * @return an <code>int</code> value; the actual capacity chosen
 */
protected int setUp(int initialCapacity) {
    final int capacity = super.setUp(initialCapacity);
    _set = new Object[capacity];
    Arrays.fill(_set, FREE);
    return capacity;
}
/**
 * Produces a shallow clone: the slot array is copied, but the elements it references are
 * shared with this collection.
 *
 * @return a shallow clone of this collection
 */
public TObjectHash<T> clone() {
    TObjectHash<T> copy = (TObjectHash<T>) super.clone();
    copy._set = (Object[]) _set.clone();
    return copy;
}
/**
 * Computes the capacity to grow to when the table must expand: double the current capacity.
 *
 * @return twice the current capacity
 */
protected int calculateGrownCapacity() {
    return capacity() * 2;
}
/**
 * Deletes the record at <tt>index</tt>, replacing it with the REMOVED sentinel before
 * delegating the size/compaction bookkeeping to the superclass.
 *
 * @param index the slot to clear
 */
protected void removeAt(int index) {
    _set[index] = REMOVED;
    super.removeAt(index);
}
/**
 * Creates a new <code>THash</code> instance with a prime capacity at or near the minimum
 * needed to hold <tt>initialCapacity</tt> elements with load factor <tt>loadFactor</tt>
 * without triggering a rehash.
 *
 * @param initialCapacity an <code>int</code> value
 * @param loadFactor      a <code>float</code> value
 */
public THash(int initialCapacity, float loadFactor) {
    super();
    // Testing showed the load factor (especially the default one) to be a
    // pretty good starting auto-compaction factor as well.
    _autoCompactionFactor = loadFactor;
    _loadFactor = loadFactor;
    setUp((int) Math.ceil(initialCapacity / loadFactor));
}
/** * Re-enable auto-compaction after it was disabled via {@link #tempDisableAutoCompaction()}. * * @param check_for_compaction True if compaction should be performed if needed before * returning. If false, no compaction will be performed. */ protected void reenableAutoCompaction(boolean check_for_compaction) { _autoCompactTemporaryDisable = false; if (check_for_compaction && _autoCompactRemovesRemaining <= 0 && _autoCompactionFactor != 0) { // Do the compact // NOTE: this will cause the next compaction interval to be calculated compact(); } }
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { // VERSION in.readByte(); // LOAD FACTOR float old_factor = _loadFactor; _loadFactor = in.readFloat(); // AUTO COMPACTION LOAD FACTOR _autoCompactionFactor = in.readFloat(); // If we change the laod factor from the default, re-setup if (old_factor != _loadFactor) { setUp((int) Math.ceil(DEFAULT_INITIAL_CAPACITY / _loadFactor)); } } }// THash
/** * Delete the record at <tt>index</tt>. Reduces the size of the collection by one. * * @param index an <code>int</code> value */ protected void removeAt(int index) { _size--; // If auto-compaction is enabled, see if we need to compact if (_autoCompactionFactor != 0) { _autoCompactRemovesRemaining--; if (!_autoCompactTemporaryDisable && _autoCompactRemovesRemaining <= 0) { // Do the compact // NOTE: this will cause the next compaction interval to be calculated compact(); } } }