/**
 * Acquires this store's write lock.
 *
 * @return the opened {@link AutoCloseableLock}; ownership transfers to the
 *         caller, who must close it (typically via try-with-resources)
 */
@SuppressWarnings("resource") // the caller is responsible for closing the returned lock
AutoCloseableLock writeLock() {
  final AutoCloseableLock wrapped = new AutoCloseableLock(writeLock);
  return wrapped.open();
}
// Releases the underlying lock held by this wrapper.
@Override public void close() { acl.close(); }
/**
 * Acquires the shared (read) stripe lock for the given key.
 *
 * <p>The stripe is selected by hashing the key, so concurrent operations on
 * different keys usually proceed in parallel while operations on the same key
 * contend on the same stripe.
 *
 * @param key the key whose stripe lock should be taken; must not be null
 * @return the opened lock; the caller must close it to release the stripe
 */
private AutoCloseableLock sharedLock(byte[] key) {
  Preconditions.checkNotNull(key);
  final int stripe = Math.abs(Arrays.hashCode(key) % parallel);
  final AutoCloseableLock stripeLock = sharedLocks[stripe];
  stripeLock.open();
  return stripeLock;
}
public RocksDBStore(String name, ColumnFamilyDescriptor family, ColumnFamilyHandle handle, RocksDB db, int stripes) { super(); this.family = family; this.name = name; this.db = db; this.parallel = stripes; this.handle = handle; this.sharedLocks = new AutoCloseableLock[stripes]; this.exclusiveLocks = new AutoCloseableLock[stripes]; for (int i = 0; i < stripes; i++) { ReadWriteLock core = new ReentrantReadWriteLock(); sharedLocks[i] = new AutoCloseableLock(core.readLock()); exclusiveLocks[i] = new AutoCloseableLock(core.writeLock()); } if (COLLECT_METRICS) { registerMetrics(); } }
private void exclusively(ExclusiveOperation operation) throws IOException { // Attempt to acquire all exclusive locks to limit concurrent writes occurring. ArrayList<AutoCloseableLock> acquiredLocks = new ArrayList<>(exclusiveLocks.length); for (int i = 0; i < exclusiveLocks.length; i++) { try { // We cannot ensure that all write locks can be acquired, so a best attempt must be made. // If lock is still held after waiting 3 seconds, continue with the lock acquisition and close. // Note: The data from the concurrent write cannot be guaranteed to be persisted on restart. if (exclusiveLocks[i].tryOpen(3L, TimeUnit.SECONDS) != null) { acquiredLocks.add(exclusiveLocks[i]); } } catch (InterruptedException e) { // Do nothing. } } try(DeferredException deferred = new DeferredException()) { try { operation.execute(deferred); } catch(RocksDBException e) { deferred.addException(e); } deferred.suppressingClose(AutoCloseables.all(acquiredLocks)); } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException(e); } }
/**
 * Acquires the exclusive (write) stripe lock for the given key.
 *
 * <p>Uses the same key-to-stripe mapping as {@code sharedLock(byte[])}, so
 * readers and the writer of a given key contend on the same stripe.
 *
 * @param key the key whose stripe lock should be taken; must not be null
 * @return the opened lock; the caller must close it to release the stripe
 */
private AutoCloseableLock exclusiveLock(byte[] key) {
  Preconditions.checkNotNull(key);
  final int stripe = Math.abs(Arrays.hashCode(key) % parallel);
  final AutoCloseableLock stripeLock = exclusiveLocks[stripe];
  stripeLock.open();
  return stripeLock;
}
/**
 * Acquires the read lock.
 *
 * @return the opened {@link AutoCloseableLock}; ownership transfers to the
 *         caller, who must close it (typically via try-with-resources)
 */
@SuppressWarnings("resource") // the caller is responsible for closing the returned lock
public AutoCloseableLock readLock() {
  final AutoCloseableLock wrapped = new AutoCloseableLock(readLock);
  return wrapped.open();
}
/**
 * Shuts these buffers down: marks them closed under the exclusive close lock
 * (so no new batches are accepted), then releases every data collector and
 * the allocator.
 *
 * @throws Exception if closing any collector or the allocator fails
 */
@Override
public void close() throws Exception {
  try (AutoCloseableLock ignored = exclusiveCloseLock.open()) {
    closed = true;
    AutoCloseables.close(Iterables.concat(
        collectorMap.values(),
        Collections.singleton(allocator)));
  }
}
pluginLock.close();
/**
 * Acquires the write lock.
 *
 * @return the opened {@link AutoCloseableLock}; ownership transfers to the
 *         caller, who must close it (typically via try-with-resources)
 */
@SuppressWarnings("resource") // the caller is responsible for closing the returned lock
public AutoCloseableLock writeLock() {
  final AutoCloseableLock wrapped = new AutoCloseableLock(writeLock);
  return wrapped.open();
}
/**
 * Records that the identified sending fragment has finished its stream.
 *
 * <p>Takes the shared incoming-batch lock so the notification cannot race
 * with {@code close()}; completions arriving after close are dropped.
 *
 * @param completion identifies the sending major/minor fragment whose stream ended
 */
public void completionArrived(final FragmentStreamComplete completion) {
  try (AutoCloseableLock ignored = sharedIncomingBatchLock.open()) {
    if (closed) {
      return;
    }
    final int majorFragmentId = completion.getSendingMajorFragmentId();
    final DataCollector collector = collector(majorFragmentId);
    // Serialize per-collector updates.
    synchronized (collector) {
      collector.streamCompleted(completion.getSendingMinorFragmentId());
    }
  }
}
/**
 * Acquires this store's read lock.
 *
 * @return the opened {@link AutoCloseableLock}; ownership transfers to the
 *         caller, who must close it (typically via try-with-resources)
 */
@SuppressWarnings("resource") // the caller is responsible for closing the returned lock
AutoCloseableLock readLock() {
  final AutoCloseableLock wrapped = new AutoCloseableLock(readLock);
  return wrapped.open();
}
/**
 * Accepts an incoming data batch and hands it to the matching collector.
 *
 * <p>First checks the batch against the allocator's headroom; on overflow the
 * failure is recorded on {@code deferredException} and the batch is dropped.
 * Otherwise the shared incoming-batch lock is taken so the local batch
 * reference is only created while the buffers are still open (creating it
 * after close would leak memory).
 *
 * @param incomingBatch the batch received from a sending fragment
 * @throws FragmentSetupException if the target collector cannot be resolved
 * @throws IOException if materializing the local batch fails
 */
public void batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {
  // Reject up front if accepting this batch would exceed the allocator's headroom.
  if(!incomingBatch.checkAcceptance(allocator.getHeadroom())){
    deferredException.addException(UserException.memoryError()
        .message("Out of memory while receiving incoming message. Message size: %d, Current thread allocation: %d, thread limit: %d.",
            incomingBatch.size(), allocator.getAllocatedMemory(), allocator.getLimit())
        .build(logger));
    return;
  }

  // we want to make sure that we only generate local record batch reference in the case that we're not closed.
  // Otherwise we would leak memory.
  try (AutoCloseableLock lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return;
    }
    final DataCollector collector = collector(incomingBatch.getHeader().getSendingMajorFragmentId());
    // Serialize per-collector delivery; try-with-resources releases the local
    // batch reference even if the collector throws.
    synchronized (collector) {
      try(final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(allocator)){
        collector.batchArrived(incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      }
    }
  }
}