Refine search
/**
 * Returns a point-in-time snapshot of the group channel table so callers can
 * iterate it without holding {@code groupChannelLock}.
 *
 * <p>If the lock cannot be acquired within {@code LOCK_TIMEOUT_MILLIS}, or the
 * thread is interrupted while waiting, the returned map may be empty — callers
 * cannot distinguish "no channels" from "lock busy". NOTE(review): presumably
 * acceptable for this monitoring-style accessor — confirm with callers.
 *
 * @return a copied map of group name to its channel/client-info map; never {@code null}
 */
public HashMap<String, HashMap<Channel, ClientChannelInfo>> getGroupChannelTable() {
    HashMap<String /* group name */, HashMap<Channel, ClientChannelInfo>> newGroupChannelTable =
        new HashMap<String, HashMap<Channel, ClientChannelInfo>>();
    try {
        if (this.groupChannelLock.tryLock(LOCK_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS)) {
            try {
                // Shallow copy: inner maps are shared with the live table.
                newGroupChannelTable.putAll(groupChannelTable);
            } finally {
                groupChannelLock.unlock();
            }
        }
    } catch (InterruptedException e) {
        // FIX: restore the interrupt status so code further up the stack can
        // observe the interruption; previously it was silently swallowed.
        Thread.currentThread().interrupt();
        // FIX: meaningful log message instead of the empty string.
        log.error("getGroupChannelTable interrupted while waiting for groupChannelLock", e);
    }
    return newGroupChannelTable;
}
@Override public boolean tryLock() { final boolean obtainedLock = lock.tryLock(); if (obtainedLock && isDirty()) { // once we have obtained the lock, we need to check if the writer // has been marked dirty. If so, we cannot write to the underlying // file, so we need to unlock and return false. Otherwise, it's okay // to write to the underlying file, so return true. lock.unlock(); return false; } return obtainedLock; }
/**
 * Attempts to drain the buffered messages to the delegate session.
 *
 * <p>Only one thread flushes at a time: if {@code flushLock} is already held,
 * this method returns {@code false} immediately without sending anything.
 *
 * @return {@code true} if this thread held the flush lock and drained the
 *         buffer (possibly partially, if {@code shouldNotSend()} became true);
 *         {@code false} if another thread was already flushing
 * @throws IOException propagated from the delegate's {@code sendMessage}
 */
private boolean tryFlushMessageBuffer() throws IOException {
    if (this.flushLock.tryLock()) {
        try {
            while (true) {
                WebSocketMessage<?> message = this.buffer.poll();
                // Stop when the buffer is empty or sending has been vetoed
                // (e.g. session closing/limit exceeded — exact condition is in
                // shouldNotSend()). Note: a polled message is dropped in the
                // veto case; its size was not yet subtracted from bufferSize.
                if (message == null || shouldNotSend()) {
                    break;
                }
                this.bufferSize.addAndGet(-message.getPayloadLength());
                // sendStartTime brackets the (potentially blocking) send so a
                // watchdog elsewhere can detect an overly slow send; it is
                // reset both after each send and in the finally block so it
                // never stays non-zero once this thread is done.
                this.sendStartTime = System.currentTimeMillis();
                getDelegate().sendMessage(message);
                this.sendStartTime = 0;
            }
        } finally {
            this.sendStartTime = 0;
            this.flushLock.unlock();
        }
        return true;
    }
    return false;
}
/** {@inheritDoc} */
@Override
public void addEventListener(HadoopIgfsIpcIoListener lsnr) {
    // If the busy lock is unavailable, the component is shutting down:
    // notify the listener of closure right away instead of registering it.
    if (!busyLock.readLock().tryLock()) {
        lsnr.onClose();
        return;
    }

    boolean notifyClose = false;

    try {
        notifyClose = stopping;

        // Register only while not stopping; otherwise fall through and
        // deliver the close callback after releasing the lock.
        if (!notifyClose) {
            lsnrs.add(lsnr);
        }
    } finally {
        busyLock.readLock().unlock();

        // Callback is invoked after unlock so listener code never runs
        // while we hold the read lock.
        if (notifyClose) {
            lsnr.onClose();
        }
    }
}
/**
 * Adds a handler to the Lifecycle. If the lifecycle has already been started, it throws an {@link ISE}
 *
 * @param handler The handler to add to the lifecycle
 * @param stage The stage to add the lifecycle at
 *
 * @throws ISE indicates that the lifecycle has already been started and thus cannot be added to
 */
public void addHandler(Handler handler, Stage stage) {
    // A held start/stop lock means a start or stop is in flight; refuse
    // rather than wait, so callers fail fast.
    if (!startStopLock.tryLock()) {
        throw new ISE("Cannot add a handler in the process of Lifecycle starting or stopping");
    }

    try {
        if (!State.NOT_STARTED.equals(state.get())) {
            throw new ISE("Cannot add a handler after the Lifecycle has started, it doesn't work that way.");
        }
        handlers.get(stage).add(handler);
    } finally {
        startStopLock.unlock();
    }
}
/**
 * Configures a {@link LockProcessor} with timed acquisition and plain release
 * semantics for {@code java.util.concurrent.locks.Lock} instances, then
 * finalizes it via {@code init()}.
 *
 * <p>The lock function delegates to {@code Lock.tryLock(timeout, timeUnit)}
 * (a timed, interruptible attempt); the unlock function calls
 * {@code Lock.unlock()} and reports success unconditionally.
 * NOTE(review): {@code tryLock(long, TimeUnit)} declares
 * {@code InterruptedException} — presumably the lock-function interface on
 * LockProcessor permits throwing it; confirm against its declaration.
 *
 * @param timeout       maximum time to wait for the lock
 * @param timeUnit      unit of {@code timeout}
 * @param lockProcessor the processor to configure
 * @return the configured and initialized processor
 */
protected <A extends Annotation> LockProcessor<A, java.util.concurrent.locks.Lock> initLockInfo(long timeout, TimeUnit timeUnit, LockProcessor<A, java.util.concurrent.locks.Lock> lockProcessor) {
    return lockProcessor
        .lock(lock -> lock.tryLock(timeout, timeUnit))
        .unlock(lock -> {
            lock.unlock();
            return true;
        }).init();
}
/**
 * Attempts to acquire the eviction lock and apply the pending operations, up
 * to the amortized threshold, to the page replacement policy.
 */
void tryToDrainBuffers() {
    // Non-blocking: if another thread is already draining, skip — the work
    // will be amortized onto that thread instead.
    if (evictionLock.tryLock()) {
        try {
            // lazySet is sufficient here: the status write only needs to
            // become visible eventually; the lock provides the ordering for
            // the drain itself.
            drainStatus.lazySet(PROCESSING);
            drainBuffers();
        } finally {
            // CAS (not a plain set) so we don't clobber a state transition
            // made by another thread (e.g. a concurrent "required" signal)
            // while we were draining. Must happen before unlock.
            drainStatus.compareAndSet(PROCESSING, IDLE);
            evictionLock.unlock();
        }
    }
}
protected Set<Path> performListing(final ProcessContext context, Path path) throws IOException { Set<Path> listing = null; if (listingLock.tryLock()) { try { final FileSystem hdfs = getFileSystem(); // get listing listing = selectFiles(hdfs, path, null); } finally { listingLock.unlock(); } } return listing; }
private void cleanCeQueue() { Lock ceCleaningJobLock = ceDistributedInformation.acquireCleanJobLock(); // If we cannot lock that means that another job is running // So we skip resetting and cancelling tasks in queue if (ceCleaningJobLock.tryLock()) { try { resetTasksWithUnknownWorkerUUIDs(); cancelWornOuts(); } finally { ceCleaningJobLock.unlock(); } } }