// Sizes an operation by the bytes still unwritten in its buffer.
// NOTE(review): fragment of an anonymous class — the enclosing declaration and
// the trailing "};" it closes are outside this view.
@Override public long getBytesSize(Operation operation) { return operation.getBuffer().remaining(); } };
// NOTE(review): garbled fragment — brace structure is unbalanced and
// "TagList tags" is declared twice in what reads as the same scope; this span
// mixes timeout, cancellation, and error handling from a getSome()-style bulk
// read. Recover the original block before editing; flow as written:
//  - incomplete op + bad status: record a continuous timeout
//    (MemcachedConnection.opTimedOut) and, unless zone-fallback (hasZF) is on,
//    build tags from the handling node (EVCacheNodeImpl base tags, otherwise a
//    plain COUNTER tag);
//  - cancelled op: optionally rethrow as ExecutionException(CancellationException);
//  - errored op + throwException: bump the "<app>-getSome-Error" counter and
//    rethrow the op's exception.
if (op.getState() != OperationState.COMPLETE) { if (!status) { MemcachedConnection.opTimedOut(op); if (!hasZF) { TagList tags = null; if(op.getHandlingNode() instanceof EVCacheNodeImpl) { tags = ((EVCacheNodeImpl)op.getHandlingNode()).getBaseTags(); } else { tags = BasicTagList.of(DataSourceType.COUNTER); if(op.isCancelled()) { TagList tags = null; if(op.getHandlingNode() instanceof EVCacheNodeImpl) { tags = ((EVCacheNodeImpl)op.getHandlingNode()).getBaseTags(); } else { tags = BasicTagList.of(DataSourceType.COUNTER); if (throwException) throw new ExecutionException(new CancellationException("Cancelled")); if (op.hasErrored() && throwException) { if (!hasZF) EVCacheMetricsFactory.getCounter(appName, null, serverGroup.getName(), appName + "-getSome-Error", DataSourceType.COUNTER).increment(); throw new ExecutionException(op.getException());
// NOTE(review): garbled fragment with unbalanced braces — appears to be the
// timeout path of a future/latch wait. As written: mark the op timed out,
// derive metric tags from the handling node (EVCacheNodeImpl base tags,
// otherwise a plain COUNTER tag), then rethrow the op's exception when it has
// errored and throwException is set. The trailing "op.isTimedOut()" check is
// cut off mid-block; recover the original before editing.
if (op != null) op.timeOut(); TagList tags = null; if(op.getHandlingNode() instanceof EVCacheNodeImpl) { tags = ((EVCacheNodeImpl)op.getHandlingNode()).getBaseTags(); } else { tags = BasicTagList.of(DataSourceType.COUNTER); if (op != null && op.hasErrored()) { if (throwException) { throw new ExecutionException(op.getException()); if(op.getHandlingNode() instanceof EVCacheNodeImpl) { tags = ((EVCacheNodeImpl)op.getHandlingNode()).getBaseTags(); } else { tags = BasicTagList.of(DataSourceType.COUNTER); if (op != null && op.isTimedOut()) {
public T get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException { if (!latch.await(duration, units)) { // whenever timeout occurs, continuous timeout counter will increase by 1. MemcachedConnection.opTimedOut(op); throw new CheckedOperationTimeoutException( "Timed out waiting for operation. >" + duration + " " + units, op); } else { // continuous timeout counter will be reset MemcachedConnection.opSucceeded(op); } if (op != null && op.hasErrored()) { throw new ExecutionException(op.getException()); } if (op != null && op.isCancelled()) { throw new ExecutionException(new RuntimeException(op.getCancelCause())); } return objRef.get(); }
// NOTE(review): garbled fragment with unbalanced braces — looks like the
// retry/clone path of operation redistribution. As written:
//  - skip ops already cancelled or timed out;
//  - cancel ops cloned more than MAX_CLONE_COUNT times (retry cap);
//  - re-add WRITE_QUEUED ops straight back to their handling node;
//  - for multi-get retries, re-issue a get per retry key;
//  - for other keyed ops, add the clone per key, link it via addClone, and
//    bump its clone count; non-keyed clones that cannot be redistributed are
//    cancelled. Recover the original block before editing.
if (op.isCancelled() || op.isTimedOut()) { return; if (op.getCloneCount() >= MAX_CLONE_COUNT) { getLogger().warn("Cancelling operation " + op + "because it has been " + "retried (cloned) more than " + MAX_CLONE_COUNT + "times."); op.cancel(); return; if (op.getState() == OperationState.WRITE_QUEUED && op.getHandlingNode() != null) { addOperation(op.getHandlingNode(), op); return; for (String key : ((MultiGetOperationImpl) op).getRetryKeys()) { addOperation(key, opFact.get(key, (GetOperation.Callback) op.getCallback())); for (String k : newKeyedOp.getKeys()) { addOperation(k, newop); op.addClone(newop); newop.setCloneCount(op.getCloneCount()+1); newop.cancel(); getLogger().warn("Could not redistribute cloned non-keyed " + "operation", newop); op.cancel();
/**
 * Hands each operation to its target node and wakes the selector so the I/O
 * thread notices the newly queued work.
 *
 * @param ops mapping of destination node to the operation bound for it
 */
public void addOperations(final Map<MemcachedNode, Operation> ops) {
    for (final Map.Entry<MemcachedNode, Operation> entry : ops.entrySet()) {
        final MemcachedNode target = entry.getKey();
        final Operation operation = entry.getValue();
        operation.setHandlingNode(target);
        operation.initialize();
        target.addOp(operation);
        addedQueue.offer(target);
    }
    final Selector awakened = selector.wakeup();
    assert awakened == selector : "Wakeup returned the wrong selector.";
}
public void addAllOpToInputQ(BlockingQueue<Operation> allOp) { for (Operation op : allOp) { op.setHandlingNode(this); if (op.getState() == OperationState.WRITING && op.getBuffer() != null) { op.getBuffer().reset(); // buffer offset reset } else { op.initialize(); // write completed or not yet initialized op.resetState(); // reset operation state } op.setMoved(true); } addOpCount += allOp.size(); allOp.drainTo(inputQueue); }
// NOTE(review): fragment with unbalanced braces — copies an op's outgoing
// bytes into the node write buffer and marks the write complete when the op's
// buffer is drained. "getWbuf().put(b)" references a variable "b" that is not
// defined in this fragment; presumably "byte[] b = new byte[bytesToCopy];
// obuf.get(b);" was lost in extraction — confirm against the original source.
assert o.getState() == OperationState.WRITING; ByteBuffer obuf = o.getBuffer(); assert obuf != null : "Didn't get a write buffer from " + o; int bytesToCopy = Math.min(getWbuf().remaining(), obuf.remaining()); getWbuf().put(b); getLogger().debug("After copying stuff from %s: %s", o, getWbuf()); if (!o.getBuffer().hasRemaining()) { o.writeComplete(); transitionWriteItem();
/**
 * Skims cancelled and timed-out operations off the head of the write queue and
 * returns the first one that is actually ready to write (transitioning it to
 * WRITING and queueing it for its read, except for tap acks). Returns null or
 * a non-WRITE_QUEUED head when there is nothing writable.
 */
private Operation getNextWritableOp() {
    Operation candidate = getCurrentWriteOp();
    while (candidate != null && candidate.getState() == OperationState.WRITE_QUEUED) {
        // Lock the op so its state can't flip between the checks below.
        synchronized (candidate) {
            if (candidate.isCancelled()) {
                getLogger().debug("Not writing cancelled op.");
                final Operation removed = removeCurrentWriteOp();
                assert candidate == removed;
            } else if (candidate.isTimedOut(defaultOpTimeout)) {
                getLogger().debug("Not writing timed out op.");
                final Operation removed = removeCurrentWriteOp();
                assert candidate == removed;
            } else {
                candidate.writing();
                // Tap acks don't expect a response; everything else joins the read queue.
                if (!(candidate instanceof TapAckOperationImpl)) {
                    readQ.add(candidate);
                }
                return candidate;
            }
            candidate = getCurrentWriteOp();
        }
    }
    return candidate;
}
// NOTE(review): fragment with unbalanced braces — cleanup path that rewinds a
// partially written op's buffer and, when the op is no longer the current
// write op, discards it with a warning and cancels it. The duplicated
// warn/cancel pair suggests lines from two branches were fused during
// extraction; recover the original block before editing.
op.cancel(); } else if (op != null) { ByteBuffer buf = op.getBuffer(); if (buf != null) { buf.reset(); if (op != getCurrentWriteOp()) { getLogger().warn("Discarding partially completed op: %s", op); op.cancel(); op = removeCurrentWriteOp(); getLogger().warn("Discarding partially completed op: %s", op); op.cancel();
// NOTE(review): fragment, truncated mid-expression — "Math.min(getWbuf().remaining(),"
// has no second argument, and braces are unbalanced. It mirrors the write-fill
// path: skip cancelled/timed-out head ops, assert the op is WRITING, copy its
// buffer into the node write buffer, and finish the write when the op's buffer
// is drained. Recover the original block before editing.
getWbuf().clear(); Operation o=getCurrentWriteOp(); if (o != null && (o.isCancelled())) { getLogger().debug("Not writing cancelled op."); Operation cancelledOp = removeCurrentWriteOp(); return; if (o != null && o.isTimedOut(defaultOpTimeout)) { getLogger().debug("Not writing timed out op."); Operation timedOutOp = removeCurrentWriteOp(); assert o.getState() == OperationState.WRITING; ByteBuffer obuf=o.getBuffer(); assert obuf != null : "Didn't get a write buffer from " + o; int bytesToCopy=Math.min(getWbuf().remaining(), getLogger().debug("After copying stuff from %s: %s", o, getWbuf()); if(!o.getBuffer().hasRemaining()) { o.writeComplete(); transitionWriteItem();
// NOTE(review): fragment with statements out of order — the stray "+ op;" and
// the trailing "Operation op = node.removeCurrentReadOp(); assert op ==
// currentOp ..." belong to the COMPLETE branch above but were displaced during
// extraction. As written: feed bytes to the current read op; on COMPLETE, pop
// it and mark the overall response success/fail meter; on RETRY
// (NOT_MY_VBUCKET), record the retry info and blacklist the node for that
// vbucket. Recover the original block before editing.
currentOp.readFromBuffer(rbuf); if (currentOp.getState() == OperationState.COMPLETE) { getLogger().debug("Completed read op: %s and giving the next %d " + "bytes", currentOp, rbuf.remaining()); + op; if (op.hasErrored()) { metrics.markMeter(OVERALL_RESPONSE_FAIL_METRIC); } else { metrics.markMeter(OVERALL_RESPONSE_SUCC_METRIC); } else if (currentOp.getState() == OperationState.RETRY) { handleRetryInformation(currentOp.getErrorMsg()); getLogger().debug("Reschedule read op due to NOT_MY_VBUCKET error: " + "%s ", currentOp); ((VBucketAware) currentOp).addNotMyVbucketNode( currentOp.getHandlingNode()); Operation op = node.removeCurrentReadOp(); assert op == currentOp : "Expected to pop " + currentOp + " got "
/**
 * Re-targets in-flight operations after a topology change. Cancelled or
 * timed-out operations are dropped; keyed operations are cloned and re-added
 * per key; non-keyed operations have no definite target and are cancelled.
 *
 * @param ops the operations to redistribute
 */
private void redistributeOperations(Collection<Operation> ops) { for (Operation op : ops) { if (op.isCancelled() || op.isTimedOut()) { continue; } if (op instanceof KeyedOperation) { KeyedOperation ko = (KeyedOperation) op;
// NOTE(review): opFact.clone(ko) is invoked once per key in the nested loop
// below; if clone() already yields per-key operations this enqueues duplicate
// work for multi-key ops — verify clone() semantics against the op factory.
int added = 0; for (String k : ko.getKeys()) { for (Operation newop : opFact.clone(ko)) { addOperation(k, newop); added++; } } assert added > 0 : "Didn't add any new operations when redistributing"; } else { // Cancel things that don't have definite targets. op.cancel(); } } }
public Collection<Operation> destroyWriteQueue(boolean resend) { Collection<Operation> rv = new ArrayList<Operation>(); writeQ.drainTo(rv); if (resend) { for (Operation o : rv) { if (o.getState() == OperationState.WRITING && o.getBuffer() != null) { o.getBuffer().reset(); // buffer offset reset } else { o.initialize(); // write completed or not yet initialized } } } return rv; }
/**
 * Reports cancelled when any underlying operation is cancelled. Every op is
 * still polled (no short-circuit), matching the original aggregation.
 */
@Override
public boolean isCancelled() {
    boolean anyCancelled = false;
    for (final Operation operation : ops) {
        anyCancelled = anyCancelled | operation.isCancelled();
    }
    return anyCancelled;
}
/**
 * Done when the latch has fired, or the operation finished out-of-band by
 * cancellation or completion.
 */
public boolean isDone() {
    assert op != null : "No operation";
    if (latch.getCount() == 0) {
        return true;
    }
    return op.isCancelled() || op.getState() == OperationState.COMPLETE;
}
} // closing brace of the enclosing class (carried over from the original line)
/**
 * Set the continuous timeout on an operation.
 *
 * Ignore operations which have no handling nodes set yet (which may happen
 * before nodes are properly authenticated), and operations that timed out
 * before being sent.
 *
 * @param op the operation to use.
 * @param isTimeout is timed out or not.
 */
private static void setTimeout(final Operation op, final boolean isTimeout) {
    final Logger logger = LoggerFactory.getLogger(MemcachedConnection.class);
    try {
        if (op == null || op.isTimedOutUnsent()) {
            return; // nothing to record for absent or unsent operations
        }
        final MemcachedNode handler = op.getHandlingNode();
        if (handler != null) {
            handler.setContinuousTimeout(isTimeout);
        }
    } catch (Exception e) {
        logger.error(e.getMessage());
    }
}
/** * helper method: do some error checking and set timeout boolean * * @param op * @param isTimeout */ private static void setTimeout(Operation op, boolean isTimeout) { try { if (op == null) { LoggerFactory.getLogger(MemcachedConnection.class).debug("op is null."); return; // op may be null in some cases, e.g. flush } MemcachedNode node = op.getHandlingNode(); if (node == null) { LoggerFactory.getLogger(MemcachedConnection.class).debug("handling node for operation is not set"); } else { if (isTimeout || !op.isCancelled()) node.setContinuousTimeout(isTimeout); } } catch (Exception e) { LoggerFactory.getLogger(MemcachedConnection.class).error(e.getMessage()); } }
/**
 * Waits up to the given duration for the operation, then surfaces timeout,
 * error, or cancellation as the corresponding exception.
 *
 * @throws TimeoutException via CheckedOperationTimeoutException on latch miss
 * @throws ExecutionException when the op errored or this future was cancelled
 */
public T get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException {
    final boolean completed = latch.await(duration, units);
    if (!completed) {
        throw new CheckedOperationTimeoutException(
                "Timed out waiting for operation", op);
    }
    if (op != null && op.hasErrored()) {
        throw new ExecutionException(op.getException());
    }
    if (isCancelled()) {
        throw new ExecutionException(new RuntimeException("Cancelled"));
    }
    return objRef.get();
}