/**
 * Appends data to the store on behalf of a client.
 * Ordering requires at most one outstanding append per connection, so the append is
 * queued under its writer id and the write loop is kicked to pick it up.
 *
 * @param append the append request received from the client; its writer must already
 *               have been registered (i.e. a setup must have occurred) or this fails.
 */
@Override
public void append(Append append) {
    log.trace("Processing append received from client {}", append);
    UUID writerId = append.getWriterId();
    synchronized (lock) {
        // A missing entry means no setup was done for this (segment, writer) pair.
        Long previousEventNumber = latestEventNumbers.get(Pair.of(append.getSegment(), writerId));
        Preconditions.checkState(previousEventNumber != null, "Data from unexpected connection: %s.", writerId);
        // Event numbers must not move backwards; equal is tolerated (retransmission).
        Preconditions.checkState(append.getEventNumber() >= previousEventNumber, "Event was already appended.");
        waitingAppends.put(writerId, append);
    }
    // Apply backpressure if the queue grew too large, then try to issue the next write.
    pauseOrResumeReading();
    performNextWrite();
}
/**
 * Validates an incoming append against its session before it is encoded.
 * Checks, in order: the session exists and matches the writer, the event number is
 * strictly increasing, and the append is not conditional (conditional appends use a
 * dedicated message type). Finally asserts the encoder's block-accounting invariant.
 *
 * @param append  the append to validate.
 * @param session the session previously established for this writer; may be null if
 *                no setup occurred, which is rejected.
 * @throws InvalidMessageException  if the session is missing/mismatched or events arrive out of order.
 * @throws IllegalArgumentException if a conditional append is sent through this path.
 */
private void validateAppend(Append append, Session session) {
    boolean noValidSession = session == null || !session.id.equals(append.getWriterId());
    if (noValidSession) {
        throw new InvalidMessageException("Sending appends without setting up the append.");
    }
    boolean outOfOrder = append.getEventNumber() <= session.lastEventNumber;
    if (outOfOrder) {
        throw new InvalidMessageException("Events written out of order. Received: " + append.getEventNumber()
                + " following: " + session.lastEventNumber);
    }
    if (append.isConditional()) {
        throw new IllegalArgumentException("Conditional appends should be written via a ConditionalAppend object.");
    }
    // A partially-filled block must still have room for a type+length header.
    Preconditions.checkState(bytesLeftInBlock == 0 || bytesLeftInBlock > TYPE_PLUS_LENGTH_SIZE,
            "Bug in CommandEncoder.encode, block is too small.");
}
/**
 * Issues the append to the segment store.
 * The write carries two attribute updates: a conditional (ReplaceIfEquals) bump of the
 * writer's event number — which makes duplicate/out-of-order writes fail at the store —
 * and an accumulated event count.
 *
 * @param append the append to persist; assumes its (segment, writer) pair is already
 *               tracked in {@code latestEventNumbers} (guaranteed by {@code append()}).
 * @return a future that completes when the store acknowledges (or rejects) the write.
 */
private CompletableFuture<Void> storeAppend(Append append) {
    long expectedLastEventNumber;
    synchronized (lock) {
        expectedLastEventNumber = latestEventNumbers.get(Pair.of(append.getSegment(), append.getWriterId()));
    }
    List<AttributeUpdate> attributes = Arrays.asList(
            new AttributeUpdate(append.getWriterId(), AttributeUpdateType.ReplaceIfEquals,
                    append.getEventNumber(), expectedLastEventNumber),
            new AttributeUpdate(EVENT_COUNT, AttributeUpdateType.Accumulate, append.getEventCount()));
    // Copy the payload out of the (possibly pooled) buffer before handing it to the store.
    ByteBuf data = append.getData().asReadOnly();
    byte[] payload = new byte[data.readableBytes()];
    data.readBytes(payload);
    return append.isConditional()
            ? store.append(append.getSegment(), append.getExpectedLength(), payload, attributes, TIMEOUT)
            : store.append(append.getSegment(), payload, attributes, TIMEOUT);
}
long previousEventNumber; synchronized (lock) { previousEventNumber = latestEventNumbers.get(Pair.of(append.getSegment(), append.getWriterId())); Preconditions.checkState(outstandingAppend == append, "Synchronization error in: %s while processing append: %s.", if (conditionalFailed) { log.debug("Conditional append failed due to incorrect offset: {}, {}", append, exception.getMessage()); connection.send(new ConditionalCheckFailed(append.getWriterId(), append.getEventNumber())); } else { handleException(append.getWriterId(), append.getEventNumber(), append.getSegment(), "appending data", exception); final DataAppended dataAppendedAck = new DataAppended(append.getWriterId(), append.getEventNumber(), previousEventNumber); log.trace("Sending DataAppended : {}", dataAppendedAck); outstandingAppend = null; if (exception == null) { latestEventNumbers.put(Pair.of(append.getSegment(), append.getWriterId()), append.getEventNumber()); } else { if (!conditionalFailed) { waitingAppends.removeAll(append.getWriterId()); latestEventNumbers.remove(Pair.of(append.getSegment(), append.getWriterId())); performNextWrite(); } catch (Throwable e) { handleException(append.getWriterId(), append.getEventNumber(), append.getSegment(), "handling append result", e);