/**
 * Publishes a RELEASE message so the upstream socket reader can reclaim the
 * buffer space up to {@code netPosition} for this connection.
 *
 * @param connectionId id of the connection whose data is fully consumed
 * @param netPosition  network pipe position that may now be released
 */
private void releaseSocketData(long connectionId, long netPosition) {
    // presumeRoomForWrite blocks/asserts rather than dropping the release.
    Pipe.presumeRoomForWrite(ackReleaseForResponseParser);
    FragmentWriter.writeLL(ackReleaseForResponseParser, ReleaseSchema.MSG_RELEASE_100, connectionId, netPosition);
}
private int stopReSendingMessage(Pipe<MQTTClientToServerSchema> clientToSerer, int packetId) { //////////////////////// ///stop re-sending the message /////////////////////// Pipe.presumeRoomForWrite(clientToServerAck); FragmentWriter.writeI(clientToServerAck, MQTTClientToServerSchemaAck.MSG_STOPREPUBLISH_99, packetId); return packetId; }
/**
 * Persists the published message payload to the blob store, keyed by packetId,
 * so it can be re-sent after a restart of a persistent session.
 *
 * @param blobPosition start offset of the payload within {@code blob}
 * @param blobConsumed number of payload bytes to store
 * @param blob         backing byte array holding the payload
 * @param packetId     MQTT packet id used as the persisted block's key
 */
private void storePublishedPosPersisted(int blobPosition, int blobConsumed, byte[] blob, final int packetId) {
    Pipe.presumeRoomForWrite(persistBlobStoreProducer);
    FragmentWriter.writeLV(persistBlobStoreProducer, PersistedBlobStoreProducerSchema.MSG_BLOCK_1,
            packetId, //persist store supports long but we only have a packetId.
            blob, blobPosition, blobConsumed );
}
/**
 * Asks the persisted blob store to replay every stored block; replayed
 * blocks arrive asynchronously on the store's response pipe.
 */
private void requestReplayOfPersistedCollection() {
    Pipe.presumeRoomForWrite(persistBlobStoreConsumer);
    FragmentWriter.write(persistBlobStoreConsumer, PersistedBlobStoreConsumerSchema.MSG_REQUESTREPLAY_6);
}
/**
 * Asks the persisted blob store to discard every stored block
 * (used when the session no longer needs its persisted state).
 */
private void clearPersistedCollection() {
    Pipe.presumeRoomForWrite(persistBlobStoreConsumer);
    FragmentWriter.write(persistBlobStoreConsumer, PersistedBlobStoreConsumerSchema.MSG_CLEAR_12);
}
public static <S extends MessageSchema<S>> boolean monitor(Pipe<S> sourcePipe, long sourceSlabPos, int sourceBlobPos) { if (isMonitored(sourcePipe.id)) { Pipe<S> localTarget = targetPipes[sourcePipe.id]; //will report errors if the logger does not keep this pipe clear //but it also blocks to ensure nothing is ever lost Pipe.presumeRoomForWrite(localTarget); Pipe.copyFragment(sourcePipe, sourceSlabPos, sourceBlobPos, localTarget); } return true; }
public static <S extends MessageSchema<S>> boolean monitor(Pipe<S> sourcePipe, long sourceSlabPos, int sourceBlobPos) { if (isMonitored(sourcePipe.id)) { Pipe<S> localTarget = targetPipes[sourcePipe.id]; //will report errors if the logger does not keep this pipe clear //but it also blocks to ensure nothing is ever lost Pipe.presumeRoomForWrite(localTarget); Pipe.copyFragment(sourcePipe, sourceSlabPos, sourceBlobPos, localTarget); } return true; }
/**
 * Copies the fragment at the given slab/blob position onto the monitoring
 * pipe registered for this pipe id, when monitoring is enabled.
 *
 * @return always {@code true} (allows use within an {@code assert})
 */
public static <S extends MessageSchema<S>> boolean monitor(Pipe<S> sourcePipe, long sourceSlabPos, int sourceBlobPos) {
    if (isMonitored(sourcePipe.id)) {
        Pipe<S> localTarget = targetPipes[sourcePipe.id];
        //will report errors if the logger does not keep this pipe clear
        //but it also blocks to ensure nothing is ever lost
        Pipe.presumeRoomForWrite(localTarget);
        Pipe.copyFragment(sourcePipe, sourceSlabPos, sourceBlobPos, localTarget);
    }
    return true;
}
private void clearAllStoredData() { int i = fileControl.length; clearInProgress = (byte)i; clearIdMap(); isDirty = false; fileSizeWritten = 0; while (--i>=0) { //clear every file Pipe<SequentialCtlSchema> output = fileControl[i]; Pipe.presumeRoomForWrite(output); FragmentWriter.write(output, SequentialCtlSchema.MSG_CLEAR_2); } }
public void ackPublishedPos(int packetId) { if (isPersistantSession) { //logger.trace("BBB must clear top level ack from drive {} ",packetId); Pipe.presumeRoomForWrite(persistBlobStoreConsumer); FragmentWriter.writeL(persistBlobStoreConsumer, PersistedBlobStoreConsumerSchema.MSG_RELEASE_7, packetId ); //will clear locally upon ack } else { //logger.trace("BBB must clear top level ack form memory {} ",packetId); ackPublishedPosLocal(packetId); } }
/**
 * Publishes a release record for the given connection using its current
 * sequence number. Blocks until there is room on the release pipe.
 *
 * @param source   payload pipe the release refers to
 * @param release  pipe the release record is written to
 * @param cc       connection whose data is being released
 * @param isServer true when running on the server side of the connection
 */
private static void sendRelease(Pipe<NetPayloadSchema> source, Pipe<ReleaseSchema> release, BaseConnection cc, boolean isServer) {
    Pipe.presumeRoomForWrite(release);
    sendReleaseRec(source, release, cc, cc.getSequenceNo(), isServer);
}
/**
 * Stage startup: requests metadata from each data file (skipping the ack
 * index file, which is the last entry in {@code fileControl}) and prepares
 * the id map. {@code waitCount} tracks how many meta responses are pending.
 */
@Override
public void startup() {
    assert(fileControl.length == 3);
    int i = fileControl.length-1; //skip the ack index file
    this.waitCount = i;
    assert(waitCount==2) : "only 2 is supported";
    // Ask each data file (indices 0..waitCount-1) for its metadata.
    while (--i>=0) {
        Pipe<SequentialCtlSchema> output = fileControl[i];
        Pipe.presumeRoomForWrite(output);
        FragmentWriter.write(output, SequentialCtlSchema.MSG_METAREQUEST_3);
    }
    //will grow as needed
    mapSet = new LongHashSet(15);
}
private void markDoneAndRelease(int idx) { //System.err.println("done with connection"); ((Buffer)workingBuffers[idx]).clear(); writeToChannel[idx]=null; int sequenceNo = 0;//not available here if (null!=releasePipe) { Pipe.presumeRoomForWrite(releasePipe); publishRelease(releasePipe, activeIds[idx], activeTails[idx]!=-1?activeTails[idx]: Pipe.tailPosition(input[idx]), sequenceNo); } }
/**
 * Blocks this command channel until the given wall-clock time by publishing
 * a BLOCKCHANNELUNTIL message on the channel's go pipe. Logs and does
 * nothing when the channel has no go pipe.
 *
 * @param timeMS absolute time (ms) until which the channel is blocked
 * @param gcc    command channel whose go pipe carries the block request
 */
public static void publishBlockChannelUntil(long timeMS, MsgCommandChannel<?> gcc) {
    if (null != gcc.goPipe) {
        Pipe.presumeRoomForWrite(gcc.goPipe);
        int size = Pipe.addMsgIdx(gcc.goPipe, TrafficOrderSchema.MSG_BLOCKCHANNELUNTIL_23);
        Pipe.addLongValue(timeMS, gcc.goPipe);
        // FIX: this is the producing side of goPipe, so the low-level WRITE
        // must be confirmed; the original called confirmLowLevelRead here,
        // unlike every other producer in this codebase.
        Pipe.confirmLowLevelWrite(gcc.goPipe, size);
        Pipe.publishWrites(gcc.goPipe);
    } else {
        logger.info("Unable to use block channel for ns without an additional feature or USE_DELAY can be added.");
    }
}
/**
 * Blocks this command channel for the given duration by publishing a
 * BLOCKCHANNEL message on the channel's go pipe. Logs and does nothing
 * when the channel has no go pipe.
 *
 * @param durationNanos how long (ns) the channel should remain blocked
 * @param gcc           command channel whose go pipe carries the request
 */
public static void publishBlockChannel(long durationNanos, MsgCommandChannel<?> gcc) {
    if (null != gcc.goPipe) {
        Pipe.presumeRoomForWrite(gcc.goPipe);
        int size = Pipe.addMsgIdx(gcc.goPipe, TrafficOrderSchema.MSG_BLOCKCHANNEL_22);
        Pipe.addLongValue(durationNanos, gcc.goPipe);
        // FIX: producer side must confirm the low-level WRITE; the original
        // called confirmLowLevelRead, inconsistent with all other producers.
        Pipe.confirmLowLevelWrite(gcc.goPipe, size);
        Pipe.publishWrites(gcc.goPipe);
    } else {
        logger.info("Unable to use block channel for ns without an additional feature use or USE_DELAY can be added.");
    }
}
/**
 * Publishes a RELEASE for connection {@code ccId} at the recorded position,
 * then resets the parser state for this slot.
 *
 * @param stateIdx index into {@code positionMemoData} to reset to zero
 * @param ccId     connection id the release refers to
 * @param position per-slot positions; {@code position[i]} is released and
 *                 then marked consumed with -1
 * @param i        slot index within {@code position}
 * @return always 1 (one fragment published)
 */
private int sendRelease(final int stateIdx, long ccId, long[] position, int i) {
    Pipe.presumeRoomForWrite(releasePipe);
    final int msgSize = Pipe.addMsgIdx(releasePipe, ReleaseSchema.MSG_RELEASE_100);
    Pipe.addLongValue(ccId, releasePipe);
    Pipe.addLongValue(position[i], releasePipe);
    Pipe.confirmLowLevelWrite(releasePipe, msgSize);
    Pipe.publishWrites(releasePipe);
    // Reset parser state and mark this slot's position as consumed.
    positionMemoData[stateIdx] = 0;
    position[i] = -1;
    return 1;
}
/**
 * Publishes a RELEASEWITHSEQ for the given channel at the slab position
 * recorded for slot {@code idx}, including the current sequence number so
 * downstream consumers can continue counting from it. No-op when no slab
 * position is pending (sentinel -1).
 *
 * @param channel connection/channel id; must be non-negative
 * @param idx     slot index into {@code inputSlabPos}/{@code sequences}
 */
private void sendRelease(long channel, final int idx) {
    assert(channel>=0) : "channel must exist";
    if (inputSlabPos[idx]>=0) {
        Pipe.presumeRoomForWrite(releasePipe);
        int s = Pipe.addMsgIdx(releasePipe, ReleaseSchema.MSG_RELEASEWITHSEQ_101);
        Pipe.addLongValue(channel, releasePipe);
        Pipe.addLongValue(inputSlabPos[idx], releasePipe);
        Pipe.addIntValue(sequences[idx], releasePipe); //send current sequence number so others can continue at this count.
        Pipe.confirmLowLevelWrite(releasePipe, s);
        Pipe.publishWrites(releasePipe);
        // Mark this slot's slab position as consumed.
        this.inputSlabPos[idx]=-1;
    }
}
/**
 * Writes one HTTP-request log record to the router's log pipe, copying the
 * raw request bytes out of the selected input pipe's blob.
 *
 * Caller must have already verified room on the log pipe (see note below).
 *
 * @param that          router stage owning the log pipe and trie reader
 * @param selectedInput input pipe whose blob holds the raw request bytes
 * @param channel       connection id of the request
 * @param arrivalTime   request arrival timestamp
 * @param seqForLogging sequence number recorded with the log entry
 * @param posForLogging start position of the request bytes in the blob
 * @param totalConsumed number of request bytes to copy into the log entry
 */
private static void logTraffic(HTTP1xRouterStage that, Pipe<NetPayloadSchema> selectedInput,
        final long channel, long arrivalTime, int seqForLogging, int posForLogging, int totalConsumed) {
    //this logs every input at this point
    Pipe<HTTPLogRequestSchema> logOut = that.log;
    Pipe.presumeRoomForWrite(logOut);//checked above
    int size = Pipe.addMsgIdx(logOut, HTTPLogRequestSchema.MSG_REQUEST_1);
    Pipe.addLongValue(arrivalTime, logOut);
    Pipe.addLongValue(channel, logOut);
    Pipe.addIntValue(seqForLogging, logOut);
    // Copy the raw bytes; sourceMask wraps reads around the ring's blob.
    Pipe.addByteArray(Pipe.blob(selectedInput), posForLogging, totalConsumed, that.trieReader.sourceMask, logOut);
    Pipe.confirmLowLevelWrite(logOut, size);
    Pipe.publishWrites(logOut);
}
private void requestPing(long now, long connectionId, Pipe<NetPayloadSchema> server) { Pipe.presumeRoomForWrite(server); int size = Pipe.addMsgIdx(server, NetPayloadSchema.MSG_PLAIN_210); Pipe.addLongValue(connectionId, server); Pipe.addLongValue(now, server); Pipe.addLongValue(0, server); //always use zero for client requests DataOutputBlobWriter<NetPayloadSchema> output = Pipe.openOutputStream(server); output.writeByte(0xC0); output.writeByte(0x00); output.closeLowLevelField(); Pipe.confirmLowLevelWrite(server, size); Pipe.publishWrites(server); //logger.info("wrote block of {}",len2); }
/**
 * Consumes a CLOSECONNECTION request from {@code requestPipe}, flags the
 * connection as disconnecting, and publishes a DISCONNECT on the payload
 * pipe so downstream stages close it.
 *
 * The four takeInt/takeLong reads are mandatory field consumption for
 * MSG_CLOSECONNECTION_104 — removing any of them corrupts the pipe's
 * read position, even though their values are unused here.
 *
 * @param requestPipe      pipe positioned at the CLOSECONNECTION fields
 * @param connectionToKill connection to mark as disconnecting
 * @param pipe             payload pipe that receives the DISCONNECT message
 */
public static void cleanCloseConnection(Pipe<ClientHTTPRequestSchema> requestPipe, ClientConnection connectionToKill, Pipe<NetPayloadSchema> pipe) {
    //must consume each field so we can move forward, this is required or the position will be off...
    //matches ClientHTTPRequestSchema.MSG_CLOSECONNECTION_104
    int session = Pipe.takeInt(requestPipe);
    int port = Pipe.takeInt(requestPipe);
    int hostId = Pipe.takeInt(requestPipe);
    long conId = Pipe.takeLong(requestPipe);
    //do not close that will be done by last stage
    //must be done first before we send the message
    connectionToKill.beginDisconnect();
    Pipe.presumeRoomForWrite(pipe);
    int size = Pipe.addMsgIdx(pipe, NetPayloadSchema.MSG_DISCONNECT_203);
    Pipe.addLongValue(connectionToKill.getId(), pipe);// NetPayloadSchema.MSG_DISCONNECT_203_FIELD_CONNECTIONID_201, connectionToKill.getId());
    Pipe.confirmLowLevelWrite(pipe, size);
    Pipe.publishWrites(pipe);
}