/**
 * Runs a trie query against the blob currently being written into the reader's
 * working pipe. The pending write is finished and published first, then the
 * same single message is consumed by the query and released.
 *
 * @param reader parser reader whose workingPipe holds the bytes to match
 * @param trie   parse tree to query
 * @return result of {@link TrieParserReader#query} (match value, or its miss sentinel)
 */
public static long blobQuery(TrieParserReader reader, TrieParser trie) {
    // Close out the in-progress blob field and publish it so it becomes readable.
    Pipe.outputStream(reader.workingPipe).closeLowLevelField();
    Pipe.publishWrites(reader.workingPipe);
    Pipe.confirmLowLevelWrite(reader.workingPipe, Pipe.sizeOf(reader.workingPipe, RawDataSchema.MSG_CHUNKEDSTREAM_1));
    /// Pipe.takeMsgIdx(reader.workingPipe);
    // NOTE(review): takeMsgIdx above is commented out — presumably query() consumes
    // the message header itself; confirm against TrieParserReader.query.
    long result = TrieParserReader.query(reader,trie,reader.workingPipe,-1);
    // Consume and release the single chunked-stream message we just published.
    Pipe.confirmLowLevelRead(reader.workingPipe, Pipe.sizeOf(reader.workingPipe, RawDataSchema.MSG_CHUNKEDSTREAM_1));
    Pipe.releaseReadLock(reader.workingPipe);
    return result;
}
/**
 * Opens the channel reader positioned at the beginning of the next message.
 * Marks the tail first so the read can later be rewound if needed.
 *
 * @return an open ChannelReader, or null when the pipe has no readable message
 */
public ChannelReader beginRead() {
    if (!Pipe.hasContentToRead(pipe)) {
        return null; // nothing published yet
    }
    Pipe.markTail(pipe);
    final int msg = Pipe.takeMsgIdx(pipe);
    if (msg < 0) {
        return null; // end-of-stream / shutdown marker, not a data message
    }
    isReading = true;
    return Pipe.openInputStream(pipe);
}
/**
 * Writes one fragment of shape [long, int, int] to the pipe using the
 * low-level API and publishes it.
 *
 * @param pipe   target pipe (must have room for one fragment)
 * @param msgIdx message template index for a long+int+int fragment
 * @param field1 long field value
 * @param field2 first int field value
 * @param field3 second int field value
 */
public static <S extends MessageSchema<S>> void writeLII(Pipe<S> pipe, int msgIdx, long field1, int field2, int field3) {
    assert(FieldReferenceOffsetManager.isValidMsgIdx(Pipe.from(pipe), msgIdx));
    // long(2) + int(1) + int(1) + header(2) == 6 slots; fixed message was garbled ("does not this").
    assert(6==Pipe.from(pipe).fragDataSize[msgIdx]) : "This constant does not match this fragment size";
    int size = Pipe.addMsgIdx(pipe, msgIdx);
    Pipe.addLongValue(field1, pipe);
    Pipe.addIntValue(field2, pipe);
    Pipe.addIntValue(field3, pipe);
    Pipe.confirmLowLevelWrite(pipe, size);
    Pipe.publishWrites(pipe);
}
/**
 * Reads one single-int fragment from the pipe using the low-level API,
 * releasing the read lock before returning.
 *
 * @param pipe source pipe; caller must have confirmed content is available
 * @return the int field value of the consumed fragment
 */
public static <S extends MessageSchema<S>> int readI(Pipe<S> pipe) {
    int msgIdx = Pipe.takeMsgIdx(pipe);
    // int(1) + header(2) == 3 slots; fixed assertion message was garbled ("does not this").
    assert(3==Pipe.from(pipe).fragDataSize[msgIdx]) : "This constant does not match this fragment size";
    int value = Pipe.takeInt(pipe);
    Pipe.confirmLowLevelRead(pipe, 3);
    Pipe.releaseReadLock(pipe);
    return value;
}
/**
 * Releases the expected-input fragment held open since the matching open call,
 * if one is pending; otherwise does nothing.
 */
@Override
public void visitTemplateClose(String name, long id) {
    if (!needsClose) {
        return; // no fragment pending release
    }
    needsClose = false;
    Pipe.confirmLowLevelRead(expectedInput, Pipe.sizeOf(expectedInput, activeCursor));
    Pipe.releaseReadLock(expectedInput);
}
/**
 * Reads one single-long fragment from the pipe using the low-level API,
 * releasing the read lock before returning.
 *
 * @param pipe source pipe; caller must have confirmed content is available
 * @return the long field value of the consumed fragment
 */
public static <S extends MessageSchema<S>> long readL(Pipe<S> pipe) {
    int msgIdx = Pipe.takeMsgIdx(pipe);
    // long(2) occupies one field but header(2)+... yields 3 slots total here (matches assert);
    // fixed assertion message was garbled ("does not this").
    assert(3==Pipe.from(pipe).fragDataSize[msgIdx]) : "This constant does not match this fragment size";
    long value = Pipe.takeLong(pipe);
    Pipe.confirmLowLevelRead(pipe, 3);
    Pipe.releaseReadLock(pipe);
    return value;
}
private void logRequestNow(Pipe<HTTPLogRequestSchema> p, Pipe<RawDataSchema> output) { int msgId = Pipe.takeMsgIdx(p); assert(msgId == HTTPLogRequestSchema.MSG_REQUEST_1); long timeNS = Pipe.takeLong(p); //time long chnl = Pipe.takeLong(p); //channelId int seq = Pipe.takeInt(p); //sequenceId DataInputBlobReader<HTTPLogRequestSchema> header = Pipe.openInputStream(p); //head //batch the writes.. int esitmate = 100+header.available(); batchMessages(output, esitmate); publishLogMessage(timeNS, chnl, seq, -1, BYTES_REQUEST, header, Pipe.outputStream(output)); Pipe.confirmLowLevelRead(p, Pipe.sizeOf(p, HTTPLogRequestSchema.MSG_REQUEST_1)); Pipe.releaseReadLock(p); }
// NOTE(review): fragment — the enclosing method and its closing braces are not
// visible here. As written, every statement after the first `return` is
// unreachable (a Java compile error); the repeated `resetTail(p); return;`
// lines look like back-pressure guards whose `if (...)` conditions were lost
// when this snippet was extracted. Reconstruct the guards from the original
// source before using — TODO confirm against the full file.
while (Pipe.hasContentToRead(p)) {
    // Mark tail so the message can be re-read on a later pass if we bail out.
    Pipe.markTail(p);
    int msgIdx = Pipe.takeMsgIdx(p);
    Pipe.resetTail(p);
    return;//continue later and repeat this same value.
    Pipe.resetTail(p);
    return;//continue later and repeat this same value.
    long value = Pipe.takeLong(p);
    DataInputBlobReader<PersistedBlobLoadConsumerSchema> stream = Pipe.openInputStream(p);
    Pipe.resetTail(p);
    return;//continue later and repeat this same value.
    Pipe.confirmLowLevelRead(p, Pipe.sizeOf(p,msgIdx));
    Pipe.releaseReadLock(p);
/**
 * Resolves a (possibly dynamic) topic to its private-topic token by rendering
 * the topic bytes into a lazily created scratch pipe and looking them up.
 *
 * @param topic topic to resolve
 * @return token for the topic, or -1 when private topics are not in use
 */
int tokenForPrivateTopic(TopicWritable topic) {
    if (null==publishPrivateTopics) {
        return -1; // private topics disabled
    }
    // Lazily allocate the scratch pipe used to serialize the topic text.
    if (null==tempTopicPipe) {
        tempTopicPipe = RawDataSchema.instance.newPipe(2, maxDynamicTopicLength);
        tempTopicPipe.initBuffers();
    }
    // Write the topic into the scratch pipe as one chunked-stream message...
    int size = Pipe.addMsgIdx(tempTopicPipe, RawDataSchema.MSG_CHUNKEDSTREAM_1);
    DataOutputBlobWriter<RawDataSchema> output = Pipe.openOutputStream(tempTopicPipe);
    topic.write(output);
    DataOutputBlobWriter.closeLowLevelField(output);
    Pipe.confirmLowLevelWrite(tempTopicPipe, size);
    Pipe.publishWrites(tempTopicPipe);
    // ...then immediately consume that same message for the token lookup.
    Pipe.takeMsgIdx(tempTopicPipe);
    int token = publishPrivateTopics.getToken(tempTopicPipe);
    Pipe.confirmLowLevelRead(tempTopicPipe, size);
    Pipe.releaseReadLock(tempTopicPipe);
    return token;
}
/**
 * Accumulates consecutive chunked-stream fragments from the target pipe into
 * the reader. Fragments are confirmed but intentionally NOT released
 * (readNextWithoutReleasingReadLock) so the accumulated bytes stay valid.
 *
 * @param reader     destination accumulating reader
 * @param targetPipe source pipe of RawDataSchema chunks
 */
public static void appendNextFieldToReader(LittleEndianDataInputBlobReader reader, Pipe<RawDataSchema> targetPipe) {
    // peekInt >= 0 filters out end-of-stream / shutdown markers (negative msg ids).
    while (Pipe.hasContentToRead(targetPipe) && Pipe.peekInt(targetPipe) >=0) {
        Pipe.takeMsgIdx(targetPipe);
        accumLowLevelAPIField(reader);
        Pipe.readNextWithoutReleasingReadLock(targetPipe);
        Pipe.confirmLowLevelRead(targetPipe, Pipe.sizeOf(targetPipe, RawDataSchema.MSG_CHUNKEDSTREAM_1));
    }
}
/**
 * Captures the backing array, mask, position and length of the next message's
 * byte field into this object's fields for later direct access.
 */
private void extractBytes() {
    Pipe.takeMsgIdx(pipe);
    int meta = Pipe.takeByteArrayMetaData(pipe);
    length = Pipe.takeByteArrayLength(pipe);
    backing = Pipe.byteBackingArray(meta, pipe);
    mask = Pipe.blobMask(pipe);
    position = Pipe.bytePosition(meta, pipe, length);
    // NOTE(review): this method CONSUMES a message but then calls the write-side
    // confirmLowLevelWrite/publishWrites rather than confirmLowLevelRead/
    // releaseReadLock. That may be a deliberate single-owner scratch-pipe trick,
    // but it looks inverted — confirm against other consumers in this file.
    Pipe.confirmLowLevelWrite(pipe);
    Pipe.publishWrites(pipe);
}
/**
 * Consumes one structured chunked-stream message from input and publishes one
 * message on output.
 *
 * NOTE(review): truncated in this view — the closing brace is missing. Within
 * the visible span, {@code that}, {@code inStream} and {@code outStream} are
 * never used and no closeLowLevelField is called on the opened output stream
 * before publishing; presumably the operator invocation that fills the output
 * was cut off — confirm against the full source.
 */
public static <T> void processAssociatedOperator(Pipe<RawDataSchema> input, T that, Pipe<RawDataSchema> output) {
    int msgIdx = Pipe.takeMsgIdx(input);
    ChannelReader inStream = Pipe.openInputStream(input);
    assert(inStream.isStructured());
    int size = Pipe.addMsgIdx(output, RawDataSchema.MSG_CHUNKEDSTREAM_1);
    ChannelWriter outStream = Pipe.openOutputStream(output);
    Pipe.confirmLowLevelWrite(output, size);
    Pipe.publishWrites(output);
    Pipe.confirmLowLevelRead(input, SIZE_OF);
    Pipe.releaseReadLock(input);
/**
 * Publishes on a private topic via a writable that may cancel.
 * The blob stream is deliberately opened BEFORE addMsgIdx so the message
 * header is only reserved once the writable has decided not to cancel.
 *
 * @param token    private-topic token selecting the output pipe
 * @param writable producer of the payload; may return Cancel
 * @return the writable's result, or Retry when the pipe has no room
 */
FailableWrite publishFailableOnPrivateTopic(int token, FailableWritable writable) {
    //this is a private topic
    Pipe<MessagePrivate> output = publishPrivateTopics.getPipe(token);
    if (Pipe.hasRoomForWrite(output)) {
        DataOutputBlobWriter<MessagePrivate> writer = Pipe.openOutputStream(output);
        FailableWrite result = writable.write(writer);
        if (result == FailableWrite.Cancel) {
            // Abandon the speculative blob bytes; nothing was published.
            output.closeBlobFieldWrite();
        } else {
            // Commit: reserve the message header, close the field, publish.
            int size = Pipe.addMsgIdx(output, MessagePrivate.MSG_PUBLISH_1);
            DataOutputBlobWriter.closeLowLevelField(writer);
            Pipe.confirmLowLevelWrite(output, size);
            Pipe.publishWrites(output);
        }
        return result;
    } else {
        return FailableWrite.Retry; // back-pressure: caller should try again
    }
}
/**
 * Verifies the sequence length against the next expected value read from the
 * expectedInput pipe, then releases that fragment.
 *
 * @throws AssertionError when the observed length differs from the expected one
 */
@Override
public void visitSequenceOpen(String name, long id, int length) {
    final int expectedLen = Pipe.takeInt(expectedInput);
    if (expectedLen != length) {
        throw new AssertionError("expected length: "+Long.toHexString(expectedLen)+" but got "+Long.toHexString(length));
    }
    needsClose = false;
    Pipe.confirmLowLevelRead(expectedInput, Pipe.sizeOf(expectedInput, activeCursor));
    Pipe.releaseReadLock(expectedInput);
}
/**
 * Copies available bytes from the source pipe into the current target pipe
 * as chunked-stream messages, releasing the copied bytes from the source.
 *
 * NOTE(review): truncated in this view — closing braces are missing, and
 * {@code t} and {@code toCopyLength} are not declared in the visible span
 * (presumably {@code t = targets[targetPipeIdx]} and the chunk size are
 * computed in the cut-off portion) — confirm against the full source.
 */
private void processData() {
    DataInputBlobReader<RawDataSchema> inputStream = Pipe.inputStream(source);
    while (targetRemaining>0
            && inputStream.available()>0
            && Pipe.hasRoomForWrite(targets[targetPipeIdx]) ) {
        int size = Pipe.addMsgIdx(t, RawDataSchema.MSG_CHUNKEDSTREAM_1);
        DataOutputBlobWriter<RawDataSchema> outputStream = Pipe.openOutputStream(t);
        inputStream.readInto(outputStream, toCopyLength);
        DataOutputBlobWriter.closeLowLevelField(outputStream);
        Pipe.confirmLowLevelWrite(t, size);
        Pipe.publishWrites(t);
        // Release only the bytes actually copied; the rest stays pending.
        Pipe.releasePendingAsReadLock(source, toCopyLength);
private void sendOrderedResults(int totalBuckets, long totalSum) { int size = Pipe.addMsgIdx(output, ProbabilitySchema.MSG_SELECTION_1); Pipe.addLongValue(totalSum, output); Pipe.addIntValue(totalBuckets, output); DataOutputBlobWriter<ProbabilitySchema> writer = Pipe.openOutputStream(output); for(int c=0;c<totalBuckets;c++) { writer.writePackedLong(sortWorkspace[c][0]); //count for this writer.writePackedLong(sortWorkspace[c][1]); //index location for this } DataOutputBlobWriter.closeLowLevelField(writer); Pipe.confirmLowLevelWrite(output, size); Pipe.publishWrites(output); }
private void batchMessages(Pipe<RawDataSchema> output, int esitmate) { if ((!messageOpen) || (Pipe.outputStream(output).remaining() < (esitmate+(1<<12))) ) { if (messageOpen) { //add to end of each file, when there is room. if (Pipe.outputStream(output).remaining()>(1<<12)) { Pipe.outputStream(output).append("\n"); etr.report(Pipe.outputStream(output)); } DataOutputBlobWriter.closeLowLevelField(Pipe.outputStream(output)); Pipe.confirmLowLevelWrite(output, Pipe.sizeOf(output, RawDataSchema.MSG_CHUNKEDSTREAM_1)); Pipe.publishWrites(output); messageOpen=false; } Pipe.addMsgIdx(output, RawDataSchema.MSG_CHUNKEDSTREAM_1); Pipe.openOutputStream(output); messageOpen=true; } }
/**
 * Writes a two-byte ping payload to the server connection.
 * The bytes 0xC0 0x00 appear to form an MQTT PINGREQ packet — confirm
 * against the protocol this stage serves.
 *
 * @param now          current time, stored as the message's arrival/position field
 * @param connectionId target connection
 * @param server       network payload pipe toward the server
 */
private void requestPing(long now, long connectionId, Pipe<NetPayloadSchema> server) {
    Pipe.presumeRoomForWrite(server);
    int size = Pipe.addMsgIdx(server, NetPayloadSchema.MSG_PLAIN_210);
    Pipe.addLongValue(connectionId, server);
    Pipe.addLongValue(now, server);
    Pipe.addLongValue(0, server); //always use zero for client requests
    DataOutputBlobWriter<NetPayloadSchema> output = Pipe.openOutputStream(server);
    output.writeByte(0xC0);
    output.writeByte(0x00);
    output.closeLowLevelField();
    Pipe.confirmLowLevelWrite(server, size);
    Pipe.publishWrites(server);
    //logger.info("wrote block of {}",len2);
}
/**
 * Opens a new chunked-stream message for writing, marking the head so the
 * write can later be rolled back.
 *
 * @return the ChannelWriter for the new message, or null when the pipe is full
 */
public ChannelWriter beginWrite() {
    if (!Pipe.hasRoomForWrite(pipe)) {
        return null; // back-pressure: no room for another message
    }
    Pipe.markHead(pipe);
    Pipe.addMsgIdx(pipe, RawDataSchema.MSG_CHUNKEDSTREAM_1);
    isWriting = true;
    return Pipe.openOutputStream(pipe);
}
/**
 * Emits the remaining bytes to copy: a blob-chunk control message on output1
 * and the actual bytes as a chunked-stream message on output2.
 * output2 is published before output1 so the data is visible before the
 * control message announcing it — presumably a consumer-ordering requirement;
 * confirm before reordering.
 */
private void writeBytesToOutput(Pipe<PhastCodecSchema> output1, Pipe<RawDataSchema> output2, DataOutputBlobWriter<RawDataSchema> output2Writer, DataInputBlobReader<RawDataSchema> reader) {
    // Reserve and confirm the control message (no fields beyond the header written here).
    Pipe.addMsgIdx(output1, PhastCodecSchema.MSG_BLOBCHUNK_1000);
    Pipe.confirmLowLevelWrite(output1, Pipe.sizeOf(output1, PhastCodecSchema.MSG_BLOBCHUNK_1000));
    // Copy the pending bytes into the data message.
    Pipe.addMsgIdx(output2, RawDataSchema.MSG_CHUNKEDSTREAM_1);
    output2Writer.openField();
    DataOutputBlobWriter.writeBytes(output2Writer, reader, bytesRemainingToCopy);
    output2Writer.closeLowLevelField();
    bytesRemainingToCopy = 0; // everything pending has now been copied
    Pipe.confirmLowLevelWrite(output2, Pipe.sizeOf(output2, RawDataSchema.MSG_CHUNKEDSTREAM_1));
    Pipe.publishWrites(output2);
    Pipe.publishWrites(output1);
}