/**
 * Returns true when the token at position x is a Group token carrying both
 * the close and templated operator bits — i.e. the closing of a templated
 * group, which terminates the search.
 */
private static boolean exitSearch(FieldReferenceOffsetManager from, int x) {
    final int token = from.tokens[x];
    if (TypeMask.Group != TokenBuilder.extractType(token)) {
        return false;
    }
    final int closeBit = OperatorMask.Group_Bit_Close << TokenBuilder.SHIFT_OPER;
    final int templBit = OperatorMask.Group_Bit_Templ << TokenBuilder.SHIFT_OPER;
    return (0 != (token & closeBit)) && (0 != (token & templBit));
}
/**
 * Returns true when the token at position x is a Group token carrying both
 * the close and templated operator bits — i.e. the closing of a templated
 * group, which terminates the search.
 */
private static boolean exitSearch(FieldReferenceOffsetManager from, int x) {
    final int token = from.tokens[x];
    if (TypeMask.Group != TokenBuilder.extractType(token)) {
        return false;
    }
    final int closeBit = OperatorMask.Group_Bit_Close << TokenBuilder.SHIFT_OPER;
    final int templBit = OperatorMask.Group_Bit_Templ << TokenBuilder.SHIFT_OPER;
    return (0 != (token & closeBit)) && (0 != (token & templBit));
}
/**
 * Returns true when the token at position x is a Group token carrying both
 * the close and templated operator bits — i.e. the closing of a templated
 * group, which terminates the search.
 */
private static boolean exitSearch(FieldReferenceOffsetManager from, int x) {
    final int token = from.tokens[x];
    if (TypeMask.Group != TokenBuilder.extractType(token)) {
        return false;
    }
    final int closeBit = OperatorMask.Group_Bit_Close << TokenBuilder.SHIFT_OPER;
    final int templBit = OperatorMask.Group_Bit_Templ << TokenBuilder.SHIFT_OPER;
    return (0 != (token & closeBit)) && (0 != (token & templBit));
}
/**
 * Reports whether the token at the given cursor position is a Group token.
 */
public static boolean isGroup(FieldReferenceOffsetManager from, int cursor) {
    final int token = from.tokens[cursor];
    return TokenBuilder.extractType(token) == TypeMask.Group;
}
/**
 * Reports whether the token at the given cursor position is a Group token.
 */
public static boolean isGroup(FieldReferenceOffsetManager from, int cursor) {
    final int token = from.tokens[cursor];
    return TokenBuilder.extractType(token) == TypeMask.Group;
}
/**
 * Reports whether the token at the given cursor position is a Group token.
 */
public static boolean isGroup(FieldReferenceOffsetManager from, int cursor) {
    final int token = from.tokens[cursor];
    return TokenBuilder.extractType(token) == TypeMask.Group;
}
/**
 * Folds one presence bit into the pmap for this token. Only optional field
 * types contribute a bit; the bit value depends on whether the field is null.
 * Non-optional fields return the pmap unchanged.
 */
public static long pmapBuilderString(long pmap, int token, boolean isNull) {
    final int type = TokenBuilder.extractType(token);
    if (!TypeMask.isOptional(type)) {
        return pmap;
    }
    return (pmap << 1) + (isNull ? LEAST_FREQUENT_CASE : MOST_FREQUENT_CASE);
}
private static int buildFieldLoc(FieldReferenceOffsetManager from, int fragmentStart, int fieldCursor) { final int stackOff = from.fragDepth[fragmentStart]<<RW_STACK_OFF_SHIFT; final int shiftedFieldType = TokenBuilder.extractType(from.tokens[fieldCursor])<<RW_FIELD_OFF_BITS; //type is 5 bits of information //the remaining bits for the offset is 32 -(4+5) or 23 which is 8M for the fixed portion of any fragment int fieldOff = (0==fieldCursor) ? from.templateOffset+1 : from.fragDataSize[fieldCursor]; assert(fieldOff>=0); assert(fieldOff < (1<<RW_FIELD_OFF_BITS)) : "Fixed portion of a fragment can not be larger than "+(1<<RW_FIELD_OFF_BITS)+" bytes"; final int loc = stackOff | shiftedFieldType | fieldOff; // 6bits 5bits 21bit // high bit is going to be zero for stacks less than 32 // low 21 is always going to be a small number offset from front of fragment. assert(FieldReferenceOffsetManager.extractTypeFromLoc(loc) == (shiftedFieldType>>RW_FIELD_OFF_BITS)) : "type encode decode round trip for LOC does not pass"; return loc; }
protected void encodePmapBuilderInt(MessageSchema schema, Appendable target, int token, int index, String valName, String isNull) { try { //TODO: add support for isnull appendStaticCall(target, encoder, "pmapBuilderInt") .append(pmapName).append(", ") .append(Integer.toString(TokenBuilder.extractType(token))).append(", ") .append(Integer.toString(TokenBuilder.extractOper(token))).append(", ") .append(valName).append(", ") .append(intDictionaryName + "[" + index + "]").append(", ") .append(defIntDictionaryName + "[" + index + "]").append(", ") .append(isNull) .append(");\n"); } catch (IOException e) { throw new RuntimeException(e); } }
private static int buildFieldLoc(FieldReferenceOffsetManager from, int fragmentStart, int fieldCursor) { final int stackOff = from.fragDepth[fragmentStart]<<RW_STACK_OFF_SHIFT; final int shiftedFieldType = TokenBuilder.extractType(from.tokens[fieldCursor])<<RW_FIELD_OFF_BITS; //type is 5 bits of information //the remaining bits for the offset is 32 -(4+5) or 23 which is 8M for the fixed portion of any fragment int fieldOff = (0==fieldCursor) ? from.templateOffset+1 : from.fragDataSize[fieldCursor]; assert(fieldOff>=0); assert(fieldOff < (1<<RW_FIELD_OFF_BITS)) : "Fixed portion of a fragment can not be larger than "+(1<<RW_FIELD_OFF_BITS)+" bytes"; final int loc = stackOff | shiftedFieldType | fieldOff; // 6bits 5bits 21bit // high bit is going to be zero for stacks less than 32 // low 21 is always going to be a small number offset from front of fragment. assert(FieldReferenceOffsetManager.extractTypeFromLoc(loc) == (shiftedFieldType>>RW_FIELD_OFF_BITS)) : "type encode decode round trip for LOC does not pass"; return loc; }
private static int buildFieldLoc(FieldReferenceOffsetManager from, int fragmentStart, int fieldCursor) { final int stackOff = from.fragDepth[fragmentStart]<<RW_STACK_OFF_SHIFT; final int shiftedFieldType = TokenBuilder.extractType(from.tokens[fieldCursor])<<RW_FIELD_OFF_BITS; //type is 5 bits of information //the remaining bits for the offset is 32 -(4+5) or 23 which is 8M for the fixed portion of any fragment int fieldOff = (0==fieldCursor) ? from.templateOffset+1 : from.fragDataSize[fieldCursor]; assert(fieldOff>=0); assert(fieldOff < (1<<RW_FIELD_OFF_BITS)) : "Fixed portion of a fragment can not be larger than "+(1<<RW_FIELD_OFF_BITS)+" bytes"; final int loc = stackOff | shiftedFieldType | fieldOff; // 6bits 5bits 21bit // high bit is going to be zero for stacks less than 32 // low 21 is always going to be a small number offset from front of fragment. assert(FieldReferenceOffsetManager.extractTypeFromLoc(loc) == (shiftedFieldType>>RW_FIELD_OFF_BITS)) : "type encode decode round trip for LOC does not pass"; return loc; }
/**
 * Emits a generated static call to pmapBuilderLong that folds this long
 * field's presence bit into the pmap, passing the token type/operator, the
 * value expression, the current and default dictionary slots, and the isNull
 * flag. IOException from the Appendable is rethrown unchecked.
 */
protected void encodePmapBuilderLong(MessageSchema schema, Appendable target, int token, int index, String valName, String isNull) {
    final String sep = ", ";
    try {
        appendStaticCall(target, encoder, "pmapBuilderLong")
                .append(pmapName).append(sep)
                .append(Integer.toString(TokenBuilder.extractType(token))).append(sep)
                .append(Integer.toString(TokenBuilder.extractOper(token))).append(sep)
                .append(valName).append(sep)
                .append(longDictionaryName).append("[").append(Integer.toString(index)).append("]").append(sep)
                .append(defLongDictionaryName).append("[").append(Integer.toString(index)).append("]").append(sep)
                .append(isNull)
                .append(");\n");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// Advance the cursor by the script size of this token's type (fragment of an enclosing method not visible here).
curCursor += TypeMask.scriptTokenSize[TokenBuilder.extractType(token)];
/**
 * Renders a token as a human-readable "type/operator/instance" string for
 * debugging and error messages. Tokens are always negative; -1 is rendered
 * as "Unknown", and an invalid type/operator combination throws.
 */
public static String tokenToString(int token) {
    assert (token < 0) : "This is not a token";
    if (-1 == token) {
        return "Unknown";
    }
    final int type = extractType(token);
    final int instance = token & TokenBuilder.MAX_INSTANCE;
    final int oper = (token >> TokenBuilder.SHIFT_OPER) & TokenBuilder.MASK_OPER;
    if (isInValidCombo(type, oper)) {
        throw new UnsupportedOperationException("bad token");
    }
    // Group and Dictionary operators need the type to decode their bit flags;
    // every other operator has a direct name-table lookup.
    final String operText = (TypeMask.Group == type || TypeMask.Dictionary == type)
            ? OperatorMask.toString(type, oper)
            : OperatorMask.methodOperatorName[oper];
    return TypeMask.methodTypeName[type] + TypeMask.methodTypeSuffix[type] + "/" + operText + "/" + instance;
}
/**
 * Renders a token as a human-readable "type/operator/instance" string for
 * debugging and error messages. Tokens are always negative; -1 is rendered
 * as "Unknown", and an invalid type/operator combination throws.
 */
public static String tokenToString(int token) {
    assert (token < 0) : "This is not a token";
    if (-1 == token) {
        return "Unknown";
    }
    final int type = extractType(token);
    final int instance = token & TokenBuilder.MAX_INSTANCE;
    final int oper = (token >> TokenBuilder.SHIFT_OPER) & TokenBuilder.MASK_OPER;
    if (isInValidCombo(type, oper)) {
        throw new UnsupportedOperationException("bad token");
    }
    // Group and Dictionary operators need the type to decode their bit flags;
    // every other operator has a direct name-table lookup.
    final String operText = (TypeMask.Group == type || TypeMask.Dictionary == type)
            ? OperatorMask.toString(type, oper)
            : OperatorMask.methodOperatorName[oper];
    return TypeMask.methodTypeName[type] + TypeMask.methodTypeSuffix[type] + "/" + operText + "/" + instance;
}
/**
 * Renders a token as a human-readable "type/operator/instance" string for
 * debugging and error messages. Tokens are always negative; -1 is rendered
 * as "Unknown", and an invalid type/operator combination throws.
 */
public static String tokenToString(int token) {
    assert (token < 0) : "This is not a token";
    if (-1 == token) {
        return "Unknown";
    }
    final int type = extractType(token);
    final int instance = token & TokenBuilder.MAX_INSTANCE;
    final int oper = (token >> TokenBuilder.SHIFT_OPER) & TokenBuilder.MASK_OPER;
    if (isInValidCombo(type, oper)) {
        throw new UnsupportedOperationException("bad token");
    }
    // Group and Dictionary operators need the type to decode their bit flags;
    // every other operator has a direct name-table lookup.
    final String operText = (TypeMask.Group == type || TypeMask.Dictionary == type)
            ? OperatorMask.toString(type, oper)
            : OperatorMask.methodOperatorName[oper];
    return TypeMask.methodTypeName[type] + TypeMask.methodTypeSuffix[type] + "/" + operText + "/" + instance;
}
/**
 * Records idx as the walker's current message index, preserving the previous
 * index, and asserts that the new index is a valid message start.
 */
static void setMsgIdx(StackStateWalker rw, int idx, long llwHeadPosCache) {
    rw.msgIdxPrev = rw.msgIdx;
    rw.msgIdx = idx;
    assert(idx < rw.from.fragDataSize.length) : "Bad msgIdx out of range";
    // Values below -2 are rejected; negative indexes look like sentinels — TODO confirm their meanings.
    assert(idx>-3): "Bad msgIdx too small ";
    assert(isMsgIdxStartNewMessage(idx, rw)) : "Bad msgIdx is not a starting point. ";
    //This validation is very important, because all down stream consumers will assume it to be true.
    // Unless the schema has only simple messages, the token at msgIdx must be a Group open (type Group, close bit clear).
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || TypeMask.Group == TokenBuilder.extractType(rw.from.tokens[rw.msgIdx])) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || (OperatorMask.Group_Bit_Close&TokenBuilder.extractOper(rw.from.tokens[rw.msgIdx])) == 0) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
}
/**
 * Records idx as the walker's current message index, preserving the previous
 * index, and asserts that the new index is a valid message start.
 */
static void setMsgIdx(StackStateWalker rw, int idx, long llwHeadPosCache) {
    rw.msgIdxPrev = rw.msgIdx;
    rw.msgIdx = idx;
    assert(idx < rw.from.fragDataSize.length) : "Bad msgIdx out of range";
    // Values below -2 are rejected; negative indexes look like sentinels — TODO confirm their meanings.
    assert(idx>-3): "Bad msgIdx too small ";
    assert(isMsgIdxStartNewMessage(idx, rw)) : "Bad msgIdx is not a starting point. ";
    //This validation is very important, because all down stream consumers will assume it to be true.
    // Unless the schema has only simple messages, the token at msgIdx must be a Group open (type Group, close bit clear).
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || TypeMask.Group == TokenBuilder.extractType(rw.from.tokens[rw.msgIdx])) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || (OperatorMask.Group_Bit_Close&TokenBuilder.extractOper(rw.from.tokens[rw.msgIdx])) == 0) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
}
/**
 * Records idx as the walker's current message index, preserving the previous
 * index, and asserts that the new index is a valid message start.
 */
static void setMsgIdx(StackStateWalker rw, int idx, long llwHeadPosCache) {
    rw.msgIdxPrev = rw.msgIdx;
    rw.msgIdx = idx;
    assert(idx < rw.from.fragDataSize.length) : "Bad msgIdx out of range";
    // Values below -2 are rejected; negative indexes look like sentinels — TODO confirm their meanings.
    assert(idx>-3): "Bad msgIdx too small ";
    assert(isMsgIdxStartNewMessage(idx, rw)) : "Bad msgIdx is not a starting point. ";
    //This validation is very important, because all down stream consumers will assume it to be true.
    // Unless the schema has only simple messages, the token at msgIdx must be a Group open (type Group, close bit clear).
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || TypeMask.Group == TokenBuilder.extractType(rw.from.tokens[rw.msgIdx])) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
    assert(-1 ==idx || (rw.from.hasSimpleMessagesOnly && 0==rw.msgIdx && rw.from.messageStarts.length==1) || (OperatorMask.Group_Bit_Close&TokenBuilder.extractOper(rw.from.tokens[rw.msgIdx])) == 0) : errorMessageForMessageStartValidation(rw, llwHeadPosCache);
}
/**
 * Prepares the consumer-side walker to read the next fragment at
 * tmpNextWorkingTail: advances the working tail by the fragment size,
 * validates the message start token, positions the cursors, and — when the
 * fragment ends with a GroupLength token — pushes a new sequence onto the
 * walker's sequence stack using the length value read from the slab.
 */
private static void prepReadMessage2Normal(Pipe ringBuffer, StackStateWalker ringBufferConsumer, final long tmpNextWorkingTail, int[] fragDataSize) {

    ringBufferConsumer.nextWorkingTail = tmpNextWorkingTail + fragDataSize[ringBufferConsumer.msgIdx];//save the size of this new fragment we are about to read

    //This validation is very important, because all down stream consumers will assume it to be true.
    // Unless the schema has only simple messages, the message must begin with a Group open token (type Group, close bit clear).
    assert((ringBufferConsumer.from.hasSimpleMessagesOnly && 0==readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail) && ringBufferConsumer.from.messageStarts.length==1) || TypeMask.Group == TokenBuilder.extractType(ringBufferConsumer.from.tokens[readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail)])) : "Templated message must start with group open and this starts with "+TokenBuilder.tokenToString(ringBufferConsumer.from.tokens[readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail)]);
    assert((ringBufferConsumer.from.hasSimpleMessagesOnly && 0==readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail) && ringBufferConsumer.from.messageStarts.length==1) || (OperatorMask.Group_Bit_Close&TokenBuilder.extractOper(ringBufferConsumer.from.tokens[readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail)])) == 0) : "Templated message must start with group open and this starts with "+TokenBuilder.tokenToString(ringBufferConsumer.from.tokens[readMsgIdx(ringBuffer, ringBufferConsumer, tmpNextWorkingTail)]);

    // Point the cursor at the current fragment and the next cursor just past its script.
    ringBufferConsumer.cursor = ringBufferConsumer.msgIdx;
    int lastScriptPos = (ringBufferConsumer.nextCursor = ringBufferConsumer.msgIdx + ringBufferConsumer.from.fragScriptSize[ringBufferConsumer.msgIdx]) -1;
    // A trailing GroupLength token means a sequence follows this fragment.
    if (TypeMask.GroupLength == ((ringBufferConsumer.from.tokens[lastScriptPos] >>> TokenBuilder.SHIFT_TYPE) & TokenBuilder.MASK_TYPE)) {
        //Can not assume end of message any more.
        // Read the sequence length out of the slab at the last script position's data offset.
        int seqLength = Pipe.slab((Pipe<?>) ringBuffer)[(int)(ringBufferConsumer.from.fragDataSize[lastScriptPos] + tmpNextWorkingTail)&ringBuffer.slabMask];
        final StackStateWalker ringBufferConsumer1 = ringBufferConsumer;
        //now start new sequence
        ringBufferConsumer1.seqStack[++ringBufferConsumer1.seqStackHead] = seqLength;
        ringBufferConsumer1.seqCursors[ringBufferConsumer1.seqStackHead] = ringBufferConsumer1.nextCursor;
    }
}