static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
  try {
    int os1 = s1;
    {
      // the field is serialized as a VInt length followed by that many bytes
      int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
      int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
      int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
      int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
      s1+=z1; s2+=z2; l1-=z1; l2-=z2;
      int r1 = org.apache.hadoop.record.Utils.compareBytes(b1,s1,i1,b2,s2,i2);
      if (r1 != 0) { return (r1<0)?-1:1; }
      s1+=i1; s2+=i2; l1-=i1; l2-=i2;
    }
    return (os1 - s1);
  } catch(java.io.IOException e) {
    throw new RuntimeException(e);
  }
}
public int compare(byte[] b1, int s1, int l1,
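// A minimal sketch (not part of the generated record code) of the wire layout the
// compareRaw above assumes: each buffer/string field is a VInt length followed by
// that many raw bytes. Utils.writeVInt is assumed to be the encoder matching the
// Utils.readVInt/getVIntSize/compareBytes calls used above; the package declaration,
// class name and helper below are illustrative assumptions only.
package org.apache.hadoop.record;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RawFieldCompareSketch {
  // hypothetical helper: serialize one field as VInt length + payload bytes
  static byte[] encodeField(byte[] payload) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    Utils.writeVInt(dos, payload.length);
    dos.write(payload);
    dos.flush();
    return bos.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    byte[] a = encodeField("apple".getBytes("UTF-8"));
    byte[] b = encodeField("apples".getBytes("UTF-8"));
    // decode the length prefixes the same way compareRaw does, then compare payloads
    int la = Utils.readVInt(a, 0), za = Utils.getVIntSize(la);
    int lb = Utils.readVInt(b, 0), zb = Utils.getVIntSize(lb);
    int r = Utils.compareBytes(a, za, la, b, zb, lb);
    System.out.println(r < 0 ? "a < b" : (r == 0 ? "a == b" : "a > b"));
  }
}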
@Override public long readLong(final String tag) throws IOException { return Utils.readVLong(in); }
@Override public String readString(final String tag) throws IOException { return Utils.fromBinaryString(in); }
static public int slurpRaw(byte[] b, int s, int l) { try { int os = s; { int i = org.apache.hadoop.record.Utils.readVInt(b, s); int z = org.apache.hadoop.record.Utils.getVIntSize(i); s+=(z+i); l-= (z+i); } return (os - s); } catch(java.io.IOException e) { throw new RuntimeException(e); } } static public int compareRaw(byte[] b1, int s1, int l1,
static String fromBinaryString(final DataInput din) throws IOException {
  final int utf8Len = readVInt(din);
  final byte[] bytes = new byte[utf8Len];
  din.readFully(bytes);
  // decode the UTF-8 bytes one code point at a time
  int len = 0;
  final StringBuilder sb = new StringBuilder(utf8Len);
  while (len < utf8Len) {
    int cpt = 0;
    final int b1 = bytes[len++] & 0xFF;
    if (b1 < 0x80) {
      // single-byte (ASCII) code point
      cpt = b1;
    } else if ((b1 & B11111) == B11110) {
      int b2 = bytes[len++] & 0xFF;
      checkB10(b2);
      int b3 = bytes[len++] & 0xFF;
      checkB10(b3);
      int b4 = bytes[len++] & 0xFF;
      checkB10(b4);
      cpt = utf8ToCodePoint(b1, b2, b3, b4);
    } else if ((b1 & B1111) == B1110) {
      int b2 = bytes[len++] & 0xFF;
      checkB10(b2);
      int b3 = bytes[len++] & 0xFF;
      checkB10(b3);
      cpt = utf8ToCodePoint(b1, b2, b3);
    } else if ((b1 & B111) == B110) {
      int b2 = bytes[len++] & 0xFF;
      checkB10(b2);
      cpt = utf8ToCodePoint(b1, b2);
    } else {
      throw new IOException("Invalid UTF-8 byte "+Integer.toHexString(b1)+
          " at offset "+(len-1)+" in length of "+utf8Len);
    }
    if (!isValidCodePoint(cpt)) {
      throw new IOException("Illegal Unicode Codepoint "+
          Integer.toHexString(cpt)+" in stream.");
    }
    sb.appendCodePoint(cpt);
  }
  return sb.toString();
}
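// A hedged round-trip sketch for the decoder above: toBinaryString (used by the
// binary string writer elsewhere in this code) is assumed to be the matching encoder
// that writes a VInt byte length followed by the UTF-8 bytes. The package declaration
// is only so package-private Utils methods stay visible; the class name is illustrative.
package org.apache.hadoop.record;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class BinaryStringRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // mixes 1-, 2- and 3-byte UTF-8 sequences
    String original = "h\u00e9llo \u4e16\u754c";

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    Utils.toBinaryString(dos, original);
    dos.flush();

    DataInputStream din = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    String decoded = Utils.fromBinaryString(din);
    System.out.println(original.equals(decoded));  // expected: true
  }
}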
int os = s;
{
  long i = org.apache.hadoop.record.Utils.readVLong(b, s);
  int z = org.apache.hadoop.record.Utils.getVIntSize(i);
  s += z; l -= z;
}
{
  int i = org.apache.hadoop.record.Utils.readVInt(b, s);
  int z = org.apache.hadoop.record.Utils.getVIntSize(i);
  s += (z + i); l -= (z + i);
}
{
  int i = org.apache.hadoop.record.Utils.readVInt(b, s);
  int z = org.apache.hadoop.record.Utils.getVIntSize(i);
  s += (z + i); l -= (z + i);
}
{
  long i = org.apache.hadoop.record.Utils.readVLong(b, s);
  int z = org.apache.hadoop.record.Utils.getVIntSize(i);
  s += z; l -= z;
}
{
  long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
  long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
  if (i1 != i2) {
    // -1 if the first value is smaller, 1 if it is greater
    return (i1 < i2) ? -1 : 1;
  }
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1 += z1; s2 += z2;
}
{
  int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
  int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1 += z1; s2 += z2; l1 -= z1; l2 -= z2;
  int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2, s2, i2);
  if (r1 != 0) {
    return (r1 < 0) ? -1 : 1;
  }
  s1 += i1; s2 += i2; l1 -= i1; l2 -= i2;
}
{
  int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
  int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1 += z1; s2 += z2; l1 -= z1;
{
  int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
  int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
  if (i1 != i2) {
    return (i1 < i2) ? -1 : 1;
  }
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1+=z1; s2+=z2; l1-=z1; l2-=z2;
}
{
  long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
  long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
  if (i1 != i2) {
    return (i1 < i2) ? -1 : 1;
  }
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1+=z1; s2+=z2; l1-=z1; l2-=z2;
}
{
  float f1 = org.apache.hadoop.record.Utils.readFloat(b1, s1);
  float f2 = org.apache.hadoop.record.Utils.readFloat(b2, s2);
  if (f1 != f2) {
    return (f1 < f2) ? -1 : 1;
  }
  // a serialized float is 4 bytes wide
  s1+=4; s2+=4; l1-=4; l2-=4;
}
{
  double d1 = org.apache.hadoop.record.Utils.readDouble(b1, s1);
  double d2 = org.apache.hadoop.record.Utils.readDouble(b2, s2);
  if (d1 != d2) {
    return (d1 < d2) ? -1 : 1;
  }
  // a serialized double is 8 bytes wide
  s1+=8; s2+=8; l1-=8; l2-=8;
}
int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
@Override public int readInt(final String tag) throws IOException { return Utils.readVInt(in); }
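// A hedged sketch of the variable-length integer encoding behind readInt/readLong
// above: writeVLong is assumed to be the encoder paired with the readVLong and
// getVIntSize calls seen in this code. The package declaration and class name are
// illustrative assumptions only.
package org.apache.hadoop.record;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VIntSketch {
  public static void main(String[] args) throws IOException {
    long[] samples = {0, 127, 128, 1L << 20, -1, Long.MAX_VALUE};

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    for (long v : samples) {
      Utils.writeVLong(dos, v);  // small magnitudes take fewer bytes on the wire
    }
    dos.flush();

    DataInputStream din = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    for (long v : samples) {
      long back = Utils.readVLong(din);
      System.out.println(v + " -> " + Utils.getVIntSize(v) + " byte(s), decoded back as " + back);
    }
  }
}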
/**
 * Verify that hash equals the HMAC of msg computed with the given key.
 * @param hash the received hash value
 * @param msg the message that was hashed
 * @param key the secret key used to compute the HMAC
 * @return true if the computed hash matches the received one
 */
private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) {
  byte[] msg_hash = generateByteHash(msg, key);
  return Utils.compareBytes(msg_hash, 0, msg_hash.length, hash, 0, hash.length) == 0;
}
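// A hedged sketch of the generateByteHash counterpart that verifyHash above relies
// on: a plain HMAC over the message with the shared secret key. The "HmacSHA1"
// algorithm, the helper shape, and the class name are assumptions for illustration,
// not the project's actual implementation.
import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import java.util.Arrays;

public class HmacSketch {
  // assumed stand-in for generateByteHash(msg, key)
  static byte[] generateByteHash(byte[] msg, SecretKey key) {
    try {
      Mac mac = Mac.getInstance("HmacSHA1");
      mac.init(key);
      return mac.doFinal(msg);
    } catch (Exception e) {
      throw new IllegalStateException(e);
    }
  }

  public static void main(String[] args) throws Exception {
    SecretKey key = new SecretKeySpec("shared-secret".getBytes("UTF-8"), "HmacSHA1");
    byte[] msg = "message-to-authenticate".getBytes("UTF-8");
    byte[] hash = generateByteHash(msg, key);
    // verifyHash(hash, msg, key) recomputes the HMAC and compares it byte-for-byte
    System.out.println(Arrays.equals(hash, generateByteHash(msg, key)));  // true
  }
}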
@Override public void writeBuffer(Buffer buf, String tag) throws IOException { printCommaUnlessFirst(); stream.print(Utils.toCSVBuffer(buf)); throwExceptionOnError(tag); }
@Override public void writeString(String s, String tag) throws IOException { printCommaUnlessFirst(); stream.print(Utils.toCSVString(s)); throwExceptionOnError(tag); }
@Override public void writeString(String s, String tag) throws IOException { Utils.toBinaryString(out, s); }
/**
 * Decode an XML-serialized string: each '%' escape is followed by four hex
 * digits that encode a single UTF-16 code unit.
 * @param s the escaped string
 * @return the decoded string
 */
static String fromXMLString(String s) {
  StringBuilder sb = new StringBuilder();
  for (int idx = 0; idx < s.length();) {
    char ch = s.charAt(idx++);
    if (ch == '%') {
      // the four hex digits after '%' form one 16-bit character
      int ch1 = h2c(s.charAt(idx++)) << 12;
      int ch2 = h2c(s.charAt(idx++)) << 8;
      int ch3 = h2c(s.charAt(idx++)) << 4;
      int ch4 = h2c(s.charAt(idx++));
      char res = (char)(ch1 | ch2 | ch3 | ch4);
      sb.append(res);
    } else {
      sb.append(ch);
    }
  }
  return sb.toString();
}
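// A hedged illustration of the escape format fromXMLString above expects: '%'
// followed by exactly four hex digits naming one UTF-16 code unit. escapeChar is a
// hypothetical helper, not the project's real encoder, and whether lower-case hex
// digits are accepted depends on h2c(), which is not shown here.
public class XmlEscapeSketch {
  // hypothetical inverse of the '%XXXX' decoding loop above
  static String escapeChar(char ch) {
    return String.format("%%%04X", (int) ch);  // e.g. '\n' -> "%000A"
  }

  public static void main(String[] args) {
    String escaped = "line1" + escapeChar('\n') + "line2";
    System.out.println(escaped);  // line1%000Aline2
    // fromXMLString("line1%000Aline2") should decode back to "line1\nline2"
  }
}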
private static void initStrings(Record[] strings) {
  final int STRLEN = 32;
  for (int idx = 0; idx < strings.length; idx++) {
    strings[idx] = new RecString();
    int strlen = rand.nextInt(STRLEN);
    StringBuilder sb = new StringBuilder(strlen);
    for (int ich = 0; ich < strlen; ich++) {
      int cpt = 0;
      while (true) {
        cpt = rand.nextInt(0x10FFFF + 1);
        if (Utils.isValidCodePoint(cpt)) {
          break;
        }
      }
      sb.appendCodePoint(cpt);
    }
    ((RecString) strings[idx]).setData(sb.toString());
  }
}
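// A small side note on the generator above, using plain JDK calls only: code points
// beyond U+FFFF are appended as surrogate pairs, so the resulting string's length in
// chars can exceed the number of code points produced. The class name is illustrative.
public class CodePointSketch {
  public static void main(String[] args) {
    StringBuilder sb = new StringBuilder();
    sb.appendCodePoint(0x1F600);  // a supplementary code point
    System.out.println(sb.length());                        // 2 (one surrogate pair)
    System.out.println(sb.codePointCount(0, sb.length()));  // 1
  }
}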
int os = s;
{
  long i = org.apache.hadoop.record.Utils.readVLong(b, s);
  int z = org.apache.hadoop.record.Utils.getVIntSize(i);
  s += z; l -= z;
}
int mi1 = org.apache.hadoop.record.Utils.readVInt(b, s);
int mz1 = org.apache.hadoop.record.Utils.getVIntSize(mi1);
s += mz1; l -= mz1;
for (int midx1 = 0; midx1 < mi1; midx1++) {
  {
    int i = org.apache.hadoop.record.Utils.readVInt(b, s);
    int z = org.apache.hadoop.record.Utils.getVIntSize(i);
    s += (z + i); l -= (z + i);
  }
  {
    int i = org.apache.hadoop.record.Utils.readVInt(b, s);
    int z = org.apache.hadoop.record.Utils.getVIntSize(i);
    s += (z + i); l -= (z + i);
  }
}
{
  long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
  long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
  if (i1 != i2) {
    return (i1 < i2) ? -1 : 1;
  }
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1 += z1; s2 += z2;
}
// number of map entries in each record
int mi11 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
int mi21 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
int mz11 = org.apache.hadoop.record.Utils.getVIntSize(mi11);
int mz21 = org.apache.hadoop.record.Utils.getVIntSize(mi21);
s1 += mz11; s2 += mz21;
{
  int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
  int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
  int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
  int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
  s1 += z1; s2 += z2; l1 -= z1; l2 -= z2;
  int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2, s2, i2);
  if (r1 != 0) {
    return (r1 < 0) ? -1 : 1;