public AuthIds(byte[] bytes) throws Exception {
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
    if (bytes.length == 8) {
        // only connectionId
        connectionId = dis.readLong();
    } else if (bytes.length == 16) {
        // first connectionId and then uniqueId
        connectionId = dis.readLong();
        uniqueId = dis.readLong();
    } else {
        throw new Exception("Auth ids are not in right form");
    }
}
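For reference, a byte array in the layout this constructor expects could be built with a DataOutputStream. The helper below is a hypothetical sketch, not part of the original class:

// Hypothetical helper: produces the 8-byte (connectionId only) or 16-byte
// (connectionId followed by uniqueId) layout that AuthIds(byte[]) parses.
static byte[] encodeAuthIds(long connectionId, Long uniqueId) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    dos.writeLong(connectionId);
    if (uniqueId != null) {
        dos.writeLong(uniqueId);
    }
    return baos.toByteArray();
}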
private void loadCreationMeta(File crcFile) throws IOException {
    if (crcFile.exists()) {
        final DataInputStream ds = new DataInputStream(new FileInputStream(crcFile));
        _crc = ds.readLong();
        _creationTime = ds.readLong();
        ds.close();
    }
}
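A matching write side (hypothetical, not shown in the original) would simply store the two longs in the same order loadCreationMeta() reads them back:

private void saveCreationMeta(File crcFile) throws IOException {
    try (DataOutputStream ds = new DataOutputStream(new FileOutputStream(crcFile))) {
        ds.writeLong(_crc);           // checksum first
        ds.writeLong(_creationTime);  // then the creation timestamp
    }
}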
private String readUUID(final DataInputStream in, final int serializationVersion) throws IOException {
    if (serializationVersion < 8) {
        final long msb = in.readLong();
        final long lsb = in.readLong();
        return new UUID(msb, lsb).toString();
    } else {
        // Before version 8, we serialized UUIDs as two longs in order to
        // write less data. However, in version 8 we changed to just writing
        // out the string because it's extremely expensive to call UUID.fromString.
        // In the end, since we generally compress, the savings are minimal anyway.
        final String uuid = in.readUTF();
        if (!UUID_PATTERN.matcher(uuid).matches()) {
            throw new IOException("Failed to parse Provenance Event Record: expected a UUID but got: " + uuid);
        }
        return uuid;
    }
}
public static QDigest deserialize(byte[] b) {
    ByteArrayInputStream bis = new ByteArrayInputStream(b);
    DataInputStream s = new DataInputStream(bis);
    try {
        long size = s.readLong();
        double compressionFactor = s.readDouble();
        long capacity = s.readLong();
        int count = s.readInt();
        QDigest d = new QDigest(compressionFactor);
        d.size = size;
        d.capacity = capacity;
        for (int i = 0; i < count; ++i) {
            long k = s.readLong();
            long n = s.readLong();
            d.node2count.put(k, n);
        }
        return d;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
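The write side is implied by the read order above. Below is a sketch of a matching serializer, assuming node2count can be iterated as key/value pairs of longs and that compressionFactor is kept as a field; the actual stream-lib implementation may differ:

public static byte[] serialize(QDigest d) {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream s = new DataOutputStream(bos);
    try {
        s.writeLong(d.size);
        s.writeDouble(d.compressionFactor);
        s.writeLong(d.capacity);
        s.writeInt(d.node2count.size());
        for (Map.Entry<Long, Long> e : d.node2count.entrySet()) {
            s.writeLong(e.getKey());    // node id
            s.writeLong(e.getValue());  // count for that node
        }
        return bos.toByteArray();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}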
@Test
public void testSafeWithOnFailedConsumer() throws IOException {
    final ByteArrayOutputStream baos = new ByteArrayOutputStream(30);
    final DataOutputStream dos = new DataOutputStream(baos);
    LongConsumer consumer = LongConsumer.Util.safe(new UnsafeConsumer(dos), new LongConsumer() {
        @Override
        public void accept(long value) {
            baos.write(0);
        }
    });
    consumer.accept(10L);
    consumer.accept(20L);
    consumer.accept(-5L);
    consumer.accept(-8L);
    consumer.accept(500L);

    final byte[] result = baos.toByteArray();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(result));
    assertThat(dis.readLong(), is(10L));
    assertThat(dis.readLong(), is(20L));
    assertThat(dis.readByte(), is((byte) 0));
    assertThat(dis.readByte(), is((byte) 0));
    assertThat(dis.readLong(), is(500L));
}
private long[][] deserialize(final String bitsetResource) throws IOException {
    final DataInputStream dos = new DataInputStream(
            new GZIPInputStream(this.getClass().getResourceAsStream(bitsetResource)));
    try {
        final long[][] bitset = new long[dos.readInt()][];
        for (int i = 0; i < bitset.length; i++) {
            final int wordSize = dos.readInt();
            // for duplication, to make bitsets wider
            final int clone = 0;
            final long[] words = new long[wordSize * (clone + 1)];
            for (int j = 0; j < wordSize; j++) {
                words[j] = dos.readLong();
            }
            // duplicate long[] n times to the right
            for (int j = 0; j < clone; j++) {
                System.arraycopy(words, 0, words, (j + 1) * wordSize, wordSize);
            }
            bitset[i] = words;
        }
        return bitset;
    } finally {
        if (dos != null) {
            dos.close();
        }
    }
}
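Producing such a resource could look like the hypothetical writer below, which mirrors the layout the reader expects: an int count of bitsets, and for each bitset an int word count followed by that many longs, all gzip-compressed:

private void serialize(final long[][] bitset, final File target) throws IOException {
    try (DataOutputStream out = new DataOutputStream(
            new GZIPOutputStream(new FileOutputStream(target)))) {
        out.writeInt(bitset.length);
        for (final long[] words : bitset) {
            out.writeInt(words.length);
            for (final long word : words) {
                out.writeLong(word);
            }
        }
    }
}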
@Override
public SwapSummary getSwapSummary(final DataInputStream in, final String swapLocation, final ResourceClaimManager claimManager) throws IOException {
    final int swapEncodingVersion = in.readInt();
    if (swapEncodingVersion > SWAP_ENCODING_VERSION) {
        final String errMsg = "Cannot swap FlowFiles in from " + swapLocation + " because the encoding version is "
                + swapEncodingVersion + ", which is too new (expecting " + SWAP_ENCODING_VERSION + " or less)";
        throw new IOException(errMsg);
    }

    final int numRecords;
    final long contentSize;
    Long maxRecordId = null;
    try {
        in.readUTF(); // ignore Connection ID
        numRecords = in.readInt();
        contentSize = in.readLong();

        if (numRecords == 0) {
            return StandardSwapSummary.EMPTY_SUMMARY;
        }

        if (swapEncodingVersion > 7) {
            maxRecordId = in.readLong();
        }
    } catch (final EOFException eof) {
        logger.warn("Found premature End-of-File when reading Swap File {}. EOF occurred before any FlowFiles were encountered", swapLocation);
        return StandardSwapSummary.EMPTY_SUMMARY;
    }

    final QueueSize queueSize = new QueueSize(numRecords, contentSize);
    final SwapContents swapContents = deserializeFlowFiles(in, queueSize, maxRecordId, swapEncodingVersion, claimManager, swapLocation);
    return swapContents.getSummary();
}
Class<?> type = element.field.getType();
if (type == int.class) {
    element.fieldValue = input.readInt();
} else if (type == byte.class) {
    element.fieldValue = input.readByte();
} else if (type == char.class) {
    element.fieldValue = input.readChar();
} else if (type == boolean.class) {
    element.fieldValue = input.readBoolean();
} else if (type == long.class) {
    element.fieldValue = input.readLong();
} else if (type == float.class) {
    element.fieldValue = input.readFloat();
DataInputStream dis = new DataInputStream(new FileInputStream(outputFile));
for (int i = 0; i < 3; i++) {
    long size = dis.readLong();
    Assert.assertEquals(size, records[i].length + 1);
    for (int j = 0; j < size - 1; j++) {
        Assert.assertEquals(dis.readByte(), records[i][j]);
    }
    Assert.assertEquals(dis.readByte(), '\n');
}
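A writer that produces the file these assertions check could look like the hypothetical helper below: each record is prefixed with a long length (record bytes plus one for the separator), followed by the record bytes and a '\n':

private void writeRecords(File outputFile, byte[][] records) throws IOException {
    try (DataOutputStream dos = new DataOutputStream(new FileOutputStream(outputFile))) {
        for (byte[] record : records) {
            dos.writeLong(record.length + 1L);
            dos.write(record);
            dos.writeByte('\n');
        }
    }
}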
boolean handleRequest(DataInputStream in, DataOutputStream out) throws IOException {
    while (true) {
        byte type = (byte) in.read();
        if (type == -1)
            return false;
        switch (type) {
            case START:
                int num = in.readInt();
                startTest(num);
                break;
            case RECEIVE_ASYNC:
            case RECEIVE_SYNC:
                long val = in.readLong();
                int len = in.readInt();
                byte[] data = new byte[len];
                in.readFully(data, 0, data.length);
                receiveData(val, data);
                if (type == RECEIVE_SYNC) {
                    out.writeLong(System.currentTimeMillis());
                    out.flush();
                }
                break;
            default:
                System.err.println("type " + type + " not known");
        }
    }
}
protected long parseSize(final DataInputStream din, final byte type, final boolean useIntOnError, final long defaultValue) throws IOException {
    if (type == 'i') return (long) readUChar(din);
    if (type == 'I') return (long) readUShort(din);
    if (type == 'l') return (long) readUInt(din);
    if (type == 'L') return din.readLong();
    if (useIntOnError) {
        long result = (long) ((short) type & 0xFF) << 24;
        result |= (long) ((short) din.readByte() & 0xFF) << 16;
        result |= (long) ((short) din.readByte() & 0xFF) << 8;
        result |= (long) ((short) din.readByte() & 0xFF);
        return result;
    }
    return defaultValue;
}
public static CountMinSketch deserialize(byte[] data) {
    ByteArrayInputStream bis = new ByteArrayInputStream(data);
    DataInputStream s = new DataInputStream(bis);
    try {
        CountMinSketch sketch = new CountMinSketch();
        sketch.size = s.readLong();
        sketch.depth = s.readInt();
        sketch.width = s.readInt();
        sketch.eps = 2.0 / sketch.width;
        sketch.confidence = 1 - 1 / Math.pow(2, sketch.depth);
        sketch.hashA = new long[sketch.depth];
        sketch.table = new long[sketch.depth][sketch.width];
        for (int i = 0; i < sketch.depth; ++i) {
            sketch.hashA[i] = s.readLong();
            for (int j = 0; j < sketch.width; ++j) {
                sketch.table[i][j] = s.readLong();
            }
        }
        return sketch;
    } catch (IOException e) {
        // Shouldn't happen
        throw new RuntimeException(e);
    }
}
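The corresponding write side follows directly from the read order: size, depth, width, then each row's hash seed followed by its counters (eps and confidence are recomputed on deserialization, so they are not stored). The sketch below assumes direct field access, as in the deserializer above:

public static byte[] serialize(CountMinSketch sketch) {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream s = new DataOutputStream(bos);
    try {
        s.writeLong(sketch.size);
        s.writeInt(sketch.depth);
        s.writeInt(sketch.width);
        for (int i = 0; i < sketch.depth; ++i) {
            s.writeLong(sketch.hashA[i]);
            for (int j = 0; j < sketch.width; ++j) {
                s.writeLong(sketch.table[i][j]);
            }
        }
        return bos.toByteArray();
    } catch (IOException e) {
        // Shouldn't happen with an in-memory stream
        throw new RuntimeException(e);
    }
}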
@Override
public Long deserialize(final byte[] input) throws DeserializationException, IOException {
    if (input == null || input.length == 0) {
        return null;
    }
    final DataInputStream dis = new DataInputStream(new ByteArrayInputStream(input));
    return dis.readLong();
}
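A hypothetical write-side counterpart (not taken from the original project) would encode the value as the raw 8-byte big-endian form that readLong() expects:

public byte[] serialize(final Long value) throws IOException {
    if (value == null) {
        return new byte[0];
    }
    final ByteArrayOutputStream baos = new ByteArrayOutputStream(8);
    final DataOutputStream dos = new DataOutputStream(baos);
    dos.writeLong(value);
    return baos.toByteArray();
}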
private StartHeader readStartHeader(final long startHeaderCrc) throws IOException {
    final StartHeader startHeader = new StartHeader();
    // using Stream rather than ByteBuffer for the benefit of the
    // built-in CRC check
    try (DataInputStream dataInputStream = new DataInputStream(new CRC32VerifyingInputStream(
            new BoundedSeekableByteChannelInputStream(channel, 20), 20, startHeaderCrc))) {
        // the 7z start header stores these values little-endian, while DataInputStream
        // reads big-endian, hence the reverseBytes calls
        startHeader.nextHeaderOffset = Long.reverseBytes(dataInputStream.readLong());
        startHeader.nextHeaderSize = Long.reverseBytes(dataInputStream.readLong());
        startHeader.nextHeaderCrc = 0xffffFFFFL & Integer.reverseBytes(dataInputStream.readInt());
        return startHeader;
    }
}
private SnapshotHeader validateHeader(final DataInputStream dataIn) throws IOException {
    final String snapshotClass = dataIn.readUTF();
    logger.debug("Snapshot Class Name for {} is {}", storageDirectory, snapshotClass);
    if (!snapshotClass.equals(HashMapSnapshot.class.getName())) {
        throw new IOException("Write-Ahead Log Snapshot located at " + storageDirectory + " was written using the "
                + snapshotClass + " class; cannot restore using " + getClass().getName());
    }

    final int snapshotVersion = dataIn.readInt();
    logger.debug("Snapshot version for {} is {}", storageDirectory, snapshotVersion);
    if (snapshotVersion > getVersion()) {
        throw new IOException("Write-Ahead Log Snapshot located at " + storageDirectory + " was written using version "
                + snapshotVersion + " of the " + snapshotClass + " class; cannot restore using Version " + getVersion());
    }

    final String serdeEncoding = dataIn.readUTF(); // ignore serde class name for now
    logger.debug("Serde encoding for Snapshot at {} is {}", storageDirectory, serdeEncoding);

    final int serdeVersion = dataIn.readInt();
    logger.debug("Serde version for Snapshot at {} is {}", storageDirectory, serdeVersion);

    final long maxTransactionId = dataIn.readLong();
    logger.debug("Max Transaction ID for Snapshot at {} is {}", storageDirectory, maxTransactionId);

    final int numRecords = dataIn.readInt();
    logger.debug("Number of Records for Snapshot at {} is {}", storageDirectory, numRecords);

    final SerDe<T> serde = serdeFactory.createSerDe(serdeEncoding);
    serde.readHeader(dataIn);

    return new SnapshotHeader(serde, serdeVersion, maxTransactionId, numRecords);
}
    return new JsonValue((long) readUChar(din));
else if (type == 'i')
    return new JsonValue(oldFormat ? (long) din.readShort() : (long) din.readByte());
else if (type == 'I')
    return new JsonValue(oldFormat ? (long) din.readInt() : (long) din.readShort());
else if (type == 'l')
    return new JsonValue((long) din.readInt());
else if (type == 'L')
    return new JsonValue(din.readLong());
else if (type == 'd')
    return new JsonValue(din.readFloat());
FileInputStream in = new FileInputStream(idxFileName);
in.getChannel().position(offsetInIndex);
DataInputStream indexIn = new DataInputStream(in);
long offset;
try {
    long second;
    lastPosition.offsetInIndex = in.getChannel().position();
    // skip index entries (second/offset pairs) until the first entry at or after beginSecond
    while ((second = indexIn.readLong()) < beginSecond) {
        offset = indexIn.readLong();
        lastPosition.offsetInIndex = in.getChannel().position();
    }
    offset = indexIn.readLong();
    lastPosition.metricFileName = metricFileName;
    lastPosition.indexFileName = idxFileName;
DataInputStream dis = new DataInputStream(new FileInputStream(outputFile));
for (int i = 0; i < 3; i++) {
    long size = dis.readLong();
    Assert.assertEquals(size, records[i].length);
    for (int j = 0; j < size; j++) {
        Assert.assertEquals(dis.readByte(), records[i][j]);
    }
}
/**
 * Read the root-level metadata of a multi-level block index. Based on
 * {@link #readRootIndex(DataInput, int)}, but also reads metadata
 * necessary to compute the mid-key in a multi-level index.
 *
 * @param blk the HFile block
 * @param numEntries the number of root-level index entries
 * @throws IOException
 */
public void readMultiLevelIndexRoot(HFileBlock blk, final int numEntries) throws IOException {
    DataInputStream in = readRootIndex(blk, numEntries);
    // after reading the root index the checksum bytes have to
    // be subtracted to know if the mid key exists.
    int checkSumBytes = blk.totalChecksumBytes();
    if ((in.available() - checkSumBytes) < MID_KEY_METADATA_SIZE) {
        // No mid-key metadata available.
        return;
    }
    midLeafBlockOffset = in.readLong();
    midLeafBlockOnDiskSize = in.readInt();
    midKeyEntry = in.readInt();
}