private void corrupt(FileStatus file) throws IOException { LOG.info("Corrupt " + file); Path tmpFile = file.getPath().suffix(".tmp"); // remove the last byte to make the trailer corrupted try (FSDataInputStream in = fs.open(file.getPath()); FSDataOutputStream out = fs.create(tmpFile)) { ByteStreams.copy(ByteStreams.limit(in, file.getLen() - 1), out); } fs.delete(file.getPath(), false); fs.rename(tmpFile, file.getPath()); }
/**
 * Returns the ids of the clusters that have already consumed this mutation,
 * decoded from the {@code CONSUMED_CLUSTER_IDS} attribute.
 * @return the set of clusterIds that have consumed the mutation; empty if the
 *         attribute is absent
 */
public List<UUID> getClusterIds() {
  List<UUID> ids = new ArrayList<>();
  byte[] raw = getAttribute(CONSUMED_CLUSTER_IDS);
  if (raw == null) {
    return ids;
  }
  // Wire format: int count, then count pairs of (msb, lsb) longs.
  ByteArrayDataInput input = ByteStreams.newDataInput(raw);
  int count = input.readInt();
  while (count-- > 0) {
    long msb = input.readLong();
    long lsb = input.readLong();
    ids.add(new UUID(msb, lsb));
  }
  return ids;
}
/**
 * Marks that the clusters with the given clusterIds have consumed the mutation.
 * Serialized as an int count followed by (msb, lsb) long pairs, mirroring
 * {@code getClusterIds()}.
 * @param clusterIds of the clusters that have consumed the mutation
 * @return this, for chaining
 */
public Mutation setClusterIds(List<UUID> clusterIds) {
  ByteArrayDataOutput buf = ByteStreams.newDataOutput();
  buf.writeInt(clusterIds.size());
  clusterIds.forEach(id -> {
    buf.writeLong(id.getMostSignificantBits());
    buf.writeLong(id.getLeastSignificantBits());
  });
  setAttribute(CONSUMED_CLUSTER_IDS, buf.toByteArray());
  return this;
}
/**
 * This version of protobuf's mergeDelimitedFrom avoids the hard-coded 64MB limit for decoding
 * buffers.
 * @param builder current message builder
 * @param in InputStream with delimited protobuf data
 * @throws IOException if reading the length prefix or the message payload fails
 */
public static void mergeDelimitedFrom(Message.Builder builder, InputStream in)
throws IOException {
  // This used to be builder.mergeDelimitedFrom(in);
  // but is replaced to allow us to bump the protobuf size limit.
  final int firstByte = in.read();
  // -1 means the stream is already at EOF: there is no delimited message, so leave
  // the builder untouched (same behavior as protobuf's own mergeDelimitedFrom).
  if (firstByte != -1) {
    // The delimiter is a varint length prefix; firstByte is its first byte.
    final int size = CodedInputStream.readRawVarint32(firstByte, in);
    // Cap reads at exactly `size` bytes so the parser cannot run past this message.
    final InputStream limitedInput = ByteStreams.limit(in, size);
    final CodedInputStream codedInput = CodedInputStream.newInstance(limitedInput);
    // Raise the decoder's size limit to the actual message size (may exceed 64MB).
    codedInput.setSizeLimit(size);
    builder.mergeFrom(codedInput);
    // Tag 0 confirms the parser consumed the message cleanly to its end.
    codedInput.checkLastTagWas(0);
  }
}
jarStream.putNextEntry(entry); ByteStreams.copy(classStream, jarStream);
/**
 * Returns a view of {@code in} restricted to this slice: skips {@code offset}
 * bytes, then limits the stream to {@code length} bytes. If the offset lies
 * beyond EOF, {@code in} is closed and an empty stream is returned.
 */
private InputStream sliceStream(InputStream in) throws IOException {
  if (offset > 0) {
    long skipped;
    try {
      skipped = ByteStreams.skipUpTo(in, offset);
    } catch (Throwable e) {
      // If skipping fails we must not leak `in`: register it with a Closer so
      // rethrow(e) propagates the original error while close() failures are
      // recorded as suppressed rather than masking it.
      Closer closer = Closer.create();
      closer.register(in);
      try {
        throw closer.rethrow(e);
      } finally {
        closer.close();
      }
    }
    if (skipped < offset) {
      // offset was beyond EOF
      in.close();
      return new ByteArrayInputStream(new byte[0]);
    }
  }
  return ByteStreams.limit(in, length);
}
@Test public void test() throws Exception { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { HttpPut put = new HttpPut( String.format(URL_TEMPLCATE, PORT, TABLE_NAME.getNameAsString(), ROW, FAMILY, QUALIFIER)); put.setEntity(EntityBuilder.create().setText(VALUE) .setContentType(ContentType.create("text-plain", StandardCharsets.UTF_8)).build()); try (CloseableHttpResponse resp = client.execute(put)) { assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); } HttpGet get = new HttpGet( String.format(URL_TEMPLCATE, PORT, TABLE_NAME.getNameAsString(), ROW, FAMILY, QUALIFIER)); try (CloseableHttpResponse resp = client.execute(get)) { assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); assertEquals("value", Bytes.toString(ByteStreams.toByteArray(resp.getEntity().getContent()))); } get = new HttpGet(String.format(URL_TEMPLCATE, PORT, TABLE_NAME.getNameAsString(), "whatever", FAMILY, QUALIFIER)); try (CloseableHttpResponse resp = client.execute(get)) { assertEquals(HttpStatus.SC_NOT_FOUND, resp.getStatusLine().getStatusCode()); } } } }
/** * Discards up to {@code n} bytes of data from the input stream. This method will block until * either the full amount has been skipped or until the end of the stream is reached, whichever * happens first. Returns the total number of bytes skipped. */ static long skipUpTo(InputStream in, final long n) throws IOException { long totalSkipped = 0; byte[] buf = createBuffer(); while (totalSkipped < n) { long remaining = n - totalSkipped; long skipped = skipSafely(in, remaining); if (skipped == 0) { // Do a buffered read since skipSafely could return 0 repeatedly, for example if // in.available() always returns 0 (the default). int skip = (int) Math.min(remaining, buf.length); if ((skipped = in.read(buf, 0, skip)) == -1) { // Reached EOF break; } } totalSkipped += skipped; } return totalSkipped; }
/**
 * Checks that the contents of this byte source are equal to the contents of the given byte
 * source.
 *
 * @throws IOException if an I/O error occurs while reading from this source or {@code other}
 */
public boolean contentEquals(ByteSource other) throws IOException {
  checkNotNull(other);
  byte[] mine = createBuffer();
  byte[] theirs = createBuffer();
  Closer closer = Closer.create();
  try {
    InputStream s1 = closer.register(openStream());
    InputStream s2 = closer.register(other.openStream());
    for (;;) {
      // read() fills the buffer fully unless EOF intervenes, so unequal counts
      // mean unequal lengths; comparing whole buffers is safe because a short
      // final read leaves identical stale tails in both arrays only if the
      // counts matched on every prior iteration.
      int n1 = ByteStreams.read(s1, mine, 0, mine.length);
      int n2 = ByteStreams.read(s2, theirs, 0, theirs.length);
      if (n1 != n2 || !Arrays.equals(mine, theirs)) {
        return false;
      }
      if (n1 != mine.length) {
        return true; // both streams hit EOF with identical content
      }
    }
  } catch (Throwable e) {
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}
/**
 * Reads and discards data from the given {@code InputStream} until the end of the stream is
 * reached. Returns the total number of bytes read. Does not close the stream.
 *
 * @since 20.0
 */
@CanIgnoreReturnValue
public static long exhaust(InputStream in) throws IOException {
  byte[] scratch = createBuffer();
  long total = 0;
  for (;;) {
    long n = in.read(scratch);
    if (n == -1) {
      return total;
    }
    total += n;
  }
}
"entry size= " + size + " at offset = " + this.inputStream.getPos()); ProtobufUtil.mergeFrom(builder, ByteStreams.limit(this.inputStream, size), (int)size); } catch (InvalidProtocolBufferException ipbe) {
/** * Reads all bytes from an input stream into a byte array. Does not close the stream. * * @param in the input stream to read from * @return a byte array containing all the bytes from the stream * @throws IOException if an I/O error occurs */ public static byte[] toByteArray(InputStream in) throws IOException { // Presize the ByteArrayOutputStream since we know how large it will need // to be, unless that value is less than the default ByteArrayOutputStream // size (32). ByteArrayOutputStream out = new ByteArrayOutputStream(Math.max(32, in.available())); copy(in, out); return out.toByteArray(); }
/** * Reads a file of the given expected size from the given input stream, if it will fit into a byte * array. This method handles the case where the file size changes between when the size is read * and when the contents are read from the stream. */ static byte[] readFile(InputStream in, long expectedSize) throws IOException { if (expectedSize > Integer.MAX_VALUE) { throw new OutOfMemoryError( "file is too large to fit in a byte array: " + expectedSize + " bytes"); } // some special files may return size 0 but have content, so read // the file normally in that case return expectedSize == 0 ? ByteStreams.toByteArray(in) : ByteStreams.toByteArray(in, (int) expectedSize); }
ByteBuffer buf = ByteBuffer.wrap(createBuffer()); long total = 0; while (from.read(buf) != -1) {
/**
 * Corrupts {@code file} in place by dropping its final byte so the trailer no
 * longer parses, swapping the truncated copy back under the original path.
 */
private void corrupt(FileStatus file) throws IOException {
  LOG.info("Corrupt " + file);
  Path tmpFile = file.getPath().suffix(".tmp");
  // remove the last byte to make the trailer corrupted
  try (FSDataInputStream in = fs.open(file.getPath());
    FSDataOutputStream out = fs.create(tmpFile)) {
    ByteStreams.copy(ByteStreams.limit(in, file.getLen() - 1), out);
  }
  // NOTE(review): delete/rename return values are ignored — presumably acceptable
  // here, but a failed rename would leave the original path missing; confirm.
  fs.delete(file.getPath(), false);
  fs.rename(tmpFile, file.getPath());
}
/** * This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding * buffers * @param builder current message builder * @param in Inputsream with delimited protobuf data * @throws IOException */ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) throws IOException { // This used to be builder.mergeDelimitedFrom(in); // but is replaced to allow us to bump the protobuf size limit. final int firstByte = in.read(); if (firstByte != -1) { final int size = CodedInputStream.readRawVarint32(firstByte, in); final InputStream limitedInput = ByteStreams.limit(in, size); final CodedInputStream codedInput = CodedInputStream.newInstance(limitedInput); codedInput.setSizeLimit(size); builder.mergeFrom(codedInput); codedInput.checkLastTagWas(0); } }
/**
 * Decodes the {@code CONSUMED_CLUSTER_IDS} attribute: an int count followed by
 * (msb, lsb) long pairs, as written by {@code setClusterIds}.
 * @return the set of clusterIds that have consumed the mutation; empty if the
 *         attribute is absent
 */
public List<UUID> getClusterIds() {
  List<UUID> clusterIds = new ArrayList<>();
  byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS);
  if(bytes != null) {
    ByteArrayDataInput in = ByteStreams.newDataInput(bytes);
    int numClusters = in.readInt();
    for(int i=0; i<numClusters; i++){
      // Each UUID is stored as most-significant bits then least-significant bits.
      clusterIds.add(new UUID(in.readLong(), in.readLong()));
    }
  }
  return clusterIds;
}
/**
 * Marks that the clusters with the given clusterIds have consumed the mutation.
 * Encoded as an int count followed by (msb, lsb) long pairs, the format read
 * back by {@code getClusterIds}.
 * @param clusterIds of the clusters that have consumed the mutation
 * @return this, for chaining
 */
public Mutation setClusterIds(List<UUID> clusterIds) {
  ByteArrayDataOutput out = ByteStreams.newDataOutput();
  out.writeInt(clusterIds.size());
  for (UUID clusterId : clusterIds) {
    out.writeLong(clusterId.getMostSignificantBits());
    out.writeLong(clusterId.getLeastSignificantBits());
  }
  setAttribute(CONSUMED_CLUSTER_IDS, out.toByteArray());
  return this;
}
/**
 * Reads the full contents of this byte source as a byte array.
 *
 * @throws IOException if an I/O error occurs while reading from this source
 */
public byte[] read() throws IOException {
  Closer closer = Closer.create();
  try {
    InputStream source = closer.register(openStream());
    return ByteStreams.toByteArray(source);
  } catch (Throwable e) {
    // rethrow() propagates e, recording any later close() failure as suppressed.
    throw closer.rethrow(e);
  } finally {
    closer.close();
  }
}