/** Returns the number of bytes currently held in the in-memory buffer, i.e. not yet flushed. */
@Override
public int buffered() {
  final int pendingBytes = buffer.size();
  return pendingBytes;
}
/**
 * Returns the on-disk size of the data portion of the block — the compressed
 * size when compression is enabled. Only valid in the "block ready" state.
 * The header is never compressed, so its size is excluded from the result.
 *
 * @return the on-disk size of the block, not including the header
 */
int getOnDiskSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  final int sizeWithHeader = onDiskBlockBytesWithHeader.size() + onDiskChecksum.length;
  return sizeWithHeader - HConstants.HFILEBLOCK_HEADER_SIZE;
}
/**
 * Shuts down the flush executor and closes the wrapped output stream.
 * Calling this with data still buffered is a programming error — callers
 * must drain via flush() first.
 *
 * @throws IOException if closing the underlying stream fails
 */
@Override
public void close() throws IOException {
  final boolean drained = buffer.size() == 0;
  Preconditions.checkState(drained, "should call flush first before calling close");
  executor.shutdown();
  out.close();
}
/**
 * Returns the total on-disk size of the block ready to be written: header
 * plus data plus checksum bytes. Only valid in the "block ready" state.
 *
 * @return the full on-disk size of the block, header and checksums included
 */
int getOnDiskSizeWithHeader() {
  expectState(State.BLOCK_READY);
  final int checksumBytes = onDiskChecksum.length;
  return checksumBytes + onDiskBlockBytesWithHeader.size();
}
/** Returns the uncompressed size of the block data, excluding the header bytes. */
int getUncompressedSizeWithoutHeader() {
  expectState(State.BLOCK_READY);
  final int totalUncompressed = baosInMemory.size();
  return totalUncompressed - HConstants.HFILEBLOCK_HEADER_SIZE;
}
/** Returns the uncompressed size of the block data, header bytes included. */
int getUncompressedSizeWithHeader() {
  expectState(State.BLOCK_READY);
  final int totalUncompressed = baosInMemory.size();
  return totalUncompressed;
}
/**
 * Writes the trailer for the current chunk: the row-offset count, the raw
 * row offsets, and the on-disk data size, then traces the totals.
 *
 * @throws IOException if writing to the underlying stream fails
 */
public void flush() throws IOException {
  final int rowCount = rowsOffsetBAOS.size() / 4;
  int onDiskDataSize = 0;
  if (startOffset >= 0) {
    onDiskDataSize = out.size() - startOffset;
  }
  out.writeInt(rowCount);
  if (rowsOffsetBAOS.size() > 0) {
    out.write(rowsOffsetBAOS.getBuffer(), 0, rowsOffsetBAOS.size());
  }
  out.writeInt(onDiskDataSize);
  if (LOG.isTraceEnabled()) {
    LOG.trace("RowNumber: " + rowCount
        + ", onDiskDataSize: " + onDiskDataSize
        + ", totalOnDiskSize: " + (out.size() - startOffset));
  }
}
/** * Returns the header or the compressed data (or uncompressed data when not * using compression) as a byte array. Can be called in the "writing" state * or in the "block ready" state. If called in the "writing" state, * transitions the writer to the "block ready" state. This returns * the header + data + checksums stored on disk. * * @return header and data as they would be stored on disk in a byte array * @throws IOException */ byte[] getHeaderAndDataForTest() throws IOException { ensureBlockReady(); // This is not very optimal, because we are doing an extra copy. // But this method is used only by unit tests. byte[] output = new byte[onDiskBlockBytesWithHeader.size() + onDiskChecksum.length]; System.arraycopy(onDiskBlockBytesWithHeader.getBuffer(), 0, output, 0, onDiskBlockBytesWithHeader.size()); System.arraycopy(onDiskChecksum, 0, output, onDiskBlockBytesWithHeader.size(), onDiskChecksum.length); return output; }
/**
 * Writes any buffered bytes to the output, syncs or flushes per {@code sync},
 * and completes the future with the resulting stream position. An I/O failure
 * is routed into the future instead of being thrown.
 */
private void flush0(CompletableFuture<Long> future, ByteArrayOutputStream buffer, boolean sync) {
  try {
    final int len = buffer.size();
    if (len > 0) {
      out.write(buffer.getBuffer(), 0, len);
      if (sync) {
        out.hsync();
      } else {
        out.hflush();
      }
    }
    future.complete(out.getPos());
  } catch (IOException e) {
    future.completeExceptionally(e);
  }
}
/**
 * Clones the header followed by the uncompressed data, even if using
 * compression. This is needed for storing uncompressed blocks in the block
 * cache. Can only be called in the "block ready" state — the expectState
 * call below rejects the "writing" state, despite what this comment
 * previously claimed. Returns only the header and data, does not include
 * checksum data.
 *
 * @return Returns a copy of uncompressed block bytes for caching on write
 */
@VisibleForTesting
ByteBuffer cloneUncompressedBufferWithHeader() {
  expectState(State.BLOCK_READY);
  // Full copy of header + uncompressed data, safe to hand to the block cache.
  byte[] uncompressedBlockBytesWithHeader = baosInMemory.toByteArray();
  // Checksum byte count the on-disk block carries; recorded in the header so
  // readers know the true on-disk layout.
  int numBytes = (int) ChecksumUtil.numBytes(
      onDiskBlockBytesWithHeader.size(), fileContext.getBytesPerChecksum());
  // Overwrite the dummy header with the real on-disk and uncompressed sizes.
  putHeader(uncompressedBlockBytesWithHeader, 0,
      onDiskBlockBytesWithHeader.size() + numBytes,
      baosInMemory.size(), onDiskBlockBytesWithHeader.size());
  return ByteBuffer.wrap(uncompressedBlockBytesWithHeader);
}
/**
 * Writes the header and the compressed data of this block (or uncompressed
 * data when not using compression) into the given stream. Can be called in
 * the "writing" state or in the "block ready" state. If called in the
 * "writing" state, transitions the writer to the "block ready" state.
 *
 * @param out the output stream to write the header, data and checksums to
 * @throws IOException if writing to the stream fails
 */
protected void finishBlockAndWriteHeaderAndData(DataOutputStream out) throws IOException {
  ensureBlockReady();
  long startTime = System.currentTimeMillis();
  out.write(onDiskBlockBytesWithHeader.getBuffer(), 0, onDiskBlockBytesWithHeader.size());
  out.write(onDiskChecksum);
  // Wall-clock latency of the two writes above, reported in milliseconds.
  HFile.updateWriteLatency(System.currentTimeMillis() - startTime);
}
// NOTE(review): fragment — the enclosing method is not visible in this chunk.
// Appears to expose the stream's backing array without copying; presumably the
// caller must not outlive/mutate `baos` — TODO confirm against the full file.
return ByteBuffer.wrap(baos.getBuffer(), 0, baos.size());
/**
 * Sets up a blocking connection to the given remote id: pre-builds the
 * connection preamble and the length-prefixed connection header, and — when
 * configured — starts a dedicated call-sender thread.
 *
 * @throws UnknownHostException if the remote address is unresolved
 */
BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException {
  super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId,
      rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor);
  this.rpcClient = rpcClient;
  if (remoteId.getAddress().isUnresolved()) {
    throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName());
  }
  this.connectionHeaderPreamble = getConnectionHeaderPreamble();
  ConnectionHeader header = getConnectionHeader();
  // Serialize the header once, prefixed with its 4-byte length, so it can be
  // replayed on every (re)connect without re-encoding.
  ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + header.getSerializedSize());
  DataOutputStream dos = new DataOutputStream(baos);
  dos.writeInt(header.getSerializedSize());
  header.writeTo(dos);
  assert baos.size() == 4 + header.getSerializedSize();
  // Exposing the backing array is safe here: the stream was sized exactly above.
  this.connectionHeaderWithLength = baos.getBuffer();
  UserGroupInformation ticket = remoteId.ticket.getUGI();
  this.threadName = "IPC Client (" + this.rpcClient.socketFactory.hashCode()
      + ") connection to " + remoteId.getAddress().toString()
      + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName()));
  // Optional dedicated writer thread; otherwise calls are written inline.
  if (this.rpcClient.conf.getBoolean(BlockingRpcClient.SPECIFIC_WRITE_THREAD, false)) {
    callSender = new CallSender(threadName, this.rpcClient.conf);
    callSender.start();
  } else {
    callSender = null;
  }
}
// NOTE(review): garbled fragment — several pieces of at least two methods are
// fused onto this line (block encoding/encryption, checksum sizing, header
// rewrite) and the braces do not balance. Left byte-identical; reconstruct
// from the full HFileBlock.Writer source before editing.
if (blockType == BlockType.DATA || blockType == BlockType.ENCODED_DATA) { compressAndEncryptDat = dataBlockEncodingCtx. compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); } else { compressAndEncryptDat = defaultBlockEncodingCtx. compressAndEncrypt(baosInMemory.getBuffer(), 0, baosInMemory.size()); compressAndEncryptDat = new Bytes(baosInMemory.getBuffer(), 0, baosInMemory.size()); onDiskBlockBytesWithHeader.size(), fileContext.getBytesPerChecksum()); onDiskBlockBytesWithHeader.size() + numBytes, baosInMemory.size(), onDiskBlockBytesWithHeader.size()); if (onDiskChecksum.length != numBytes) { onDiskChecksum = new byte[numBytes]; onDiskBlockBytesWithHeader.getBuffer(), 0,onDiskBlockBytesWithHeader.size(), onDiskChecksum, 0, fileContext.getChecksumType(), fileContext.getBytesPerChecksum());
/**
 * Encodes the given KeyValues with the supplied encoding into a buffer laid
 * out like an HFile block: a dummy block header followed by the encoded
 * payload. Only the encoded payload (dummy header stripped) is returned.
 *
 * @param encoding        the data block encoding to apply
 * @param kvs             the cells to encode, in iteration order
 * @param encodingContext encoder state threaded through the encode calls
 * @param useOffheapData  when true, return a direct (off-heap) buffer
 * @return the encoded data without the dummy header
 * @throws IOException if the encoder fails
 */
static ByteBuffer encodeKeyValues(DataBlockEncoding encoding, List<KeyValue> kvs,
    HFileBlockEncodingContext encodingContext, boolean useOffheapData) throws IOException {
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(HFILEBLOCK_DUMMY_HEADER);
  DataOutputStream dos = new DataOutputStream(baos);
  encoder.startBlockEncoding(encodingContext, dos);
  for (KeyValue kv : kvs) {
    encoder.encode(kv, encodingContext, dos);
  }
  encoder.endBlockEncoding(encodingContext, dos, baos.getBuffer());
  // Copy the slice straight out of the stream's internal buffer. The previous
  // toByteArray() call duplicated the entire stream just to read this slice.
  byte[] encodedData = new byte[baos.size() - ENCODED_DATA_OFFSET];
  System.arraycopy(baos.getBuffer(), ENCODED_DATA_OFFSET, encodedData, 0, encodedData.length);
  if (useOffheapData) {
    ByteBuffer bb = ByteBuffer.allocateDirect(encodedData.length);
    bb.put(encodedData);
    bb.rewind();
    return bb;
  }
  return ByteBuffer.wrap(encodedData);
}
// NOTE(review): garbled fragment — tails of the encryption and compression
// paths are fused here and the braces do not balance. Left byte-identical;
// reconstruct from the full encoding-context source before editing.
Encryption.incrementIv(iv, 1 + (cryptoByteStream.size() / encryptor.getBlockSize())); return new Bytes(cryptoByteStream.getBuffer(), 0, cryptoByteStream.size()); } else { return new Bytes(cryptoByteStream.getBuffer(), 0, cryptoByteStream.size()); compressionStream.flush(); compressionStream.finish(); return new Bytes(compressedByteStream.getBuffer(), 0, compressedByteStream.size()); } else { return null;
// NOTE(review): fragment — argument list of an HFileBlock construction whose
// enclosing call is not visible in this chunk. TODO confirm against full file.
cloneUncompressedBufferWithHeader(), FILL_HEADER, startOffset, UNSET, onDiskBlockBytesWithHeader.size() + onDiskChecksum.length, newContext);
/**
 * Sets up a blocking connection to the given remote id: pre-builds the
 * connection preamble and the length-prefixed connection header, and — when
 * configured — starts a dedicated call-sender thread. (Duplicate of the
 * constructor appearing earlier in this chunk; consider deduplicating.)
 *
 * @throws UnknownHostException if the remote address is unresolved
 */
BlockingRpcConnection(BlockingRpcClient rpcClient, ConnectionId remoteId) throws IOException {
  super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId,
      rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor);
  this.rpcClient = rpcClient;
  if (remoteId.getAddress().isUnresolved()) {
    throw new UnknownHostException("unknown host: " + remoteId.getAddress().getHostName());
  }
  this.connectionHeaderPreamble = getConnectionHeaderPreamble();
  ConnectionHeader header = getConnectionHeader();
  // Serialize the header once, prefixed with its 4-byte length, so it can be
  // replayed on every (re)connect without re-encoding.
  ByteArrayOutputStream baos = new ByteArrayOutputStream(4 + header.getSerializedSize());
  DataOutputStream dos = new DataOutputStream(baos);
  dos.writeInt(header.getSerializedSize());
  header.writeTo(dos);
  assert baos.size() == 4 + header.getSerializedSize();
  // Exposing the backing array is safe here: the stream was sized exactly above.
  this.connectionHeaderWithLength = baos.getBuffer();
  UserGroupInformation ticket = remoteId.ticket.getUGI();
  this.threadName = "IPC Client (" + this.rpcClient.socketFactory.hashCode()
      + ") connection to " + remoteId.getAddress().toString()
      + ((ticket == null) ? " from an unknown user" : (" from " + ticket.getUserName()));
  // Optional dedicated writer thread; otherwise calls are written inline.
  if (this.rpcClient.conf.getBoolean(BlockingRpcClient.SPECIFIC_WRITE_THREAD, false)) {
    callSender = new CallSender(threadName, this.rpcClient.conf);
    callSender.start();
  } else {
    callSender = null;
  }
}
/**
 * Writes the trailer for the current chunk: the row-offset count, the raw
 * row offsets, and the on-disk data size, then traces the totals.
 *
 * @throws IOException if writing to the underlying stream fails
 */
public void flush() throws IOException {
  final int rowCount = rowsOffsetBAOS.size() / 4;
  int onDiskDataSize = 0;
  if (startOffset >= 0) {
    onDiskDataSize = out.size() - startOffset;
  }
  out.writeInt(rowCount);
  if (rowsOffsetBAOS.size() > 0) {
    out.write(rowsOffsetBAOS.getBuffer(), 0, rowsOffsetBAOS.size());
  }
  out.writeInt(onDiskDataSize);
  if (LOG.isTraceEnabled()) {
    LOG.trace("RowNumber: " + rowCount
        + ", onDiskDataSize: " + onDiskDataSize
        + ", totalOnDiskSize: " + (out.size() - startOffset));
  }
}
/**
 * Writes the trailer for the current chunk: the row-offset count, the raw
 * row offsets, and the on-disk data size, then traces the totals.
 *
 * @throws IOException if writing to the underlying stream fails
 */
public void flush() throws IOException {
  final int rowCount = rowsOffsetBAOS.size() / 4;
  int onDiskDataSize = 0;
  if (startOffset >= 0) {
    onDiskDataSize = out.size() - startOffset;
  }
  out.writeInt(rowCount);
  if (rowsOffsetBAOS.size() > 0) {
    out.write(rowsOffsetBAOS.getBuffer(), 0, rowsOffsetBAOS.size());
  }
  out.writeInt(onDiskDataSize);
  if (LOG.isTraceEnabled()) {
    LOG.trace("RowNumber: " + rowCount
        + ", onDiskDataSize: " + onDiskDataSize
        + ", totalOnDiskSize: " + (out.size() - startOffset));
  }
}