try { compressedByteStream.write(COMPRESS_MARKER); OutputStream compressionStream = COMPRESS_ALGO.createCompressionStream(compressedByteStream, COMPRESS_ALGO.getCompressor(), 0); compressionStream.write(buffer, 1, buffer.length - 1); compressionStream.flush();
1)) { InputStream decompressionStream = DistinctValueWithCountServerAggregator.COMPRESS_ALGO .createDecompressionStream(is, DistinctValueWithCountServerAggregator.COMPRESS_ALGO.getDecompressor(), 0); is = decompressionStream;
setTimeToLive(timeToLive); setCompressionType(Compression.Algorithm. valueOf(compression.toUpperCase())); setEncodeOnDisk(encodeOnDisk); setDataBlockEncoding(DataBlockEncoding.
compressor = compressionAlgorithm.getCompressor(); compressedByteStream = new ByteArrayOutputStream(); try { compressionStream = compressionAlgorithm.createPlainCompressionStream( compressedByteStream, compressor); } catch (IOException e) {
columnDescriptor.setCompressionType(Algorithm.valueOf(compression)); if(blockCache != null) columnDescriptor.setBlockCacheEnabled(Boolean.parseBoolean(blockCache));
/**
 * Decompresses data from the given stream using the configured compression
 * algorithm.
 *
 * @param dest destination buffer for the uncompressed bytes
 * @param destOffset offset into {@code dest} at which to start writing
 * @param bufferedBoundedStream a stream to read compressed data from, bounded
 *          to the exact amount of compressed data
 * @param uncompressedSize uncompressed data size, header not included
 * @throws IOException if reading or decompressing fails
 */
protected void decompress(byte[] dest, int destOffset,
    InputStream bufferedBoundedStream, int uncompressedSize)
    throws IOException {
  Decompressor decompressor = null;
  try {
    decompressor = compressAlgo.getDecompressor();
    // try-with-resources: the original closed the stream only on the success
    // path, leaking it when readFully threw. Closing here is safe — the
    // stream was closed unconditionally on success before as well.
    try (InputStream is = compressAlgo.createDecompressionStream(
        bufferedBoundedStream, decompressor, 0)) {
      IOUtils.readFully(is, dest, destOffset, uncompressedSize);
    }
  } finally {
    // Always hand the (possibly pooled) decompressor back.
    if (decompressor != null) {
      compressAlgo.returnDecompressor(decompressor);
    }
  }
}
/**
 * Creates an HFile-backed oplog writer.
 * <p>
 * Reads the block size from the {@code HoplogConfig.HFILE_BLOCK_SIZE_CONF}
 * system property (default 64 KiB) and the compression algorithm from the
 * {@code HoplogConfig.COMPRESSION} system property, then builds the HFile
 * writer and a row-level bloom filter sized for the expected key count.
 *
 * @param keys expected number of keys, used to size the bloom filter
 * @throws IOException if the underlying HFile writer cannot be created
 */
public HFileSortedOplogWriter(int keys) throws IOException {
  try {
    // 1 << 16 = 64 KiB default block size when the property is unset.
    int hfileBlockSize = Integer.getInteger(
        HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

    Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
        HoplogConfig.COMPRESSION_DEFAULT));

//      ByteComparator bc = new ByteComparator();
    writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fsProvider.getFS(), path)
        .withBlockSize(hfileBlockSize)
//          .withComparator(bc)
        .withCompression(compress)
        .create();
    // Bloom filter writes alongside the HFile writer; BloomType.ROW keys
    // the filter on whole row keys.
//      bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
//          writer, bc);
    bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
        writer);

    if (logger.isDebugEnabled())
      logger.debug("{}Created hoplog writer with compression " + compress, logPrefix);
  } catch (IOException e) {
    // Log at debug and rethrow — callers handle the failure.
    if (logger.isDebugEnabled())
      logger.debug("{}IO Error while creating writer", logPrefix);
    throw e;
  }
}
/** * Sets up a compressor and creates a compression stream on top of * this.outputStream. Get one per block written. * * @return A compressing stream; if 'none' compression, returned stream does * not compress. * * @throws IOException * * @see {@link #releaseCompressingStream(DataOutputStream)} */ private DataOutputStream getCompressingStream() throws IOException { this.compressor = compressAlgo.getCompressor(); // Get new DOS compression stream. In tfile, the DOS, is not closed, // just finished, and that seems to be fine over there. TODO: Check // no memory retention of the DOS. Should I disable the 'flush' on the // DOS as the BCFile over in tfile does? It wants to make it so flushes // don't go through to the underlying compressed stream. Flush on the // compressed downstream should be only when done. I was going to but // looks like when we call flush in here, its legitimate flush that // should go through to the compressor. OutputStream os = this.compressAlgo.createCompressionStream( this.outputStream, this.compressor, 0); return new DataOutputStream(os); }
/** * Get supported compression algorithms. * * @return supported compression algorithms. */ public static Compression.Algorithm[] getSupportedCompressionAlgorithms() { String[] allAlgos = HFile.getSupportedCompressionAlgorithms(); List<Compression.Algorithm> supportedAlgos = new ArrayList<Compression.Algorithm>(); for (String algoName : allAlgos) { try { Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName); algo.getCompressor(); supportedAlgos.add(algo); } catch (Throwable t) { // this algo is not available } } return supportedAlgos.toArray(new Compression.Algorithm[0]); } }
public static void testCompression(Compression.Algorithm algo) throws IOException { if (compressionTestResults[algo.ordinal()] != null) { if (compressionTestResults[algo.ordinal()]) { return ; // already passed test, dont do it again. } else { // failed. throw new IOException("Compression algorithm '" + algo.getName() + "'" + " previously failed test."); } } Configuration conf = HBaseConfiguration.create(); try { Compressor c = algo.getCompressor(); algo.returnCompressor(c); compressionTestResults[algo.ordinal()] = true; // passes } catch (Throwable t) { compressionTestResults[algo.ordinal()] = false; // failure throw new IOException(t); } }
/**
 * Builds a compression stream over {@code downStream}.
 * <p>
 * Optionally buffers the raw downstream, wraps it in a plain compression
 * stream, then wraps that in a flush-finishing adapter and an output buffer
 * of {@code DATA_OBUF_SIZE}.
 *
 * @param downStream the stream to write compressed bytes to
 * @param compressor the compressor to use
 * @param downStreamBufferSize size of the buffer on the raw downstream;
 *          {@code <= 0} means no extra buffering
 * @return the buffered, compressing output stream
 * @throws IOException if the compression stream cannot be created
 */
public OutputStream createCompressionStream(
    OutputStream downStream, Compressor compressor, int downStreamBufferSize)
    throws IOException {
  OutputStream sink = (downStreamBufferSize > 0)
      ? new BufferedOutputStream(downStream, downStreamBufferSize)
      : downStream;
  CompressionOutputStream compressing = createPlainCompressionStream(sink, compressor);
  // FinishOnFlushCompressionStream makes flush() finish the compressor
  // so buffered compressed data is actually emitted.
  return new BufferedOutputStream(
      new FinishOnFlushCompressionStream(compressing), DATA_OBUF_SIZE);
}
/**
 * Builds a {@link ColumnFamilyConfig} from the optional children of the
 * given configuration node: {@code compression}, {@code bloomFilter} and
 * {@code blockSize}. Absent children leave the corresponding setting at
 * its default.
 *
 * @param conf the configuration node to read
 * @return the populated column family configuration
 */
public static ColumnFamilyConfig buildCfConfig(Conf conf) {
  ColumnFamilyConfig family = new ColumnFamilyConfig();

  String compressionName = conf.getChild("compression").getValue(null);
  if (compressionName != null) {
    family.setCompression(Compression.Algorithm.valueOf(compressionName.toUpperCase()));
  }

  String bloomFilterName = conf.getChild("bloomFilter").getValue(null);
  if (bloomFilterName != null) {
    // setBoomFilter is the (misspelled) project API name.
    family.setBoomFilter(StoreFile.BloomType.valueOf(bloomFilterName.toUpperCase()));
  }

  Integer configuredBlockSize = conf.getChild("blockSize").getValueAsInteger(null);
  if (configuredBlockSize != null) {
    family.setBlockSize(configuredBlockSize);
  }

  return family;
}
}
/**
 * @return compression type being used for the column family, or
 *         {@link Compression.Algorithm#NONE} when unset
 */
public Compression.Algorithm getCompression() {
  String n = getValue(COMPRESSION);
  if (n == null) {
    return Compression.Algorithm.NONE;
  }
  // Locale.ROOT: default-locale toUpperCase() mangles names like "lzo"
  // under e.g. the Turkish locale ('i' -> dotless variant), breaking valueOf.
  return Compression.Algorithm.valueOf(n.toUpperCase(java.util.Locale.ROOT));
}
/**
 * @return compression type being used for the column family for major
 *         compaction; falls back to {@link #getCompression()} when no
 *         compaction-specific algorithm is configured
 */
public Compression.Algorithm getCompactionCompression() {
  String n = getValue(COMPRESSION_COMPACT);
  if (n == null) {
    return getCompression();
  }
  // Locale.ROOT keeps the name-to-enum mapping stable regardless of the
  // JVM default locale (e.g. Turkish-i casing).
  return Compression.Algorithm.valueOf(n.toUpperCase(java.util.Locale.ROOT));
}
/**
 * Sets the column family's compression type from the algorithm name.
 * Note: the name is resolved case-sensitively, exactly as supplied.
 *
 * @param cd the column descriptor to modify
 * @param algo the exact enum name of the compression algorithm
 */
@Override
public void setCompression(HColumnDescriptor cd, String algo) {
  Compression.Algorithm algorithm = Compression.Algorithm.valueOf(algo);
  cd.setCompressionType(algorithm);
}