/**
 * Returns the chunk size used when reading an object in parts.
 *
 * @return block size in bytes
 */
private long getBlockSize() {
  long blockSizeBytes = Configuration.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT);
  return blockSizeBytes;
}
/**
 * @return the default block size in bytes, taken from the user block size configuration
 */
@Override
public long getDefaultBlockSize() {
  long defaultSize = Configuration.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT);
  return defaultSize;
}
/**
 * Gets the block size in bytes. The path argument is not consulted; this method always
 * returns the default user block size configured in Alluxio.
 *
 * @param path the file name (unused)
 * @return the default Alluxio user block size
 */
@Override
public long getBlockSizeByte(String path) throws IOException {
  long defaultBlockSize = Configuration.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT);
  return defaultBlockSize;
}
/**
 * Initializes the internal buffer based on the user's specified size. Any reads above half
 * this size will not be buffered.
 *
 * @return a heap buffer of user configured size
 */
private ByteBuffer allocateBuffer() {
  long configuredSize =
      ClientContext.getConf().getBytes(Constants.USER_BLOCK_REMOTE_READ_BUFFER_SIZE_BYTES);
  // NOTE(review): the narrowing cast assumes the configured size fits in an int — confirm.
  return ByteBuffer.allocate((int) configuredSize);
}
/**
 * @return a newly allocated heap buffer sized by the user-defined default file buffer size
 */
private ByteBuffer allocateBuffer() {
  int capacity = (int) ClientContext.getConf().getBytes(Constants.USER_FILE_BUFFER_BYTES);
  return ByteBuffer.allocate(capacity);
}
}
/**
 * Constructs the default options: configured user block size, no TTL (with DELETE as the
 * TTL action), non-cacheable, and the file umask applied to the mode.
 */
private CreateFileOptions() {
  super();
  mCacheable = false;
  mTtl = Constants.NO_TTL;
  mTtlAction = TtlAction.DELETE;
  mBlockSizeBytes = Configuration.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT);
  mMode.applyFileUMask();
}
/**
 * @return the default block size in bytes from the client configuration
 */
@Override
public long getDefaultBlockSize() {
  Configuration conf = ClientContext.getConf();
  return conf.getBytes(Constants.USER_BLOCK_SIZE_BYTES_DEFAULT);
}
/**
 * Creates an output stream for appending journal entries to {@code log} in the given under
 * storage. Opens the underlying UFS stream eagerly, so construction fails fast if the log
 * location is not writable.
 *
 * @param ufs the under storage holding the journal
 * @param log the location to write the log to
 * @param journalFormatter the journal formatter to use when writing journal entries
 * @param journalWriter the journal writer to use to get journal entry sequence numbers and
 *        complete the log when it needs to be rotated
 * @throws IOException if the log file cannot be created in the under storage
 */
public EntryOutputStream(UnderFileSystem ufs, URI log, JournalFormatter journalFormatter,
    UfsJournalWriter journalWriter) throws IOException {
  mUfs = ufs;
  mCurrentLog = log;
  mJournalFormatter = journalFormatter;
  mJournalWriter = journalWriter;
  // Size threshold after which the log is rotated.
  mMaxLogSize = Configuration.getBytes(PropertyKey.MASTER_JOURNAL_LOG_SIZE_BYTES_MAX);
  // NOTE(review): atomic creation is explicitly disabled — presumably the journal recovery
  // path tolerates a partially written tail; confirm before changing.
  mRawOutputStream = mUfs.create(mCurrentLog.toString(),
      CreateOptions.defaults().setEnsureAtomic(false).setCreateParent(true));
  LOG.info("Opened current log file: {}", mCurrentLog);
  mDataOutputStream = new DataOutputStream(mRawOutputStream);
}
/**
 * Creates a new instance of {@link UfsJournalLogWriter}.
 *
 * @param journal the handle to the journal
 * @param nextSequenceNumber the sequence number to begin writing at
 * @throws IOException if the current log cannot be inspected
 */
UfsJournalLogWriter(UfsJournal journal, long nextSequenceNumber) throws IOException {
  mJournal = Preconditions.checkNotNull(journal, "journal");
  mUfs = mJournal.getUfs();
  mNextSequenceNumber = nextSequenceNumber;
  // Size threshold after which the log is rotated.
  mMaxLogSize = Configuration.getBytes(PropertyKey.MASTER_JOURNAL_LOG_SIZE_BYTES_MAX);
  // Always rotate before the first write rather than appending to a pre-existing log.
  mRotateLogForNextWrite = true;
  UfsJournalFile currentLog = UfsJournalSnapshot.getCurrentLog(mJournal);
  if (currentLog != null) {
    // Track the existing current log with a no-op output stream. NOTE(review): presumably
    // a real stream is opened when the log is rotated — confirm against the write path.
    mJournalOutputStream = new JournalOutputStream(currentLog, ByteStreams.nullOutputStream());
  }
  mGarbageCollector = new UfsJournalGarbageCollector(mJournal);
  // Entries written but not yet confirmed flushed.
  mEntriesToFlush = new ArrayDeque<>();
}
/**
 * Creates a new instance with defaults from the configuration.
 */
private CreateFileOptions() {
  Configuration conf = ClientContext.getConf();
  mRecursive = true;
  mTtl = Constants.NO_TTL;
  mBlockSizeBytes = conf.getBytes(Constants.USER_BLOCK_SIZE_BYTES_DEFAULT);
  mWriteType = conf.getEnum(Constants.USER_FILE_WRITE_TYPE_DEFAULT, WriteType.class);
  try {
    // Instantiate the configured write-location policy via its no-arg constructor.
    mLocationPolicy = CommonUtils.createNewClassInstance(
        conf.<FileWriteLocationPolicy>getClass(Constants.USER_FILE_WRITE_LOCATION_POLICY),
        new Class[] {}, new Object[] {});
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
/**
 * Creates a new instance with defaults from the client configuration: the default user
 * block size, no TTL, the configured file write location policy, and the configured
 * default write type.
 */
private OutStreamOptions() {
  Configuration conf = ClientContext.getConf();
  mBlockSizeBytes = conf.getBytes(Constants.USER_BLOCK_SIZE_BYTES_DEFAULT);
  mTtl = Constants.NO_TTL;
  try {
    // Reuse the local conf handle instead of calling ClientContext.getConf() a second
    // time, matching the pattern used by the sibling options constructors.
    mLocationPolicy = CommonUtils.createNewClassInstance(
        conf.<FileWriteLocationPolicy>getClass(Constants.USER_FILE_WRITE_LOCATION_POLICY),
        new Class[] {}, new Object[] {});
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
  mWriteType = conf.getEnum(Constants.USER_FILE_WRITE_TYPE_DEFAULT, WriteType.class);
}
// Fixed: the debug format previously had five '{}' placeholders for only four arguments,
// so the last placeholder was emitted literally in the log output.
LOG.debug("HdfsFileInputStream({}, {}, {}, {})", uri, conf, bufferSize, stats);
Configuration configuration = ClientContext.getConf();
long bufferBytes = configuration.getBytes(Constants.USER_FILE_BUFFER_BYTES);
// Multiply in long arithmetic before the checked narrowing cast so a large configured
// buffer size fails loudly in checkedCast instead of silently overflowing int after the
// cast (the previous code computed checkedCast(bufferBytes) * 4, which could wrap).
mBuffer = new byte[Ints.checkedCast(bufferBytes * 4)];
mCurrentPosition = 0;
// Total bytes across all files: bytes per file times number of files.
sFilesBytes = sFileBytes * sFiles;
// User-configured file buffer size. NOTE(review): presumably included in the result
// prefix or throughput calculation below — confirm in the continuation of this method.
long fileBufferBytes = Configuration.getBytes(PropertyKey.USER_FILE_BUFFER_BYTES);
// Prefix describing the benchmark parameters for result reporting (format string
// continues beyond this excerpt).
sResultPrefix = String.format(
    "Threads %d FilesPerThread %d TotalFiles %d "
/**
 * Creates a new local block output stream.
 *
 * @param blockId the block id
 * @param blockSize the block size
 * @throws IOException if an I/O error occurs
 */
public LocalBlockOutStream(long blockId, long blockSize) throws IOException {
  super(blockId, blockSize);
  mCloser = Closer.create();
  // Acquire a client for the block worker running on the local host.
  mBlockWorkerClient =
      mContext.acquireWorkerClient(NetworkAddressUtils.getLocalHostName(ClientContext.getConf()));
  try {
    // Ask the worker for a location for this block, reserving an initial allocation equal
    // to the user file buffer size.
    long initialSize = ClientContext.getConf().getBytes(Constants.USER_FILE_BUFFER_BYTES);
    String blockPath = mBlockWorkerClient.requestBlockLocation(mBlockId, initialSize);
    mReservedBytes += initialSize;
    mWriter = new LocalFileBlockWriter(blockPath);
    // Register the writer so it is closed when mCloser is closed.
    mCloser.register(mWriter);
  } catch (IOException e) {
    // Construction failed after the worker client was acquired; release it to avoid a leak.
    mContext.releaseWorkerClient(mBlockWorkerClient);
    throw e;
  }
}