ReplicaInfo.getNumBytes

How to use the getNumBytes method in org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getNumBytes (Showing top 20 results out of 315)
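getNumBytes() returns the number of bytes the DataNode currently records for this replica, i.e. the block length. In the snippets below it is mostly compared against an expected length, used to reserve space, or copied into new replica records. A minimal sketch of the validation pattern, assuming only a hadoop-hdfs dependency (the class, method, and parameter names here are illustrative, not part of Hadoop):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

public class ReplicaLengthCheck {
  // Fails fast when the recorded replica length does not match what the
  // caller expects, mirroring the consistency checks in the snippets below.
  static void checkLength(ReplicaInfo replica, long expectedBlockLen)
      throws IOException {
    if (replica.getNumBytes() != expectedBlockLen) {
      throw new IOException("Corrupted replica " + replica
          + " with a length of " + replica.getNumBytes()
          + " expected length is " + expectedBlockLen);
    }
  }
}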

origin: org.apache.hadoop/hadoop-hdfs

@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
  final ReplicaInfo replica2) {
 ReplicaInfo replicaToKeep;
 ReplicaInfo replicaToDelete;
 // it's the same block so don't ever delete it, even if GS or size
 // differs.  caller should keep the one it just discovered on disk
 if (replica1.getBlockURI().equals(replica2.getBlockURI())) {
  return null;
 }
 if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
  replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
    ? replica1 : replica2;
 } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
  replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
    replica1 : replica2;
 } else if (replica1.getVolume().isTransientStorage() &&
       !replica2.getVolume().isTransientStorage()) {
  replicaToKeep = replica2;
 } else {
  replicaToKeep = replica1;
 }
 replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 if (LOG.isDebugEnabled()) {
  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
    + ".  Will try to delete " + replicaToDelete);
 }
 return replicaToDelete;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockURI()     = " + getBlockURI();
}
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo.getState() != ReplicaState.FINALIZED) {
 throw new ReplicaNotFoundException(
   ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
}
if (replicaInfo.getNumBytes() != expectedBlockLen) {
 throw new IOException("Corrupted replica " + replicaInfo +
   " with a length of " + replicaInfo.getNumBytes() +
   " expected length is " + expectedBlockLen);
}
origin: org.apache.hadoop/hadoop-hdfs

releaseLockedMemory(
  replicaInfo.getOriginalBytesReserved()
    - replicaInfo.getNumBytes(),
  false);
ramDiskReplicaTracker.addReplica(
  bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
origin: org.apache.hadoop/hadoop-hdfs

if (rur.getNumBytes() < newlength) {
 throw new IOException("rur.getNumBytes() < newlength = " + newlength
   + ", rur=" + rur);
}
if (rur.getNumBytes() > newlength) {
 if (!copyOnTruncate) {
  rur.breakHardLinksIfNeeded();
  // ...
 }
}
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
  long newGS, long estimateBlockLen) throws IOException {
 long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes();
 if (getAvailable() < bytesReserved) {
  throw new DiskOutOfSpaceException("Insufficient space for appending to "
    + replicaInfo);
 }
 LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
   .setBlockId(replicaInfo.getBlockId())
   .setLength(replicaInfo.getNumBytes())
   .setGenerationStamp(newGS)
   .setFsVolume(this)
   // ... remaining builder calls elided in this snippet
   .buildLocalReplicaInPipeline();
 return newReplicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo.getState() != ReplicaState.FINALIZED) {
 throw new ReplicaNotFoundException(
   ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
}
if (replicaInfo.getNumBytes() != block.getNumBytes()) {
 throw new IOException("Corrupted replica " + replicaInfo
   + " with a length of " + replicaInfo.getNumBytes()
   + " expected length is " + block.getNumBytes());
}
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur,
  String bpid, long newBlockId, long recoveryId, long newlength)
  throws IOException {
 rur.breakHardLinksIfNeeded();
 File[] copiedReplicaFiles =
   copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
 File blockFile = copiedReplicaFiles[1];
 File metaFile = copiedReplicaFiles[0];
 LocalReplica.truncateBlock(rur.getVolume(), blockFile, metaFile,
   rur.getNumBytes(), newlength, fileIoProvider);
 LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
   .setBlockId(newBlockId)
   .setGenerationStamp(recoveryId)
   .setFsVolume(this)
   .setDirectoryToUse(blockFile.getParentFile())
   .setBytesToReserve(newlength)
   .buildLocalReplicaInPipeline();
 // In theory, this rbw replica needs to reload last chunk checksum,
 // but it is immediately converted to finalized state within the same lock,
 // so no need to update it.
 return newReplicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

if (vol.isTransientStorage()) {
 long lockedBytesReserved =
   cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ?
     diskBlockInfo.getNumBytes() : 0;
 ramDiskReplicaTracker.addReplica(
   bpid, blockId, (FsVolumeImpl) vol, lockedBytesReserved);
}
// ...
if (memBlockInfo.getNumBytes() != memBlockInfo.getBlockDataLength()) {
 // update the in-memory length to match the on-disk block file
 LOG.warn("Updating size of block " + memBlockInfo + " from "
   + memBlockInfo.getNumBytes() + " to "
   + memBlockInfo.getBlockDataLength());
 memBlockInfo.setNumBytes(memBlockInfo.getBlockDataLength());
}
origin: org.apache.hadoop/hadoop-hdfs

|| info.getBlockLength() != memBlock.getNumBytes()) {
origin: org.apache.hadoop/hadoop-hdfs

assert finalized.getBlockId() == oldBlock.getBlockId()
  && finalized.getGenerationStamp() == recoveryId
  && finalized.getNumBytes() == newlength
  : "Replica information mismatched: oldBlock=" + oldBlock
  + ", recoveryId=" + recoveryId + ", newlength=" + newlength
assert finalized.getBlockId() == oldBlock.getBlockId()
  && finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
  && finalized.getNumBytes() == oldBlock.getNumBytes()
  : "Finalized and old information mismatched: oldBlock=" + oldBlock
  + ", genStamp=" + oldBlock.getGenerationStamp()
origin: org.apache.hadoop/hadoop-hdfs

final long numBytes = temp.getNumBytes();
if (numBytes < visible) {
 throw new IOException(numBytes + " = numBytes < visible = "
   + visible + ", temp=" + temp);
}
origin: org.apache.hadoop/hadoop-hdfs

  StorageType.DEFAULT, null, replicaInfo.getNumBytes());
targetVolume = (FsVolumeImpl) targetReference.getVolume();
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaInPipeline convertTemporaryToRbw(ExtendedBlock b,
  ReplicaInfo temp) throws IOException {
 final long blockId = b.getBlockId();
 final long expectedGs = b.getGenerationStamp();
 final long visible = b.getNumBytes();
 final long numBytes = temp.getNumBytes();
 // move block files to the rbw directory
 BlockPoolSlice bpslice = getBlockPoolSlice(b.getBlockPoolId());
 final File dest = FsDatasetImpl.moveBlockFiles(b.getLocalBlock(), temp,
   bpslice.getRbwDir());
 // create RBW
 final LocalReplicaInPipeline rbw = new ReplicaBuilder(ReplicaState.RBW)
   .setBlockId(blockId)
   .setLength(numBytes)
   .setGenerationStamp(expectedGs)
   .setFsVolume(this)
   .setDirectoryToUse(dest.getParentFile())
   .setWriterThread(Thread.currentThread())
   .setBytesToReserve(0)
   .buildLocalReplicaInPipeline();
 rbw.setBytesAcked(visible);
 // load last checksum and datalen
 final File destMeta = FsDatasetUtil.getMetaFile(dest,
   b.getGenerationStamp());
 byte[] lastChunkChecksum = loadLastPartialChunkChecksum(dest, destMeta);
 rbw.setLastChecksumAndDataLen(numBytes, lastChunkChecksum);
 return rbw;
}
origin: org.apache.hadoop/hadoop-hdfs

long replicaLen = replicaInfo.getNumBytes();
if (replicaInfo.getState() == ReplicaState.RBW) {
 ReplicaInPipeline rbw = (ReplicaInPipeline) replicaInfo;
origin: ch.cern.hadoop/hadoop-hdfs

@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
  final ReplicaInfo replica2) {
 ReplicaInfo replicaToKeep;
 ReplicaInfo replicaToDelete;
 // it's the same block so don't ever delete it, even if GS or size
 // differs.  caller should keep the one it just discovered on disk
 if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
  return null;
 }
 if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
  replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
    ? replica1 : replica2;
 } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
  replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
    replica1 : replica2;
 } else if (replica1.getVolume().isTransientStorage() &&
       !replica2.getVolume().isTransientStorage()) {
  replicaToKeep = replica2;
 } else {
  replicaToKeep = replica1;
 }
 replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 if (LOG.isDebugEnabled()) {
  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
    + ".  Will try to delete " + replicaToDelete);
 }
 return replicaToDelete;
}
origin: io.prestosql.hadoop/hadoop-apache

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockFile()    = " + getBlockFile();
}
origin: ch.cern.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockFile()    = " + getBlockFile();
}
origin: org.apache.hadoop/hadoop-hdfs-test

private void verifyAddition(long blockId, long genStamp, long size) {
 final ReplicaInfo replicainfo;
 replicainfo = fds.fetchReplicaInfo(blockId);
 assertNotNull(replicainfo);
 // Added block has the same file as the one created by the test
 File file = new File(getBlockFile(blockId));
 assertEquals(file.getName(), fds.findBlockFile(blockId).getName());
 // Generation stamp is same as that of created file
 assertEquals(genStamp, replicainfo.getGenerationStamp());
 // File size matches
 assertEquals(size, replicainfo.getNumBytes());
}
origin: ch.cern.hadoop/hadoop-hdfs

private void verifyAddition(long blockId, long genStamp, long size) {
 final ReplicaInfo replicainfo;
 replicainfo = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
 assertNotNull(replicainfo);
 // Added block has the same file as the one created by the test
 File file = new File(getBlockFile(blockId));
 assertEquals(file.getName(),
   FsDatasetTestUtil.getFile(fds, bpid, blockId).getName());
 // Generation stamp is same as that of created file
 assertEquals(genStamp, replicainfo.getGenerationStamp());
 // File size matches
 assertEquals(size, replicainfo.getNumBytes());
}

Popular methods of ReplicaInfo

  • getBlockId
  • getBytesOnDisk
  • getGenerationStamp
  • getState
  • getVolume
    Get the volume where this replica is located on disk
  • getBlockFile
    Get the full path of this replica's data file
  • getMetaFile
    Get the full path of this replica's meta file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • setGenerationStamp
  • setNumBytes
  • equals
  • getDir
  • hashCode
  • isUnlinked
  • parseBaseDir
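
A minimal sketch that pulls several of these accessors together into a one-line summary for logging or debugging; the helper name is illustrative and only the public ReplicaInfo API listed above is assumed:

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

public class ReplicaSummary {
  // Formats the most commonly read ReplicaInfo fields into a single line.
  static String summarize(ReplicaInfo replica) {
    return replica.getBlockName()
        + " state=" + replica.getState()
        + " genStamp=" + replica.getGenerationStamp()
        + " numBytes=" + replica.getNumBytes()
        + " bytesOnDisk=" + replica.getBytesOnDisk()
        + " visibleLength=" + replica.getVisibleLength()
        + " volume=" + replica.getVolume();
  }
}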
