ReplicaInfo.getBlockId

How to use the getBlockId method in org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getBlockId (Showing top 20 results out of 315)
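Before the snippets: getBlockId() returns a replica's numeric block ID, and most call sites below use it either to key the replica into per-block-pool structures or to build an ExtendedBlockId/ExtendedBlock. A minimal hedged sketch of that pattern (the class and method names here are illustrative; only ExtendedBlockId and ReplicaInfo come from Hadoop, and the (blockId, bpid) constructor matches its use in the invalidate() snippet below):

import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

public class GetBlockIdExample {
 // Sketch: key a replica by (blockId, blockPoolId), the same pairing the
 // datanode uses when invalidating short-circuit descriptors.
 // `replica` and `bpid` are assumed to be supplied by surrounding
 // dataset code, which is not shown.
 static ExtendedBlockId keyFor(ReplicaInfo replica, String bpid) {
  long blockId = replica.getBlockId(); // numeric block id
  return new ExtendedBlockId(blockId, bpid);
 }
}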

origin: org.apache.hadoop/hadoop-hdfs

private LocalReplica buildRUR() throws IllegalArgumentException {
 if (null == fromReplica) {
  throw new IllegalArgumentException(
    "Missing a valid replica to recover from");
 }
 if (null != writer || null != block) {
  throw new IllegalArgumentException("Invalid state for "
    + "recovering from replica with blk id "
    + fromReplica.getBlockId());
 }
 if (fromReplica.getState() == ReplicaState.RUR) {
  return new ReplicaUnderRecovery((ReplicaUnderRecovery) fromReplica);
 } else {
  return new ReplicaUnderRecovery(fromReplica, recoveryId);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Invalidate a block without deleting the actual on-disk block file.
 *
 * It should only be used when deactivating disks.
 *
 * @param bpid the block pool ID.
 * @param block The block to be invalidated.
 */
public void invalidate(String bpid, ReplicaInfo block) {
 // If a DFSClient has the replica in its cache of short-circuit file
 // descriptors (and the client is using ShortCircuitShm), invalidate it.
 datanode.getShortCircuitRegistry().processBlockInvalidation(
   new ExtendedBlockId(block.getBlockId(), bpid));
 // If the block is cached, start uncaching it.
 cacheManager.uncacheBlock(bpid, block.getBlockId());
 datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block),
   block.getStorageUuid());
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Add a replica's meta information to the map; if an entry for the block
 * already exists, return the old replicaInfo.
 */
ReplicaInfo addAndGet(String bpid, ReplicaInfo replicaInfo) {
 checkBlockPool(bpid);
 checkBlock(replicaInfo);
 try (AutoCloseableLock l = lock.acquire()) {
  FoldedTreeSet<ReplicaInfo> set = map.get(bpid);
  if (set == null) {
   // Add an entry for block pool if it does not exist already
   set = new FoldedTreeSet<>();
   map.put(bpid, set);
  }
  ReplicaInfo oldReplicaInfo = set.get(replicaInfo.getBlockId(),
    LONG_AND_BLOCK_COMPARATOR);
  if (oldReplicaInfo != null) {
   return oldReplicaInfo;
  } else {
   set.addOrReplace(replicaInfo);
  }
  return replicaInfo;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

final StorageLocation blockStorageLocation =
  block.getVolume().getStorageLocation();
LOG.trace("checking for block " + block.getBlockId() +
  " with storageLocation " + blockStorageLocation);
if (blockStorageLocation.equals(sdLocation)) {
 // ... (rest of the snippet truncated)
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Move a persisted replica from lazypersist directory to a subdirectory
 * under finalized.
 */
ReplicaInfo activateSavedReplica(ReplicaInfo replicaInfo,
  RamDiskReplica replicaState) throws IOException {
 File metaFile = replicaState.getSavedMetaFile();
 File blockFile = replicaState.getSavedBlockFile();
 final long blockId = replicaInfo.getBlockId();
 final File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, blockId);
 final File targetBlockFile = new File(blockDir, blockFile.getName());
 final File targetMetaFile = new File(blockDir, metaFile.getName());
 fileIoProvider.moveFile(volume, blockFile, targetBlockFile);
 FsDatasetImpl.LOG.info("Moved " + blockFile + " to " + targetBlockFile);
 fileIoProvider.moveFile(volume, metaFile, targetMetaFile);
 FsDatasetImpl.LOG.info("Moved " + metaFile + " to " + targetMetaFile);
 ReplicaInfo newReplicaInfo =
   new ReplicaBuilder(ReplicaState.FINALIZED)
   .setBlockId(blockId)
   .setLength(replicaInfo.getBytesOnDisk())
   .setGenerationStamp(replicaInfo.getGenerationStamp())
   .setFsVolume(replicaState.getLazyPersistVolume())
   .setDirectoryToUse(targetBlockFile.getParentFile())
   .build();
 return newReplicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

   + "Replica with blk id " + replicaInfo.getBlockId() + " has state "
   + replicaInfo.getState());
cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
  newGS, estimateBlockLen);
if (rip.getReplicaInfo().getState() != ReplicaState.RBW) {
 throw new IOException("Append on block " + replicaInfo.getBlockId() +
   " returned a replica of state " + rip.getReplicaInfo().getState()
   + "; expected RBW");
origin: org.apache.hadoop/hadoop-hdfs

// ...
   + oldReplicaInfo.getBlockId()
   + " should be derived from LocalReplica");
origin: org.apache.hadoop/hadoop-hdfs

addDeletingBlock(bpid, removing.getBlockId());
if (LOG.isDebugEnabled()) {
 LOG.debug("Block file " + removing.getBlockURI() /* ... */);
}
origin: org.apache.hadoop/hadoop-hdfs

ReplicaInfo memBlock = bl.get(m);
ScanInfo info = blockpoolReport[d];
if (info.getBlockId() < memBlock.getBlockId()) {
 // On-disk block has no matching in-memory entry.
 if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
  // ... (handling elided by the snippet)
 }
 continue;
}
if (info.getBlockId() > memBlock.getBlockId()) {
 // In-memory block has no matching on-disk entry.
 addDifference(diffRecord, statsRecord,
     memBlock.getBlockId(), info.getVolume());
 m++;
 continue;
}
// ...
ReplicaInfo current = bl.get(m++);
addDifference(diffRecord, statsRecord,
       current.getBlockId(), current.getVolume());
origin: org.apache.hadoop/hadoop-hdfs

boolean copyOnTruncate = newBlockId > 0L && rur.getBlockId() != newBlockId;
// ... (call elided by the snippet:)
//     rur, bpid, newBlockId, recoveryId, newlength);
if (newReplicaInfo.getState() != ReplicaState.RBW) {
 throw new IOException("Append on block " + rur.getBlockId()
   + " returned a replica of state " + newReplicaInfo.getState()
   + "; expected RBW");
}
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaInfo moveBlockToTmpLocation(ExtendedBlock block,
  ReplicaInfo replicaInfo,
  int smallBufferSize,
  Configuration conf) throws IOException {
 File[] blockFiles = FsDatasetImpl.copyBlockFiles(block.getBlockId(),
   block.getGenerationStamp(), replicaInfo,
   getTmpDir(block.getBlockPoolId()),
   replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
 ReplicaInfo newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
   .setBlockId(replicaInfo.getBlockId())
   .setGenerationStamp(replicaInfo.getGenerationStamp())
   .setFsVolume(this)
   .setDirectoryToUse(blockFiles[0].getParentFile())
   .setBytesToReserve(0)
   .build();
 newReplicaInfo.setNumBytes(blockFiles[1].length());
 return newReplicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

try (AutoCloseableLock lock = datasetLock.acquire()) {
 if (volumeMap.get(bpid, replicaInfo.getBlockId()).getGenerationStamp()
   > replicaInfo.getGenerationStamp()) {
  throw new IOException("Generation Stamp should be monotonically "
    /* ... */);
 }
 // ...
 ramDiskReplicaTracker.addReplica(
   bpid, replicaInfo.getBlockId(), v, replicaInfo.getNumBytes());
 datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
 // ...
}
origin: org.apache.hadoop/hadoop-hdfs

 assert finalized.getBlockId() == oldBlock.getBlockId()
   && finalized.getGenerationStamp() == recoveryId
   && finalized.getNumBytes() == newlength
   : "..." // message elided by the snippet
   + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
} else {
 assert finalized.getBlockId() == oldBlock.getBlockId()
   && finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
   && finalized.getNumBytes() == oldBlock.getNumBytes()
   /* ... */;
}
origin: org.apache.hadoop/hadoop-hdfs

// ...
.setBlockId(replicaInfo.getBlockId())
.setLength(replicaInfo.getNumBytes())
.setGenerationStamp(newGS)
// ...
origin: org.apache.hadoop/hadoop-hdfs

  // ...
  .build();
ReplicaInfo oldReplica =
  volumeMap.get(bpid, newReplica.getBlockId());
if (oldReplica == null) {
 volumeMap.add(bpid, newReplica);
 incDfsUsed(region.getBlock().getNumBytes());
} else {
 LOG.warn("A block with id " + newReplica.getBlockId()
   + " exists locally. Skipping PROVIDED replica");
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Return the parent directory path where this replica is located
 * @return the parent directory path where this replica is located
 */
File getDir() {
 return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
   getBlockId()) : baseDir;
}
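
getDir() above delegates the id-to-directory mapping to DatanodeUtil.idToBlockDir. As a rough illustration of that layout, here is a simplified sketch of the two-level subdirectory hashing used by the stock HDFS finalized-block tree; the shift/mask constants and the "subdir" prefix follow the standard layout but should be treated as illustrative rather than a verbatim copy of DatanodeUtil:

import java.io.File;

public class BlockDirLayout {
 // Sketch: hash a block id into a two-level subdir tree
 // (subdir0..subdir31 / subdir0..subdir31) under baseDir, mirroring
 // the layout idToBlockDir produces for finalized replicas.
 static File idToBlockDirSketch(File baseDir, long blockId) {
  int d1 = (int) ((blockId >> 16) & 0x1F); // first-level bucket, 0..31
  int d2 = (int) ((blockId >> 8) & 0x1F);  // second-level bucket, 0..31
  return new File(baseDir, "subdir" + d1 + File.separator + "subdir" + d2);
 }
}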
origin: ch.cern.hadoop/hadoop-hdfs

 public ReplicaRecoveryInfo createInfo() {
  return new ReplicaRecoveryInfo(original.getBlockId(), 
    original.getBytesOnDisk(), original.getGenerationStamp(),
    original.getState()); 
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) {
 Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
 Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
 Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
 Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
}

Popular methods of ReplicaInfo

  • getBytesOnDisk
  • getGenerationStamp
  • getNumBytes
  • getState
  • getVolume
    Get the volume where this replica is located on disk
  • getBlockFile
    Get the full path of this replica's data file
  • getMetaFile
    Get the full path of this replica's meta file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • setGenerationStamp
  • setNumBytes
  • equals
  • getDir
  • hashCode
  • isUnlinked
  • parseBaseDir

A short hedged sketch that exercises several of these accessors follows below.
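A minimal sketch, assuming the ReplicaInfo instance comes from the datanode's volume map or similar dataset code; the class name, the describe helper, and the output format are illustrative:

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

public class ReplicaSummary {
 // Sketch: summarize a replica via the accessors listed above.
 // `replica` is assumed to be obtained elsewhere (e.g. a volume-map
 // lookup), which is not shown here.
 static String describe(ReplicaInfo replica) {
  return "blk_" + replica.getBlockId()
    + " gs=" + replica.getGenerationStamp()
    + " bytesOnDisk=" + replica.getBytesOnDisk()
    + " state=" + replica.getState()
    + " transient=" + replica.isOnTransientStorage();
 }
}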
