ReplicaInfo.getVolume

How to use the getVolume method in org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getVolume (Showing top 20 results out of 315)
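Before the harvested snippets below, here is a minimal, hedged sketch of the typical call pattern. Given a ReplicaInfo (normally obtained from the dataset's replica map, which is assumed here), getVolume() returns the FsVolumeSpi the replica resides on; the volume then exposes details such as its StorageType and base URI. The class and method names in this sketch (ReplicaVolumeExample, describeVolume) are illustrative only and not part of Hadoop.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

// Illustrative helper, not part of Hadoop. The ReplicaInfo is assumed to come
// from an FsDatasetSpi/ReplicaMap lookup elsewhere (see the snippets below).
final class ReplicaVolumeExample {

 /** Describe where a replica lives; guards against a missing volume. */
 static String describeVolume(ReplicaInfo replica) {
  FsVolumeSpi volume = replica.getVolume();
  if (volume == null) {
   // Several snippets below null-check the volume, so do the same here.
   return "unavailable";
  }
  StorageType type = volume.getStorageType(); // e.g. DISK, SSD, RAM_DISK, PROVIDED
  return type + " volume at " + volume.getBaseURI();
 }
}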

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Copy constructor.
 * @param from where to copy from
 */
ReplicaInfo(ReplicaInfo from) {
 this(from, from.getVolume());
}
origin: org.apache.hadoop/hadoop-hdfs

private boolean isReplicaProvided(ReplicaInfo replicaInfo) {
 if (replicaInfo == null) {
  return false;
 }
 return replicaInfo.getVolume().getStorageType() == StorageType.PROVIDED;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Fetch the base URI of the volume on which this replica resides.
 *
 * @return Volume base URI as a string if available; otherwise the
 *         string "unavailable".
 */
private String getVolumeBaseUri() {
 final ReplicaInfo ri = replicaInfo.getReplicaInfo();
 if (ri != null && ri.getVolume() != null) {
  return ri.getVolume().getBaseURI().toString();
 }
 return "unavailable";
}
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
 super(replica, replica.getVolume(), ((LocalReplica)replica).getDir());
 if ( replica.getState() != ReplicaState.FINALIZED &&
    replica.getState() != ReplicaState.RBW &&
    replica.getState() != ReplicaState.RWR ) {
  throw new IllegalArgumentException("Cannot recover replica: " + replica);
 }
 this.original = (LocalReplica) replica;
 this.recoveryId = recoveryId;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Find the file corresponding to the block and return it if it exists.
 */
ReplicaInfo validateBlockFile(String bpid, long blockId) {
 //Should we check for metadata file too?
 final ReplicaInfo r;
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  r = volumeMap.get(bpid, blockId);
 }
 if (r != null) {
  if (r.blockDataExists()) {
   return r;
  }
  // if file is not null, but doesn't exist - possibly disk failed
  datanode.checkDiskErrorAsync(r.getVolume());
 }
 if (LOG.isDebugEnabled()) {
  LOG.debug("blockId=" + blockId + ", replica=" + r);
 }
 return null;
}
origin: org.apache.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockURI()     = " + getBlockURI();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public FsVolumeImpl getVolume(final ExtendedBlock b) {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  final ReplicaInfo r =
    volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
  return r != null ? (FsVolumeImpl) r.getVolume() : null;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

private File[] copyReplicaWithNewBlockIdAndGS(
  ReplicaInfo replicaInfo, String bpid, long newBlkId, long newGS)
  throws IOException {
 String blockFileName = Block.BLOCK_FILE_PREFIX + newBlkId;
 FsVolumeImpl v = (FsVolumeImpl) replicaInfo.getVolume();
 final File tmpDir = v.getBlockPoolSlice(bpid).getTmpDir();
 final File destDir = DatanodeUtil.idToBlockDir(tmpDir, newBlkId);
 final File dstBlockFile = new File(destDir, blockFileName);
 final File dstMetaFile = FsDatasetUtil.getMetaFile(dstBlockFile, newGS);
 return FsDatasetImpl.copyBlockFiles(replicaInfo, dstMetaFile,
   dstBlockFile, true, DFSUtilClient.getSmallBufferSize(conf), conf);
}
origin: org.apache.hadoop/hadoop-hdfs

@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
  final ReplicaInfo replica2) {
 ReplicaInfo replicaToKeep;
 ReplicaInfo replicaToDelete;
 // it's the same block so don't ever delete it, even if GS or size
 // differs.  caller should keep the one it just discovered on disk
 if (replica1.getBlockURI().equals(replica2.getBlockURI())) {
  return null;
 }
 if (replica1.getGenerationStamp() != replica2.getGenerationStamp()) {
  replicaToKeep = replica1.getGenerationStamp() > replica2.getGenerationStamp()
    ? replica1 : replica2;
 } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
  replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes() ?
    replica1 : replica2;
 } else if (replica1.getVolume().isTransientStorage() &&
       !replica2.getVolume().isTransientStorage()) {
  replicaToKeep = replica2;
 } else {
  replicaToKeep = replica1;
 }
 replicaToDelete = (replicaToKeep == replica1) ? replica2 : replica1;
 if (LOG.isDebugEnabled()) {
  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
    + ".  Will try to delete " + replicaToDelete);
 }
 return replicaToDelete;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Finalizes newReplica by calling finalizeReplica internally.
 *
 * @param newReplicaInfo - ReplicaInfo
 * @param block          - Extended Block
 * @throws IOException
 */
@VisibleForTesting
void finalizeNewReplica(ReplicaInfo newReplicaInfo,
  ExtendedBlock block) throws IOException {
 // Finalize the copied files
 try {
  String bpid = block.getBlockPoolId();
  finalizeReplica(bpid, newReplicaInfo);
  FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
  volume.incrNumBlocks(bpid);
 } catch (IOException ioe) {
  // Cleanup block data and metadata
  // Decrement of dfsUsed and noOfBlocks for volume not required
  newReplicaInfo.deleteBlockData();
  newReplicaInfo.deleteMetadata();
  throw ioe;
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 void stopAllDataxceiverThreads(FsVolumeImpl volume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
   for (String blockPoolId : volumeMap.getBlockPoolList()) {
    Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
    for (ReplicaInfo replicaInfo : replicas) {
     if ((replicaInfo.getState() == ReplicaState.TEMPORARY
       || replicaInfo.getState() == ReplicaState.RBW)
       && replicaInfo.getVolume().equals(volume)) {
      ReplicaInPipeline replicaInPipeline =
        (ReplicaInPipeline) replicaInfo;
      replicaInPipeline.interruptThread();
     }
    }
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Cleanup the replicaInfo object passed.
 *
 * @param bpid           - block pool id
 * @param replicaInfo    - ReplicaInfo
 */
private void cleanupReplica(String bpid, ReplicaInfo replicaInfo) {
 if (replicaInfo.deleteBlockData() || !replicaInfo.blockDataExists()) {
  FsVolumeImpl volume = (FsVolumeImpl) replicaInfo.getVolume();
  volume.onBlockFileDeletion(bpid, replicaInfo.getBytesOnDisk());
  if (replicaInfo.deleteMetadata() || !replicaInfo.metadataExists()) {
   volume.onMetaFileDeletion(bpid, replicaInfo.getMetadataLength());
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public InputStream getBlockInputStream(ExtendedBlock b,
  long seekOffset) throws IOException {
 ReplicaInfo info;
 synchronized(this) {
  info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
 }
 if (info != null && info.getVolume().isTransientStorage()) {
  ramDiskReplicaTracker.touch(b.getBlockPoolId(), b.getBlockId());
  datanode.getMetrics().incrRamDiskBlocksReadHits();
 }
 if(info != null && info.blockDataExists()) {
  return info.getDataInputStream(seekOffset);
 } else {
  throw new IOException("No data exists for block " + b);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaInPipeline replica = null;
try {
origin: org.apache.hadoop/hadoop-hdfs

public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur,
  String bpid, long newBlockId, long recoveryId, long newlength)
  throws IOException {
 rur.breakHardLinksIfNeeded();
 File[] copiedReplicaFiles =
   copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
 File blockFile = copiedReplicaFiles[1];
 File metaFile = copiedReplicaFiles[0];
 LocalReplica.truncateBlock(rur.getVolume(), blockFile, metaFile,
   rur.getNumBytes(), newlength, fileIoProvider);
 LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
   .setBlockId(newBlockId)
   .setGenerationStamp(recoveryId)
   .setFsVolume(this)
   .setDirectoryToUse(blockFile.getParentFile())
   .setBytesToReserve(newlength)
   .buildLocalReplicaInPipeline();
 // In theory, this rbw replica needs to reload last chunk checksum,
 // but it is immediately converted to finalized state within the same lock,
 // so no need to update it.
 return newReplicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public void unfinalizeBlock(ExtendedBlock b) throws IOException {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
    b.getLocalBlock());
  if (replicaInfo != null &&
    replicaInfo.getState() == ReplicaState.TEMPORARY) {
   // remove from volumeMap
   volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
   // delete the on-disk temp file
   if (delBlockFromDisk(replicaInfo)) {
    LOG.warn("Block " + b + " unfinalized and removed. ");
   }
   if (replicaInfo.getVolume().isTransientStorage()) {
    ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(),
      b.getBlockId(), true);
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
  long blkOffset, long metaOffset) throws IOException {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  ReplicaInfo info = getReplicaInfo(b);
  FsVolumeReference ref = info.getVolume().obtainReference();
  try {
   InputStream blockInStream = info.getDataInputStream(blkOffset);
   try {
    InputStream metaInStream = info.getMetadataInputStream(metaOffset);
    return new ReplicaInputStreams(
      blockInStream, metaInStream, ref, datanode.getFileIoProvider());
   } catch (IOException e) {
    IOUtils.cleanup(null, blockInStream);
    throw e;
   }
  } catch (IOException e) {
   IOUtils.cleanup(null, ref);
   throw e;
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

assert replicaInfo.getVolume() == this:
 "The volume of the replica should be the same as this volume";
origin: org.apache.hadoop/hadoop-hdfs

try (AutoCloseableLock lock = datasetLock.acquire()) {
 ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
 FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
 ReplicaInPipeline replica;
 try {
origin: org.apache.hadoop/hadoop-hdfs

 ((FinalizedReplica)newReplicaInfo).loadLastPartialChunkChecksum();
} else {
 FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
 if (v == null) {
  throw new IOException("No volume for block " + replicaInfo);
org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getVolume

Javadoc

Get the volume where this replica is located on disk
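The snippets above often pair getVolume() with FsVolumeSpi#obtainReference() so the volume cannot be removed while replica data is being read. A minimal, hedged sketch of that pattern follows; the class and method names (VolumePinningExample, readWithPinnedVolume) are illustrative only and not part of Hadoop.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;

// Illustrative sketch, not part of Hadoop: pin the replica's volume for the
// duration of an I/O operation, mirroring getTmpInputStreams above.
final class VolumePinningExample {
 static void readWithPinnedVolume(ReplicaInfo replica) throws IOException {
  // FsVolumeReference is Closeable, so try-with-resources releases the pin.
  try (FsVolumeReference ref = replica.getVolume().obtainReference()) {
   // Open streams on the replica here while the reference is held, e.g.
   // replica.getDataInputStream(0).
  }
 }
}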

Popular methods of ReplicaInfo

  • getBlockId
  • getBytesOnDisk
  • getGenerationStamp
  • getNumBytes
  • getState
  • getBlockFile
    Get the full path of this replica's data file
  • getMetaFile
    Get the full path of this replica's meta file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • setGenerationStamp
  • setNumBytes
  • equals
  • getDir
  • hashCode
  • isUnlinked
  • parseBaseDir
