Tabnine Logo
ReplicaInfo.getBlockFile
Code IndexAdd Tabnine to your IDE (free)

How to use
getBlockFile
method
in
org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getBlockFile (Showing top 20 results out of 315)

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Removes a replica's data file and meta file from disk. Deletion is
 * best-effort: a failed delete is logged as a warning, never thrown.
 */
private void deleteReplica(final ReplicaInfo replicaToDelete) {
 // Delete the files on disk. Failure here is okay.
 final File data = replicaToDelete.getBlockFile();
 if (!data.delete()) {
  LOG.warn("Failed to delete block file " + data);
 }
 final File meta = replicaToDelete.getMetaFile();
 if (!meta.delete()) {
  LOG.warn("Failed to delete meta file " + meta);
 }
}
origin: org.apache.hadoop/hadoop-hdfs-test

/** Delete a block file */
private long deleteBlockFile() {
 synchronized(fds) {
  for (ReplicaInfo b : fds.volumeMap.replicas()) {
   File f = b.getBlockFile();
   File mf = b.getMetaFile();
   // Delete a block file that has corresponding metadata file
   if (f.exists() && mf.exists() && f.delete()) {
    LOG.info("Deleting block file " + f.getAbsolutePath());
    return b.getBlockId();
   }
  }
 }
 return 0;
}
origin: org.apache.hadoop/hadoop-hdfs-test

/** Truncate a block file */
private long truncateBlockFile() throws IOException {
 synchronized (fds) {
  for (ReplicaInfo b : fds.volumeMap.replicas()) {
   File f = b.getBlockFile();
   File mf = b.getMetaFile();
   // Truncate a block file that has a corresponding metadata file
   if (f.exists() && f.length() != 0 && mf.exists()) {
    FileOutputStream s = new FileOutputStream(f);
    FileChannel channel = s.getChannel();
    channel.truncate(0);
    LOG.info("Truncated block file " + f.getAbsolutePath());
    return b.getBlockId();
   }
  }
 }
 return 0;
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Check the files of a replica. */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
 //check replica's file
 final File f = r.getBlockFile();
 if (!f.exists()) {
  throw new FileNotFoundException("File " + f + " not found, r=" + r);
 }
 if (r.getBytesOnDisk() != f.length()) {
  throw new IOException("File length mismatched.  The length of "
    + f + " is " + f.length() + " but r=" + r);
 }
 //check replica's meta file
 final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
 if (!metafile.exists()) {
  throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
 }
 if (metafile.length() == 0) {
  throw new IOException("Metafile " + metafile + " is empty, r=" + r);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/** Check the files of a replica. */
static void checkReplicaFiles(final ReplicaInfo r) throws IOException {
 //check replica's file
 final File f = r.getBlockFile();
 if (!f.exists()) {
  throw new FileNotFoundException("File " + f + " not found, r=" + r);
 }
 if (r.getBytesOnDisk() != f.length()) {
  throw new IOException("File length mismatched.  The length of "
    + f + " is " + f.length() + " but r=" + r);
 }
 //check replica's meta file
 final File metafile = FsDatasetUtil.getMetaFile(f, r.getGenerationStamp());
 if (!metafile.exists()) {
  throw new IOException("Metafile " + metafile + " does not exist, r=" + r);
 }
 if (metafile.length() == 0) {
  throw new IOException("Metafile " + metafile + " is empty, r=" + r);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Best-effort removal of a replica's data and meta files; a failed
 * deletion only produces a warning.
 */
private void deleteReplica(final ReplicaInfo replicaToDelete) {
 // Delete the files on disk. Failure here is okay.
 final File blockFile = replicaToDelete.getBlockFile();
 final boolean blockDeleted = blockFile.delete();
 if (!blockDeleted) {
  LOG.warn("Failed to delete block file " + blockFile);
 }
 final File metaFile = replicaToDelete.getMetaFile();
 final boolean metaDeleted = metaFile.delete();
 if (!metaDeleted) {
  LOG.warn("Failed to delete meta file " + metaFile);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

@Override  //Object
public String toString() {
 // Build the multi-line dump with a StringBuilder instead of one long
 // concatenation chain; the resulting string is identical.
 final StringBuilder sb = new StringBuilder();
 sb.append(getClass().getSimpleName())
   .append(", ").append(super.toString())
   .append(", ").append(getState())
   .append("\n  getNumBytes()     = ").append(getNumBytes())
   .append("\n  getBytesOnDisk()  = ").append(getBytesOnDisk())
   .append("\n  getVisibleLength()= ").append(getVisibleLength())
   .append("\n  getVolume()       = ").append(getVolume())
   .append("\n  getBlockFile()    = ").append(getBlockFile());
 return sb.toString();
}
origin: ch.cern.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 // Accumulate the diagnostic dump incrementally; output is byte-identical
 // to the original concatenation chain.
 final StringBuilder out = new StringBuilder(getClass().getSimpleName());
 out.append(", ").append(super.toString());
 out.append(", ").append(getState());
 out.append("\n  getNumBytes()     = ").append(getNumBytes());
 out.append("\n  getBytesOnDisk()  = ").append(getBytesOnDisk());
 out.append("\n  getVisibleLength()= ").append(getVisibleLength());
 out.append("\n  getVolume()       = ").append(getVolume());
 out.append("\n  getBlockFile()    = ").append(getBlockFile());
 return out.toString();
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Delete a block file */
private long deleteBlockFile() {
 synchronized(fds) {
  for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
   File f = b.getBlockFile();
   File mf = b.getMetaFile();
   // Delete a block file that has corresponding metadata file
   if (f.exists() && mf.exists() && f.delete()) {
    LOG.info("Deleting block file " + f.getAbsolutePath());
    return b.getBlockId();
   }
  }
 }
 return 0;
}
origin: ch.cern.hadoop/hadoop-hdfs

/** Truncate a block file */
private long truncateBlockFile() throws IOException {
 synchronized (fds) {
  for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
   File f = b.getBlockFile();
   File mf = b.getMetaFile();
   // Truncate a block file that has a corresponding metadata file
   if (f.exists() && f.length() != 0 && mf.exists()) {
    FileOutputStream s = null;
    FileChannel channel = null;
    try {
     s = new FileOutputStream(f);
     channel = s.getChannel();
     channel.truncate(0);
     LOG.info("Truncated block file " + f.getAbsolutePath());
     return b.getBlockId();
    } finally {
     IOUtils.cleanup(LOG, channel, s);
    }
   }
  }
 }
 return 0;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Creates an "unlink tmp" copy (or rename) of either the block file or the
 * meta file of the given replica.
 *
 * @param replicaInfo     replica whose file is copied or renamed
 * @param changeBlockFile true to use the data file, false for the meta file
 * @param isRename        true to rename the source, false to copy it
 * @throws IOException if the rename or the copy fails
 */
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
  boolean changeBlockFile, 
  boolean isRename) throws IOException {
 // Pick the source: the data file or the meta file.
 File src;
 if (changeBlockFile) {
  src = replicaInfo.getBlockFile();
 } else {
  src = replicaInfo.getMetaFile();
 }
 File dst = DatanodeUtil.getUnlinkTmpFile(src);
 if (isRename) {
  // File.renameTo reports failure only via its return value; the original
  // silently ignored it, leaving the file system in an unknown state.
  if (!src.renameTo(dst)) {
   throw new IOException("Failed to rename " + src + " to " + dst);
  }
 } else {
  FileInputStream in = new FileInputStream(src);
  try {
   FileOutputStream out = new FileOutputStream(dst);
   try {
    // Buffer size 1 is intentional here (test code exercising the copy).
    IOUtils.copyBytes(in, out, 1);
   } finally {
    out.close();
   }
  } finally {
   in.close();
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs-test

// Populates the dataset's replicas map with one replica in each state
// (FINALIZED, TEMPORARY, RBW, RWR, RUR) so state-transition tests have a
// fixture to work against.
private void setup(FSDataset dataSet) throws IOException {
 // setup replicas map
 ReplicasMap replicasMap = dataSet.volumeMap;
 FSVolume vol = dataSet.volumes.getNextVolume(0);
 // FINALIZED replica: registered first, then its (empty) block and meta
 // files are created on disk.
 // NOTE(review): createNewFile() return values are ignored here — assumed
 // safe because the volume directory is fresh; confirm against the test.
 ReplicaInfo replicaInfo = new FinalizedReplica(
   blocks[FINALIZED], vol, vol.getDir());
 replicasMap.add(replicaInfo);
 replicaInfo.getBlockFile().createNewFile();
 replicaInfo.getMetaFile().createNewFile();
 
 // TEMPORARY replica: lives in the volume's tmp directory.
 replicasMap.add(new ReplicaInPipeline(
   blocks[TEMPORARY].getBlockId(),
   blocks[TEMPORARY].getGenerationStamp(), vol, 
   vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
 
 // RBW (replica being written): rbw file created first, then the empty
 // block and meta files.
 replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol, 
   vol.createRbwFile(blocks[RBW]).getParentFile(), null);
 replicasMap.add(replicaInfo);
 replicaInfo.getBlockFile().createNewFile();
 replicaInfo.getMetaFile().createNewFile();
 
 // RWR (waiting to be recovered) and RUR (under recovery, wrapping a
 // finalized replica with recovery id 2007).
 replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol, 
   vol.createRbwFile(blocks[RWR]).getParentFile()));
 replicasMap.add(new ReplicaUnderRecovery(
   new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007));    
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Turn the block identifier into a filename
 * @param bpid Block pool Id
 * @param blockId a block's id
 * @param touch whether a transient-storage hit should refresh the RAM-disk
 *              tracker and bump the read-hit metric
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId, boolean touch) {
 final ReplicaInfo info = volumeMap.get(bpid, blockId);
 // Guard clause: unknown replica.
 if (info == null) {
  return null;
 }
 if (touch && info.getVolume().isTransientStorage()) {
  // Record the access so lazy-persist eviction sees this block as hot.
  ramDiskReplicaTracker.touch(bpid, blockId);
  datanode.getMetrics().incrRamDiskBlocksReadHits();
 }
 return info.getBlockFile();
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Turn the block identifier into a filename
 * @param bpid Block pool Id
 * @param blockId a block's id
 * @param touch if true and the replica is on transient storage, register
 *              the access with the RAM-disk tracker and metrics
 * @return on disk data file path; null if the replica does not exist
 */
File getFile(final String bpid, final long blockId, boolean touch) {
 final ReplicaInfo replica = volumeMap.get(bpid, blockId);
 if (replica == null) {
  // No such replica in this block pool.
  return null;
 }
 if (touch && replica.getVolume().isTransientStorage()) {
  ramDiskReplicaTracker.touch(bpid, blockId);
  datanode.getMetrics().incrRamDiskBlocksReadHits();
 }
 return replica.getBlockFile();
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Decides which of two duplicate replicas should be deleted.
 * Preference order for the replica to KEEP: same file (keep both → return
 * null), higher generation stamp, more bytes, non-transient storage,
 * otherwise replica1.
 *
 * @return the replica to delete, or null if both point at the same file
 */
@VisibleForTesting
static ReplicaInfo selectReplicaToDelete(final ReplicaInfo replica1,
  final ReplicaInfo replica2) {
 // it's the same block so don't ever delete it, even if GS or size
 // differs.  caller should keep the one it just discovered on disk
 if (replica1.getBlockFile().equals(replica2.getBlockFile())) {
  return null;
 }
 final long gs1 = replica1.getGenerationStamp();
 final long gs2 = replica2.getGenerationStamp();
 final ReplicaInfo replicaToKeep;
 if (gs1 != gs2) {
  // Newer generation stamp wins.
  replicaToKeep = gs1 > gs2 ? replica1 : replica2;
 } else if (replica1.getNumBytes() != replica2.getNumBytes()) {
  // Longer replica wins.
  replicaToKeep = replica1.getNumBytes() > replica2.getNumBytes()
    ? replica1 : replica2;
 } else if (replica1.getVolume().isTransientStorage() &&
       !replica2.getVolume().isTransientStorage()) {
  // Prefer durable storage over transient.
  replicaToKeep = replica2;
 } else {
  replicaToKeep = replica1;
 }
 final ReplicaInfo replicaToDelete =
   replicaToKeep == replica1 ? replica2 : replica1;
 if (LOG.isDebugEnabled()) {
  LOG.debug("resolveDuplicateReplicas decide to keep " + replicaToKeep
    + ".  Will try to delete " + replicaToDelete);
 }
 return replicaToDelete;
}
origin: ch.cern.hadoop/hadoop-hdfs

// Finalizes a replica for the given block pool: either reuses the original
// FINALIZED replica of a recovering (RUR) replica, or moves the file into
// the finalized directory and wraps it in a new FinalizedReplica. The
// result is registered in volumeMap before being returned.
private synchronized FinalizedReplica finalizeReplica(String bpid,
  ReplicaInfo replicaInfo) throws IOException {
 FinalizedReplica newReplicaInfo = null;
 // Recovery of an already-finalized replica: no file move needed, just
 // reuse the original replica object.
 if (replicaInfo.getState() == ReplicaState.RUR &&
   ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica().getState() == 
    ReplicaState.FINALIZED) {
  newReplicaInfo = (FinalizedReplica)
      ((ReplicaUnderRecovery)replicaInfo).getOriginalReplica();
 } else {
  FsVolumeImpl v = (FsVolumeImpl)replicaInfo.getVolume();
  File f = replicaInfo.getBlockFile();
  if (v == null) {
   throw new IOException("No volume for temporary file " + f +
     " for block " + replicaInfo);
  }
  // Move the block (and meta) file into the finalized directory,
  // releasing the bytes reserved for the write.
  File dest = v.addFinalizedBlock(
    bpid, replicaInfo, f, replicaInfo.getBytesReserved());
  newReplicaInfo = new FinalizedReplica(replicaInfo, v, dest.getParentFile());
  // Transient (RAM-disk) replicas are tracked for lazy persistence.
  if (v.isTransientStorage()) {
   ramDiskReplicaTracker.addReplica(bpid, replicaInfo.getBlockId(), v);
   datanode.getMetrics().addRamDiskBytesWrite(replicaInfo.getNumBytes());
  }
 }
 // Replace the old entry so readers see the finalized replica.
 volumeMap.add(bpid, newReplicaInfo);
 return newReplicaInfo;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Returns handles to the block file and its metadata file
 */
// The returned ReplicaInputStreams takes ownership of both streams and the
// volume reference; the nested try/catch blocks ensure each resource
// acquired so far is released if a later acquisition fails.
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
  long blkOffset, long metaOffset) throws IOException {
 ReplicaInfo info = getReplicaInfo(b);
 // Pin the volume so it cannot be removed while the streams are open.
 FsVolumeReference ref = info.getVolume().obtainReference();
 try {
  InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
  try {
   InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
   return new ReplicaInputStreams(blockInStream, metaInStream, ref);
  } catch (IOException e) {
   // Meta stream failed: close the already-open block stream.
   IOUtils.cleanup(null, blockInStream);
   throw e;
  }
 } catch (IOException e) {
  // Any failure after obtaining the reference: release the volume pin.
  IOUtils.cleanup(null, ref);
  throw e;
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Returns handles to the block file and its metadata file
 */
// Resource-safety: each catch block undoes exactly the acquisitions that
// succeeded before the failure (block stream, then the volume reference),
// so nothing leaks when an open or seek throws.
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
  long blkOffset, long metaOffset) throws IOException {
 ReplicaInfo info = getReplicaInfo(b);
 // Hold a reference to keep the volume alive while streams are in use.
 FsVolumeReference ref = info.getVolume().obtainReference();
 try {
  InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
  try {
   InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
   // Ownership of both streams and the reference passes to the caller.
   return new ReplicaInputStreams(blockInStream, metaInStream, ref);
  } catch (IOException e) {
   IOUtils.cleanup(null, blockInStream);
   throw e;
  }
 } catch (IOException e) {
  IOUtils.cleanup(null, ref);
  throw e;
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove the temporary block file (if any)
 */
// Only TEMPORARY replicas are removed; replicas in any other state are
// left untouched. The map entry is removed before the on-disk files.
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
   b.getLocalBlock());
 if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
  // remove from volumeMap
  volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
  
  // delete the on-disk temp file
  if (delBlockFromDisk(replicaInfo.getBlockFile(), 
    replicaInfo.getMetaFile(), b.getLocalBlock())) {
   LOG.warn("Block " + b + " unfinalized and removed. " );
  }
  // Transient (RAM-disk) replicas must also be dropped from the tracker.
  if (replicaInfo.getVolume().isTransientStorage()) {
   ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
  }
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Remove the temporary block file (if any)
 */
// No-op unless the replica exists and is in TEMPORARY state; the volumeMap
// entry is dropped first, then the files, then any RAM-disk tracking.
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
   b.getLocalBlock());
 if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
  // remove from volumeMap
  volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
  
  // delete the on-disk temp file
  if (delBlockFromDisk(replicaInfo.getBlockFile(), 
    replicaInfo.getMetaFile(), b.getLocalBlock())) {
   LOG.warn("Block " + b + " unfinalized and removed. " );
  }
  // Keep the RAM-disk replica tracker consistent with the volume map.
  if (replicaInfo.getVolume().isTransientStorage()) {
   ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
  }
 }
}
org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getBlockFile

Javadoc

Get the full path of this replica's data file

Popular methods of ReplicaInfo

  • getBlockId
  • getBytesOnDisk
  • getGenerationStamp
  • getNumBytes
  • getState
  • getVolume
    Get the volume where this replica is located on disk
  • getMetaFile
    Get the full path of this replica's meta file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • getVisibleLength,
  • isOnTransientStorage,
  • setGenerationStamp,
  • setNumBytes,
  • equals,
  • getDir,
  • hashCode,
  • isUnlinked,
  • parseBaseDir

Popular in Java

  • Reading from database using SQL prepared statement
  • findViewById (Activity)
  • getContentResolver (Context)
  • setContentView (Activity)
  • Point (java.awt)
    A point representing a location in (x,y) coordinate space, specified in integer precision.
  • SecureRandom (java.security)
    This class generates cryptographically secure pseudo-random numbers. It is best to invoke SecureRand
  • ArrayList (java.util)
    ArrayList is an implementation of List, backed by an array. All optional operations including adding
  • TimerTask (java.util)
    The TimerTask class represents a task to run at a specified time. The task may be run once or repeat
  • JTable (javax.swing)
  • IsNull (org.hamcrest.core)
    Is the value null?
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEA · WebStorm · Visual Studio · Android Studio · Eclipse · Visual Studio Code · PyCharm · Sublime Text · PhpStorm · Vim · GoLand · RubyMine · Emacs · Jupyter Notebook · Jupyter Lab · Rider · DataGrip · AppCode
  • Company

    About Us · Contact Us · Careers
  • Resources

    FAQ · Blog · Tabnine Academy · Terms of use · Privacy policy · Java Code Index · Javascript Code Index
Get Tabnine for your IDE now