Tabnine Logo
ReplicaInfo.getMetaFile
Code IndexAdd Tabnine to your IDE (free)

How to use
getMetaFile
method
in
org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getMetaFile (Showing top 20 results out of 315)

origin: ch.cern.hadoop/hadoop-hdfs

private void deleteReplica(final ReplicaInfo replicaToDelete) {
 // Best-effort removal of the replica's on-disk files; a failed delete
 // is only logged, never propagated.
 final File block = replicaToDelete.getBlockFile();
 final boolean blockGone = block.delete();
 if (!blockGone) {
  LOG.warn("Failed to delete block file " + block);
 }
 final File meta = replicaToDelete.getMetaFile();
 final boolean metaGone = meta.delete();
 if (!metaGone) {
  LOG.warn("Failed to delete meta file " + meta);
 }
}
origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Deletes the first replica metadata file that exists on disk.
 *
 * @return the block id of the replica whose meta file was removed,
 *         or 0 if no meta file could be deleted
 */
private long deleteMetaFile() {
 synchronized(fds) {
  for (ReplicaInfo replica : fds.volumeMap.replicas()) {
   File metaFile = replica.getMetaFile();
   if (!metaFile.exists()) {
    continue;
   }
   if (metaFile.delete()) {
    LOG.info("Deleting metadata file " + metaFile.getAbsolutePath());
    return replica.getBlockId();
   }
  }
 }
 return 0;
}
origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Truncates to zero length the first block file that is non-empty and has
 * a corresponding metadata file.
 *
 * @return the block id of the truncated replica, or 0 if none was found
 * @throws IOException if the truncate fails
 */
private long truncateBlockFile() throws IOException {
 synchronized (fds) {
  for (ReplicaInfo b : fds.volumeMap.replicas()) {
   File f = b.getBlockFile();
   File mf = b.getMetaFile();
   // Truncate a block file that has a corresponding metadata file
   if (f.exists() && f.length() != 0 && mf.exists()) {
    // FIX: the stream and channel were previously never closed,
    // leaking a file handle on every call. Close them on all paths,
    // matching the other variant of this helper in the codebase.
    FileOutputStream s = null;
    FileChannel channel = null;
    try {
     s = new FileOutputStream(f);
     channel = s.getChannel();
     channel.truncate(0);
     LOG.info("Truncated block file " + f.getAbsolutePath());
     return b.getBlockId();
    } finally {
     IOUtils.cleanup(LOG, channel, s);
    }
   }
  }
 }
 return 0;
}
origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Deletes the first block file whose metadata file also exists on disk.
 *
 * @return the block id of the deleted replica, or 0 if nothing was deleted
 */
private long deleteBlockFile() {
 synchronized(fds) {
  for (ReplicaInfo replica : fds.volumeMap.replicas()) {
   File blockFile = replica.getBlockFile();
   File metaFile = replica.getMetaFile();
   // Only target a block file that still has its metadata file.
   boolean deletable = blockFile.exists() && metaFile.exists();
   if (deletable && blockFile.delete()) {
    LOG.info("Deleting block file " + blockFile.getAbsolutePath());
    return replica.getBlockId();
   }
  }
 }
 return 0;
}
origin: io.prestosql.hadoop/hadoop-apache

private void deleteReplica(final ReplicaInfo replicaToDelete) {
 // Remove the replica's block file, then its meta file. Failures are
 // tolerated here and surface only as warnings in the log.
 final File block = replicaToDelete.getBlockFile();
 if (!block.delete()) {
  LOG.warn("Failed to delete block file " + block);
 }
 final File meta = replicaToDelete.getMetaFile();
 if (!meta.delete()) {
  LOG.warn("Failed to delete meta file " + meta);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 * 
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, 
  long newGS) throws IOException { 
 long oldGS = replicaInfo.getGenerationStamp();
 // Capture the meta path both before and after updating the stamp:
 // getMetaFile() reflects the current stamp, so the two calls yield
 // the old and the new on-disk names respectively.
 File oldmeta = replicaInfo.getMetaFile();
 replicaInfo.setGenerationStamp(newGS);
 File newmeta = replicaInfo.getMetaFile();
 // rename meta file to new GS
 if (LOG.isDebugEnabled()) {
  LOG.debug("Renaming " + oldmeta + " to " + newmeta);
 }
 try {
  NativeIO.renameTo(oldmeta, newmeta);
 } catch (IOException e) {
  // Roll back the in-memory stamp so it stays consistent with the
  // unchanged on-disk file name before propagating the failure.
  replicaInfo.setGenerationStamp(oldGS); // restore old GS
  throw new IOException("Block " + replicaInfo + " reopen failed. " +
             " Unable to move meta file  " + oldmeta +
             " to " + newmeta, e);
 }
}
origin: org.apache.hadoop/hadoop-hdfs-test

 /**
  * Creates the unlink-tmp counterpart of one of the replica's files,
  * either by renaming the original or by copying its contents.
  */
 private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
   boolean changeBlockFile, 
   boolean isRename) throws IOException {
  // Choose which of the replica's two files to operate on.
  final File src = changeBlockFile
    ? replicaInfo.getBlockFile() : replicaInfo.getMetaFile();
  final File dst = FSDataset.getUnlinkTmpFile(src);
  if (isRename) {
   src.renameTo(dst);
   return;
  }
  // Copy src into dst, closing both streams on every path.
  FileInputStream in = new FileInputStream(src);
  try {
   FileOutputStream out = new FileOutputStream(dst);
   try {
    IOUtils.copyBytes(in, out, 1);
   } finally {
    out.close();
   }
  } finally {
   in.close();
  }
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Removes one replica metadata file from disk.
 *
 * @return the block id whose meta file was deleted, or 0 when none
 *         existed or could be deleted
 */
private long deleteMetaFile() {
 synchronized(fds) {
  for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
   File metaFile = replica.getMetaFile();
   // Remove the first metadata file that is present on disk.
   if (metaFile.exists() && metaFile.delete()) {
    LOG.info("Deleting metadata file " + metaFile.getAbsolutePath());
    return replica.getBlockId();
   }
  }
 }
 return 0;
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Bump a replica's generation stamp to a new one.
 * Its on-disk meta file name is renamed to be the new one too.
 * 
 * @param replicaInfo a replica
 * @param newGS new generation stamp
 * @throws IOException if rename fails
 */
private void bumpReplicaGS(ReplicaInfo replicaInfo, 
  long newGS) throws IOException { 
 long oldGS = replicaInfo.getGenerationStamp();
 // getMetaFile() is called before and after the stamp change to obtain
 // the old and new on-disk names, since the name tracks the stamp.
 File oldmeta = replicaInfo.getMetaFile();
 replicaInfo.setGenerationStamp(newGS);
 File newmeta = replicaInfo.getMetaFile();
 // rename meta file to new GS
 if (LOG.isDebugEnabled()) {
  LOG.debug("Renaming " + oldmeta + " to " + newmeta);
 }
 try {
  NativeIO.renameTo(oldmeta, newmeta);
 } catch (IOException e) {
  // Restore the previous stamp so the in-memory state matches the
  // on-disk file name that was not renamed.
  replicaInfo.setGenerationStamp(oldGS); // restore old GS
  throw new IOException("Block " + replicaInfo + " reopen failed. " +
             " Unable to move meta file  " + oldmeta +
             " to " + newmeta, e);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Deletes one block file that still has its metadata file on disk.
 *
 * @return the deleted replica's block id, or 0 if no file was removed
 */
private long deleteBlockFile() {
 synchronized(fds) {
  for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
   File blockFile = replica.getBlockFile();
   File metaFile = replica.getMetaFile();
   if (!blockFile.exists() || !metaFile.exists()) {
    continue;
   }
   // Report success only when the block file is actually removed.
   if (blockFile.delete()) {
    LOG.info("Deleting block file " + blockFile.getAbsolutePath());
    return replica.getBlockId();
   }
  }
 }
 return 0;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Produces the unlink-tmp counterpart of either the replica's block file
 * or its meta file, by rename or by a byte-for-byte copy.
 */
private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
  boolean changeBlockFile, 
  boolean isRename) throws IOException {
 final File src;
 if (changeBlockFile) {
  src = replicaInfo.getBlockFile();
 } else {
  src = replicaInfo.getMetaFile();
 }
 final File dst = DatanodeUtil.getUnlinkTmpFile(src);
 if (isRename) {
  src.renameTo(dst);
  return;
 }
 // Copy path: both streams are closed regardless of copy outcome.
 FileInputStream in = new FileInputStream(src);
 try {
  FileOutputStream out = new FileOutputStream(dst);
  try {
   IOUtils.copyBytes(in, out, 1);
  } finally {
   out.close();
  }
 } finally {
  in.close();
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Truncates to zero length the first block file that is non-empty and
 * has a matching metadata file.
 *
 * @return the block id of the truncated replica, or 0 if none qualified
 * @throws IOException if the truncate fails
 */
private long truncateBlockFile() throws IOException {
 synchronized (fds) {
  for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(fds, bpid)) {
   File blockFile = replica.getBlockFile();
   File metaFile = replica.getMetaFile();
   boolean candidate =
     blockFile.exists() && blockFile.length() != 0 && metaFile.exists();
   if (!candidate) {
    continue;
   }
   FileOutputStream stream = null;
   FileChannel channel = null;
   try {
    stream = new FileOutputStream(blockFile);
    channel = stream.getChannel();
    channel.truncate(0);
    LOG.info("Truncated block file " + blockFile.getAbsolutePath());
    return replica.getBlockId();
   } finally {
    // Close both handles even when truncate throws.
    IOUtils.cleanup(LOG, channel, stream);
   }
  }
 }
 return 0;
}
origin: org.apache.hadoop/hadoop-hdfs-test

/**
 * Populates the dataset's replicas map with one replica per state
 * (FINALIZED, TEMPORARY, RBW, RWR, RUR) and creates the on-disk files
 * that the finalized and RBW replicas require.
 *
 * @param dataSet dataset under test whose volumeMap is filled in
 * @throws IOException if creating any backing file fails
 */
private void setup(FSDataset dataSet) throws IOException {
 // setup replicas map
 ReplicasMap replicasMap = dataSet.volumeMap;
 FSVolume vol = dataSet.volumes.getNextVolume(0);
 // FINALIZED replica: registered, with empty block and meta files created.
 ReplicaInfo replicaInfo = new FinalizedReplica(
   blocks[FINALIZED], vol, vol.getDir());
 replicasMap.add(replicaInfo);
 replicaInfo.getBlockFile().createNewFile();
 replicaInfo.getMetaFile().createNewFile();
 
 // TEMPORARY replica: backed by a tmp file allocated on the volume.
 replicasMap.add(new ReplicaInPipeline(
   blocks[TEMPORARY].getBlockId(),
   blocks[TEMPORARY].getGenerationStamp(), vol, 
   vol.createTmpFile(blocks[TEMPORARY]).getParentFile()));
 
 // RBW replica: rbw file on the volume plus empty block/meta files.
 replicaInfo = new ReplicaBeingWritten(blocks[RBW], vol, 
   vol.createRbwFile(blocks[RBW]).getParentFile(), null);
 replicasMap.add(replicaInfo);
 replicaInfo.getBlockFile().createNewFile();
 replicaInfo.getMetaFile().createNewFile();
 
 // RWR and RUR replicas complete the one-replica-per-state fixture.
 replicasMap.add(new ReplicaWaitingToBeRecovered(blocks[RWR], vol, 
   vol.createRbwFile(blocks[RWR]).getParentFile()));
 replicasMap.add(new ReplicaUnderRecovery(
   new FinalizedReplica(blocks[RUR], vol, vol.getDir()), 2007));    
}

origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or no need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
 if (isUnlinked()) {
  return false;
 }
 final File blockFile = getBlockFile();
 if (blockFile == null || getVolume() == null) {
  throw new IOException("detachBlock:Block not found. " + this);
 }
 final File metaFile = getMetaFile();
 // Break the hard link on whichever file exceeds the expected count.
 if (HardLink.getLinkCount(blockFile) > numLinks) {
  DataNode.LOG.info("CopyOnWrite for block " + this);
  unlinkFile(blockFile, this);
 }
 if (HardLink.getLinkCount(metaFile) > numLinks) {
  unlinkFile(metaFile, this);
 }
 setUnlinked();
 return true;
}
origin: ch.cern.hadoop/hadoop-hdfs

File sourceMeta = b.getMetaFile();
String sourceRoot = b.getVolume().getBasePath();
String destRoot = v.getBasePath();
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Remove a hard link by copying the block to a temporary place and 
 * then moving it back
 * @param numLinks number of hard links
 * @return true if copy is successful; 
 *         false if it is already detached or no need to be detached
 * @throws IOException if there is any copy error
 */
public boolean unlinkBlock(int numLinks) throws IOException {
 if (isUnlinked()) {
  return false;
 }
 File file = getBlockFile();
 if (file == null || getVolume() == null) {
  throw new IOException("detachBlock:Block not found. " + this);
 }
 File meta = getMetaFile();
 // Only unlink files whose hard-link count exceeds the expected number;
 // otherwise the replica already owns its files exclusively.
 if (HardLink.getLinkCount(file) > numLinks) {
  DataNode.LOG.info("CopyOnWrite for block " + this);
  unlinkFile(file, this);
 }
 if (HardLink.getLinkCount(meta) > numLinks) {
  unlinkFile(meta, this);
 }
 setUnlinked();
 return true;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
  long blkOffset, long metaOffset) throws IOException {
 ReplicaInfo info = getReplicaInfo(b);
 // Hold a reference on the volume while the streams are being opened;
 // on success the reference is passed into ReplicaInputStreams
 // (presumably it takes ownership and releases it on close — confirm).
 FsVolumeReference ref = info.getVolume().obtainReference();
 try {
  InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
  try {
   InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
   return new ReplicaInputStreams(blockInStream, metaInStream, ref);
  } catch (IOException e) {
   // Opening the meta stream failed: close the block stream we opened.
   IOUtils.cleanup(null, blockInStream);
   throw e;
  }
 } catch (IOException e) {
  // Nothing was handed off to the caller; release the volume reference.
  IOUtils.cleanup(null, ref);
  throw e;
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Returns handles to the block file and its metadata file
 */
@Override // FsDatasetSpi
public synchronized ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
  long blkOffset, long metaOffset) throws IOException {
 ReplicaInfo info = getReplicaInfo(b);
 // Take a volume reference for the lifetime of the returned streams;
 // it is handed to ReplicaInputStreams on success (presumably released
 // when the streams are closed — confirm against ReplicaInputStreams).
 FsVolumeReference ref = info.getVolume().obtainReference();
 try {
  InputStream blockInStream = openAndSeek(info.getBlockFile(), blkOffset);
  try {
   InputStream metaInStream = openAndSeek(info.getMetaFile(), metaOffset);
   return new ReplicaInputStreams(blockInStream, metaInStream, ref);
  } catch (IOException e) {
   // Meta stream failed to open: the block stream must be closed here.
   IOUtils.cleanup(null, blockInStream);
   throw e;
  }
 } catch (IOException e) {
  // No streams escaped; drop the volume reference before rethrowing.
  IOUtils.cleanup(null, ref);
  throw e;
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
   b.getLocalBlock());
 // Only TEMPORARY replicas are discarded; any other state is left alone.
 if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
  // remove from volumeMap
  volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
  
  // delete the on-disk temp file
  if (delBlockFromDisk(replicaInfo.getBlockFile(), 
    replicaInfo.getMetaFile(), b.getLocalBlock())) {
   LOG.warn("Block " + b + " unfinalized and removed. " );
  }
  // Transient-storage replicas also drop their tracker entry.
  if (replicaInfo.getVolume().isTransientStorage()) {
   ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
  }
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public synchronized void unfinalizeBlock(ExtendedBlock b) throws IOException {
 ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), 
   b.getLocalBlock());
 // A replica is removed only while still in the TEMPORARY state.
 if (replicaInfo != null && replicaInfo.getState() == ReplicaState.TEMPORARY) {
  // remove from volumeMap
  volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
  
  // delete the on-disk temp file
  if (delBlockFromDisk(replicaInfo.getBlockFile(), 
    replicaInfo.getMetaFile(), b.getLocalBlock())) {
   LOG.warn("Block " + b + " unfinalized and removed. " );
  }
  // For transient storage, the RAM-disk tracker entry is discarded too.
  if (replicaInfo.getVolume().isTransientStorage()) {
   ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(), b.getBlockId(), true);
  }
 }
}
org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getMetaFile

Javadoc

Get the full path of this replica's meta file

Popular methods of ReplicaInfo

  • getBlockId
  • getBytesOnDisk
  • getGenerationStamp
  • getNumBytes
  • getState
  • getVolume
    Get the volume where this replica is located on disk
  • getBlockFile
    Get the full path of this replica's data file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • getVisibleLength,
  • isOnTransientStorage,
  • setGenerationStamp,
  • setNumBytes,
  • equals,
  • getDir,
  • hashCode,
  • isUnlinked,
  • parseBaseDir

Popular in Java

  • Start an intent from android
  • getApplicationContext (Context)
  • setScale (BigDecimal)
  • startActivity (Activity)
  • Color (java.awt)
    The Color class is used to encapsulate colors in the default sRGB color space or colors in arbitrary
  • OutputStream (java.io)
    A writable sink for bytes.Most clients will use output streams that write data to the file system (
  • Socket (java.net)
    Provides a client-side TCP socket.
  • ByteBuffer (java.nio)
    A buffer for bytes. A byte buffer can be created in either one of the following ways: * #allocate
  • Random (java.util)
    This class provides methods that return pseudo-random values.It is dangerous to seed Random with the
  • Stack (java.util)
    Stack is a Last-In/First-Out(LIFO) data structure which represents a stack of objects. It enables u
  • Top 17 Plugins for Android Studio
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyStudentsTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now