How to use FsVolumeReference in org.apache.hadoop.hdfs.server.datanode.fsdataset

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference (selected from the top results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

@Override
public void close() throws IOException {
 if (this.volumeReference != null) {
  volumeReference.close();
 }
}
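This close() idiom typically lives in a wrapper that pins a volume for the lifetime of an I/O object. A minimal self-contained sketch of such a holder (the class name and field are illustrative, not taken from the Hadoop source):

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;

// Hypothetical holder: pins a volume while it is in use and releases
// the reference exactly once on close().
class VolumeReferenceHolder implements Closeable {
 private final FsVolumeReference volumeReference;

 VolumeReferenceHolder(FsVolumeReference volumeReference) {
  this.volumeReference = volumeReference;
 }

 @Override
 public void close() throws IOException {
  if (this.volumeReference != null) {
   volumeReference.close(); // decrements the volume's reference count
  }
 }
}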
origin: org.apache.hadoop/hadoop-hdfs

ReplicaFileDeleteTask(FsVolumeReference volumeRef,
  ReplicaInfo replicaToDelete, ExtendedBlock block,
  String trashDirectory) {
 this.volumeRef = volumeRef;
 this.volume = (FsVolumeImpl) volumeRef.getVolume();
 this.replicaToDelete = replicaToDelete;
 this.block = block;
 this.trashDirectory = trashDirectory;
}
origin: ch.cern.hadoop/hadoop-hdfs

try {
 File oldBlockFile = replicaInfo.getBlockFile();
 File oldMetaFile = replicaInfo.getMetaFile();
 FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
 // ... copy oldBlockFile/oldMetaFile onto targetVolume ...
} finally {
 if (volumeRef != null) {
  volumeRef.close();
 }
}
origin: org.apache.hadoop/hadoop-hdfs

private void markHealthy() {
 synchronized (DatasetVolumeChecker.this) {
  healthyVolumes.add(reference.getVolume());
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 @Override
 public void close() throws IOException {
  IOException ioe = null;
  for (FsVolumeReference ref : references) {
   try {
    ref.close();
   } catch (IOException e) {
    ioe = e;
   }
  }
  references.clear();
  if (ioe != null) {
   throw ioe;
  }
 }
}
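Note that this composite close() rethrows only the last IOException it encounters; earlier failures are overwritten. A sketch of an alternative body for the same method (not from the Hadoop source) that keeps the first failure and attaches later ones via Throwable#addSuppressed:

@Override
public void close() throws IOException {
 IOException first = null;
 for (FsVolumeReference ref : references) {
  try {
   ref.close();
  } catch (IOException e) {
   if (first == null) {
    first = e;
   } else {
    first.addSuppressed(e); // keep later failures visible
   }
  }
 }
 references.clear();
 if (first != null) {
  throw first;
 }
}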
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Get the volume for a given index.
 */
public FsVolumeSpi get(int index) {
 return references.get(index).getVolume();
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Moves a given block from one volume to another volume. This is used by disk
 * balancer.
 *
 * @param block       - ExtendedBlock
 * @param destination - Destination volume
 * @return Old replica info
 */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi
  destination) throws IOException {
 ReplicaInfo replicaInfo = getReplicaInfo(block);
 if (replicaInfo.getState() != ReplicaState.FINALIZED) {
  throw new ReplicaNotFoundException(
    ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
 }
 FsVolumeReference volumeRef = null;
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  volumeRef = destination.obtainReference();
 }
 try {
  moveBlock(block, replicaInfo, volumeRef);
 } finally {
  if (volumeRef != null) {
   volumeRef.close();
  }
 }
 return replicaInfo;
}
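The method above obtains the reference while holding the dataset lock but uses it after the lock is released, which is why the release happens in an explicit finally block. Where no such split is needed, the obtain/close pair can be written with try-with-resources, since FsVolumeReference is AutoCloseable; a minimal sketch of a hypothetical caller:

// Hypothetical: pin the destination volume for the duration of the
// move; close() runs automatically when the block exits.
try (FsVolumeReference volumeRef = destination.obtainReference()) {
 moveBlock(block, replicaInfo, volumeRef);
}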
origin: org.apache.hadoop/hadoop-hdfs

private void markFailed() {
 synchronized (DatasetVolumeChecker.this) {
  failedVolumes.add(reference.getVolume());
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public FsVolumeSpi next() {
 int refIdx = idx++;
 return references.get(refIdx).getVolume();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void onFailure(@Nonnull Throwable t) {
 Throwable exception = (t instanceof ExecutionException) ?
   t.getCause() : t;
 LOG.warn("Exception running disk checks against volume " +
   reference.getVolume(), exception);
 markFailed();
 cleanup();
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Create a new temporary replica of replicaInfo object in specified volume.
 *
 * @param block       - Extended Block
 * @param replicaInfo - ReplicaInfo
 * @param volumeRef   - Volume Ref - Closed by caller.
 * @return newReplicaInfo new replica object created in specified volume.
 * @throws IOException
 */
@VisibleForTesting
ReplicaInfo copyReplicaToVolume(ExtendedBlock block, ReplicaInfo replicaInfo,
  FsVolumeReference volumeRef) throws IOException {
 FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
 // Copy files to temp dir first
 ReplicaInfo newReplicaInfo = targetVolume.moveBlockToTmpLocation(block,
   replicaInfo, smallBufferSize, conf);
 return newReplicaInfo;
}
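The javadoc notes that the volume reference is closed by the caller. A sketch of a hypothetical caller honoring that contract (identifiers are illustrative):

FsVolumeReference volumeRef = destination.obtainReference();
try {
 ReplicaInfo newReplica = copyReplicaToVolume(block, replicaInfo, volumeRef);
 // ... finalize newReplica on the target volume ...
} finally {
 volumeRef.close(); // contract: the reference is closed by the caller
}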
origin: ch.cern.hadoop/hadoop-hdfs

@Test
public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
 FsVolumeList volumeList = new FsVolumeList(
   Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
 File volDir = new File(baseDir, "volume-0");
 volDir.mkdirs();
 FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
   conf, StorageType.DEFAULT);
 FsVolumeReference ref = volume.obtainReference();
 volumeList.addVolume(ref);
 try {
  ref.close();
  fail("Should throw exception because the reference is closed in "
    + "VolumeList#addVolume().");
 } catch (IllegalStateException e) {
  // expected: the reference was already closed inside addVolume()
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void onSuccess(@Nonnull VolumeCheckResult result) {
 switch(result) {
 case HEALTHY:
 case DEGRADED:
  LOG.debug("Volume {} is {}.", reference.getVolume(), result);
  markHealthy();
  break;
 case FAILED:
  LOG.warn("Volume {} detected as being unhealthy",
    reference.getVolume());
  markFailed();
  break;
 default:
  LOG.error("Unexpected health check result {} for volume {}",
    result, reference.getVolume());
  markHealthy();
  break;
 }
 cleanup();
}
origin: org.apache.hadoop/hadoop-hdfs

public void dropCacheBehindReads(String identifier, long offset, long len,
  int flags) throws NativeIOException {
 assert this.dataInFd != null : "null dataInFd!";
 fileIoProvider.posixFadvise(getVolumeRef().getVolume(),
   identifier, dataInFd, offset, len, flags);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Delete the block file and meta file from the disk asynchronously, adjust
 * dfsUsed statistics accordingly.
 */
void deleteAsync(FsVolumeReference volumeRef, ReplicaInfo replicaToDelete,
  ExtendedBlock block, String trashDirectory) {
 LOG.info("Scheduling " + block.getLocalBlock()
   + " replica " + replicaToDelete + " for deletion");
 ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(
   volumeRef, replicaToDelete, block, trashDirectory);
 execute(((FsVolumeImpl) volumeRef.getVolume()), deletionTask);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Asynchronously lazy persist the block from the RamDisk to Disk.
 */
void submitLazyPersistTask(String bpId, long blockId,
  long genStamp, long creationTime,
  ReplicaInfo replica, FsVolumeReference target) throws IOException {
 if (LOG.isDebugEnabled()) {
  LOG.debug("LazyWriter schedule async task to persist RamDisk block pool id: "
    + bpId + " block id: " + blockId);
 }
 ReplicaLazyPersistTask lazyPersistTask = new ReplicaLazyPersistTask(
   bpId, blockId, genStamp, creationTime, replica, target);
 FsVolumeImpl volume = (FsVolumeImpl)target.getVolume();
 execute(volume.getStorageID(), lazyPersistTask);
}
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference

Javadoc

This holds a volume reference count as an AutoCloseable resource. It increases the reference count by one in the constructor and decreases it by one in #close().
 
try (FsVolumeReference ref = volume.obtainReference()) {
 // ... do I/O against the volume while the reference is held ...
}

Most used methods

  • close
    Decrease the reference count of the volume.
  • getVolume
    Returns the underlying volume object, or null if the reference has already been released (see the sketch after this list).
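A minimal end-to-end sketch combining both methods, assuming a volume obtained via FsVolumeSpi#obtainReference (identifiers are illustrative):

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

class VolumeReferenceExample {
 // Pin the volume, read a property, then release the reference.
 static String describeVolume(FsVolumeSpi volume) throws IOException {
  try (FsVolumeReference ref = volume.obtainReference()) {
   // getVolume() returns the underlying volume while the reference
   // is held; it returns null once the reference has been released.
   return ref.getVolume().getStorageID();
  } // close() decrements the reference count here
 }
}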
