ReplicaInfo.getState

How to use getState method in org.apache.hadoop.hdfs.server.datanode.ReplicaInfo

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.getState (Showing top 20 results out of 315)
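Note: getState() reports a replica's lifecycle state as an HdfsServerConstants.ReplicaState: FINALIZED, RBW (replica being written), RWR (replica waiting to be recovered), RUR (replica under recovery), or TEMPORARY. As a minimal sketch of the state-check pattern the snippets below repeat (the StateCheckExample class and isReadable helper are hypothetical, not part of Hadoop):

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

class StateCheckExample {
 // Hypothetical helper: branch on the replica's current lifecycle state.
 static boolean isReadable(ReplicaInfo replica) {
  switch (replica.getState()) {
  case FINALIZED:
   return true;    // complete and immutable; safe to read or move
  case RBW:        // replica being written
  case RWR:        // replica waiting to be recovered
  case RUR:        // replica under recovery
  case TEMPORARY:  // e.g. block being copied between datanodes
  default:
   return false;   // still mutable; most read paths reject these states
  }
 }
}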

origin: org.apache.hadoop/hadoop-hdfs

public ReplicaUnderRecovery(ReplicaInfo replica, long recoveryId) {
 super(replica, replica.getVolume(), ((LocalReplica)replica).getDir());
 if ( replica.getState() != ReplicaState.FINALIZED &&
    replica.getState() != ReplicaState.RBW &&
    replica.getState() != ReplicaState.RWR ) {
  throw new IllegalArgumentException("Cannot recover replica: " + replica);
 }
 this.original = (LocalReplica) replica;
 this.recoveryId = recoveryId;
}
origin: org.apache.hadoop/hadoop-hdfs

private LocalReplica buildRWR() throws IllegalArgumentException {
 if (null != fromReplica && fromReplica.getState() == ReplicaState.RWR) {
  return new ReplicaWaitingToBeRecovered(
    (ReplicaWaitingToBeRecovered) fromReplica);
 } else if (null != fromReplica){
  throw new IllegalArgumentException("Incompatible fromReplica "
    + "state: " + fromReplica.getState());
 } else {
  if (null != block) {
   return new ReplicaWaitingToBeRecovered(block, volume, directoryUsed);
  } else {
   return new ReplicaWaitingToBeRecovered(blockId, length, genStamp,
     volume, directoryUsed);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

private LocalReplica buildFinalizedReplica() throws IllegalArgumentException {
 if (null != fromReplica &&
   fromReplica.getState() == ReplicaState.FINALIZED) {
  return new FinalizedReplica((FinalizedReplica)fromReplica);
 } else if (null != this.fromReplica) {
  throw new IllegalArgumentException("Incompatible fromReplica "
    + "state: " + fromReplica.getState());
 } else {
  if (null != block) {
   return new FinalizedReplica(block, volume, directoryUsed,
     lastPartialChunkChecksum);
  } else {
   return new FinalizedReplica(blockId, length, genStamp, volume,
     directoryUsed, lastPartialChunkChecksum);
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

private LocalReplica buildRUR() throws IllegalArgumentException {
 if (null == fromReplica) {
  throw new IllegalArgumentException(
    "Missing a valid replica to recover from");
 }
 if (null != writer || null != block) {
  throw new IllegalArgumentException("Invalid state for "
    + "recovering from replica with blk id "
    + fromReplica.getBlockId());
 }
 if (fromReplica.getState() == ReplicaState.RUR) {
  return new ReplicaUnderRecovery((ReplicaUnderRecovery) fromReplica);
 } else {
  return new ReplicaUnderRecovery(fromReplica, recoveryId);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * This should be primarily used for testing.
 * @return clone of replica stored in datanode memory
 */
ReplicaInfo fetchReplicaInfo(String bpid, long blockId) {
 ReplicaInfo r = volumeMap.get(bpid, blockId);
 if (r == null) {
  return null;
 }
 switch(r.getState()) {
 case FINALIZED:
 case RBW:
 case RWR:
 case RUR:
 case TEMPORARY:
  return new ReplicaBuilder(r.getState()).from(r).build();
 }
 return null;
}

origin: org.apache.hadoop/hadoop-hdfs

 void stopAllDataxceiverThreads(FsVolumeImpl volume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
   for (String blockPoolId : volumeMap.getBlockPoolList()) {
    Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
    for (ReplicaInfo replicaInfo : replicas) {
     if ((replicaInfo.getState() == ReplicaState.TEMPORARY
       || replicaInfo.getState() == ReplicaState.RBW)
       && replicaInfo.getVolume().equals(volume)) {
      ReplicaInPipeline replicaInPipeline =
        (ReplicaInPipeline) replicaInfo;
      replicaInPipeline.interruptThread();
     }
    }
   }
  }
 }
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Gets a list of references to the finalized blocks for the given block pool.
 * <p>
 * Callers of this function should call
 * {@link FsDatasetSpi#acquireDatasetLock} to avoid blocks' status being
 * changed during list iteration.
 * </p>
 * @return a list of references to the finalized blocks for the given block
 *         pool.
 */
@Override
public List<ReplicaInfo> getFinalizedBlocks(String bpid) {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  final List<ReplicaInfo> finalized = new ArrayList<ReplicaInfo>(
    volumeMap.size(bpid));
  for (ReplicaInfo b : volumeMap.replicas(bpid)) {
   if (b.getState() == ReplicaState.FINALIZED) {
    finalized.add(b);
   }
  }
  return finalized;
 }
}
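The javadoc above requires callers to hold the dataset lock so replica states cannot change mid-iteration. A hedged caller-side sketch (FinalizedBlocksExample, dataset, and bpid are hypothetical; this assumes FsDatasetSpi#acquireDatasetLock returns an AutoCloseableLock, as in this Hadoop version):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.util.AutoCloseableLock;

class FinalizedBlocksExample {
 // Hypothetical caller: list finalized replicas for a block pool while
 // holding the dataset lock, per the getFinalizedBlocks() contract.
 static void logFinalized(FsDatasetSpi<?> dataset, String bpid)
   throws IOException {
  try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
   List<ReplicaInfo> finalized = dataset.getFinalizedBlocks(bpid);
   for (ReplicaInfo b : finalized) {
    System.out.println(b.getBlockId() + " -> " + b.getState());
   }
  }
 }
}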
origin: org.apache.hadoop/hadoop-hdfs

private LocalReplicaInPipeline buildRBW() throws IllegalArgumentException {
 if (null != fromReplica && fromReplica.getState() == ReplicaState.RBW) {
  return new ReplicaBeingWritten((ReplicaBeingWritten) fromReplica);
 } else if (null != fromReplica) {
  throw new IllegalArgumentException("Incompatible fromReplica "
    + "state: " + fromReplica.getState());
 } else {
  if (null != block) {
   if (null == writer) {
    throw new IllegalArgumentException("A valid writer is "
      + "required for constructing a RBW from block "
      + block.getBlockId());
   }
   return new ReplicaBeingWritten(block, volume, directoryUsed, writer);
  } else {
   if (length != -1) {
    return new ReplicaBeingWritten(blockId, length, genStamp,
      volume, directoryUsed, writer, bytesToReserve);
   } else {
    return new ReplicaBeingWritten(blockId, genStamp, volume,
      directoryUsed, bytesToReserve);
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override  //Object
public String toString() {
 return getClass().getSimpleName()
   + ", " + super.toString()
   + ", " + getState()
   + "\n  getNumBytes()     = " + getNumBytes()
   + "\n  getBytesOnDisk()  = " + getBytesOnDisk()
   + "\n  getVisibleLength()= " + getVisibleLength()
   + "\n  getVolume()       = " + getVolume()
   + "\n  getBlockURI()     = " + getBlockURI();
}
origin: org.apache.hadoop/hadoop-hdfs

private LocalReplicaInPipeline buildTemporaryReplica()
  throws IllegalArgumentException {
 if (null != fromReplica &&
   fromReplica.getState() == ReplicaState.TEMPORARY) {
  return new LocalReplicaInPipeline((LocalReplicaInPipeline) fromReplica);
 } else if (null != fromReplica) {
  throw new IllegalArgumentException("Incompatible fromReplica "
    + "state: " + fromReplica.getState());
 } else {
  if (null != block) {
   if (null == writer) {
    throw new IllegalArgumentException("A valid writer is "
      + "required for constructing a Replica from block "
      + block.getBlockId());
   }
   return new LocalReplicaInPipeline(block, volume, directoryUsed,
     writer);
  } else {
   if (length != -1) {
    return new LocalReplicaInPipeline(blockId, length, genStamp,
      volume, directoryUsed, writer, bytesToReserve);
   } else {
    return new LocalReplicaInPipeline(blockId, genStamp, volume,
      directoryUsed, bytesToReserve);
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

try (AutoCloseableLock lock = datasetLock.acquire()) {
 if (replicaInfo.getState() != ReplicaState.FINALIZED) {
  throw new IOException("Only a Finalized replica can be appended to; "
    + "Replica with blk id " + replicaInfo.getBlockId() + " has state "
    + replicaInfo.getState());
 }
 ReplicaInPipeline rip = v.append(bpid, replicaInfo,
   newGS, estimateBlockLen);
 if (rip.getReplicaInfo().getState() != ReplicaState.RBW) {
  throw new IOException("Append on block " + replicaInfo.getBlockId() +
    " returned a replica of state " + rip.getReplicaInfo().getState()
    + "; expected RBW");
 }
}
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo.getState() == ReplicaState.FINALIZED) {
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo == null) {
 throw new ReplicaNotFoundException(b);
}
if (replicaInfo.getState() != state) {
 throw new UnexpectedReplicaStateException(b, state);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @param bytesReserved Space that was reserved during
 *     block creation. Now that the block is being finalized we
 *     can free up this space.
 * @return the finalized ReplicaInfo
 * @throws IOException if the block cannot be finalized on disk
 */
ReplicaInfo addFinalizedBlock(String bpid, Block b, ReplicaInfo replicaInfo,
  long bytesReserved) throws IOException {
 releaseReservedSpace(bytesReserved);
 File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
 byte[] checksum = null;
 // copy the last partial checksum if the replica is originally
 // in finalized or rbw state.
 if (replicaInfo.getState() == ReplicaState.FINALIZED) {
  FinalizedReplica finalized = (FinalizedReplica)replicaInfo;
  checksum = finalized.getLastPartialChunkChecksum();
 } else if (replicaInfo.getState() == ReplicaState.RBW) {
  ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
  checksum = rbw.getLastChecksumAndDataLen().getChecksum();
 }
 return new ReplicaBuilder(ReplicaState.FINALIZED)
   .setBlock(replicaInfo)
   .setFsVolume(this)
   .setDirectoryToUse(dest.getParentFile())
   .setLastPartialChunkChecksum(checksum)
   .build();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public Replica recoverClose(ExtendedBlock b, long newGS,
  long expectedBlockLen) throws IOException {
 LOG.info("Recover failed close " + b);
 while (true) {
  try {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check replica's state
    ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
    // bump the replica's GS
    replicaInfo.bumpReplicaGS(newGS);
    // finalize the replica if RBW
    if (replicaInfo.getState() == ReplicaState.RBW) {
     finalizeReplica(b.getBlockPoolId(), replicaInfo);
    }
    return replicaInfo;
   }
  } catch (MustStopExistingWriter e) {
   e.getReplicaInPipeline()
     .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // FsDatasetSpi
public ReplicaHandler recoverRbw(
  ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
  throws IOException {
 LOG.info("Recover RBW replica " + b);
 while (true) {
  try {
   try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo replicaInfo =
      getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
    // check the replica's state
    if (replicaInfo.getState() != ReplicaState.RBW) {
     throw new ReplicaNotFoundException(
       ReplicaNotFoundException.NON_RBW_REPLICA + replicaInfo);
    }
    ReplicaInPipeline rbw = (ReplicaInPipeline)replicaInfo;
    if (!rbw.attemptToSetWriter(null, Thread.currentThread())) {
     throw new MustStopExistingWriter(rbw);
    }
    LOG.info("At " + datanode.getDisplayName() + ", Recovering " + rbw);
    return recoverRbwImpl(rbw, b, newGS, minBytesRcvd, maxBytesRcvd);
   }
  } catch (MustStopExistingWriter e) {
   e.getReplicaInPipeline().stopWriter(
     datanode.getDnConf().getXceiverStopTimeout());
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Moves a given block from one volume to another volume. This is used by
 * the disk balancer.
 *
 * @param block       - ExtendedBlock
 * @param destination - Destination volume
 * @return Old replica info
 */
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi
  destination) throws IOException {
 ReplicaInfo replicaInfo = getReplicaInfo(block);
 if (replicaInfo.getState() != ReplicaState.FINALIZED) {
  throw new ReplicaNotFoundException(
    ReplicaNotFoundException.UNFINALIZED_REPLICA + block);
 }
 FsVolumeReference volumeRef = null;
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  volumeRef = destination.obtainReference();
 }
 try {
  moveBlock(block, replicaInfo, volumeRef);
 } finally {
  if (volumeRef != null) {
   volumeRef.close();
  }
 }
 return replicaInfo;
}
origin: org.apache.hadoop/hadoop-hdfs

if (replicaInfo.getState() != ReplicaState.FINALIZED) {
 throw new ReplicaNotFoundException(
   ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Remove the temporary block file (if any)
 */
@Override // FsDatasetSpi
public void unfinalizeBlock(ExtendedBlock b) throws IOException {
 try (AutoCloseableLock lock = datasetLock.acquire()) {
  ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
    b.getLocalBlock());
  if (replicaInfo != null &&
    replicaInfo.getState() == ReplicaState.TEMPORARY) {
   // remove from volumeMap
   volumeMap.remove(b.getBlockPoolId(), b.getLocalBlock());
   // delete the on-disk temp file
   if (delBlockFromDisk(replicaInfo)) {
    LOG.warn("Block " + b + " unfinalized and removed. ");
   }
   if (replicaInfo.getVolume().isTransientStorage()) {
    ramDiskReplicaTracker.discardReplica(b.getBlockPoolId(),
      b.getBlockId(), true);
   }
  }
 }
}
origin: org.apache.hadoop/hadoop-hdfs

try {
 if (replicaInfo.getState() == ReplicaState.FINALIZED) {
  replica = append(b.getBlockPoolId(), replicaInfo,
           newGS, b.getNumBytes());

Popular methods of ReplicaInfo

  • getBlockId
  • getBytesOnDisk
  • getGenerationStamp
  • getNumBytes
  • getVolume
    Get the volume where this replica is located on disk
  • getBlockFile
    Get the full path of this replica's data file
  • getMetaFile
    Get the full path of this replica's meta file
  • getBlockName
  • getBytesReserved
    Number of bytes reserved for this replica on disk.
  • getStorageUuid
    Get the storageUuid of the volume that stores this replica.
  • getVisibleLength
  • isOnTransientStorage
  • setGenerationStamp
  • setNumBytes
  • equals
  • getDir
  • hashCode
  • isUnlinked
  • parseBaseDir
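Several of these accessors pair naturally with getState() when logging or debugging replica metadata. A brief hedged sketch (the DescribeExample class and describe helper are hypothetical):

import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;

class DescribeExample {
 // Hypothetical helper combining the popular accessors listed above.
 static String describe(ReplicaInfo replica) {
  return "blk_" + replica.getBlockId()
    + " gs=" + replica.getGenerationStamp()
    + " state=" + replica.getState()
    + " bytesOnDisk=" + replica.getBytesOnDisk()
    + " visibleLength=" + replica.getVisibleLength()
    + " volume=" + replica.getVolume();
 }
}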
