org.apache.hadoop.hdfs.server.datanode

How to use org.apache.hadoop.hdfs.server.datanode

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

@Override // ClientDatanodeProtocol
public void triggerBlockReport(BlockReportOptions options)
  throws IOException {
 checkSuperuserPrivilege();
 for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
  if (bpos != null) {
   for (BPServiceActor actor : bpos.getBPServiceActors()) {
    actor.triggerBlockReport(options);
   }
  }
 }
}
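
A hedged usage sketch: in a test, the same report can be triggered directly on a DataNode obtained from a MiniDFSCluster. The class name and the single-datanode setup below are illustrative assumptions, not part of the snippet above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class TriggerBlockReportExample {
 public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
   cluster.waitActive();
   DataNode dn = cluster.getDataNodes().get(0);
   // Ask the datanode to send a full (non-incremental) block report
   // to its namenode(s) at the next opportunity.
   dn.triggerBlockReport(
     new BlockReportOptions.Factory().setIncremental(false).build());
  } finally {
   cluster.shutdown();
  }
 }
}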
origin: org.apache.hadoop/hadoop-hdfs

/**
 * @param bpid block pool Id
 * @return true if the BPOfferService thread for the given block pool is alive
 */
public boolean isBPServiceAlive(String bpid) {
 BPOfferService bp = blockPoolManager.get(bpid);
 return bp != null && bp.isAlive();
}
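
Continuing the MiniDFSCluster sketch above (the cluster handle is an assumption), the liveness check for the active block pool looks like:

// Fetch the block pool id from the namenode side of the test cluster,
// then ask the first datanode whether its BP service thread is alive.
String bpid = cluster.getNamesystem().getBlockPoolId();
boolean alive = cluster.getDataNodes().get(0).isBPServiceAlive(bpid);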
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Return the parent directory path where this replica is located.
 * @return the parent directory path where this replica is located
 */
protected File getDir() {
 return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
   getBlockId()) : baseDir;
}
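
The subdirectory layout that DatanodeUtil.idToBlockDir produces can be reproduced with plain bit arithmetic. The sketch below mirrors the two-level scheme used by recent Hadoop releases (bits 8-12 and 16-20 of the block id each select one of 32 "subdirNN" directories); treat the exact constants as an assumption to verify against your Hadoop version.

import java.io.File;

public class BlockDirLayoutSketch {
 static File idToBlockDir(File baseDir, long blockId) {
  // Two 5-bit slices of the block id pick the two directory levels.
  int d1 = (int) ((blockId >> 16) & 0x1F);
  int d2 = (int) ((blockId >> 8) & 0x1F);
  return new File(baseDir, "subdir" + d1 + File.separator + "subdir" + d2);
 }

 public static void main(String[] args) {
  // Block id 0x12345678 -> .../subdir20/subdir22
  System.out.println(idToBlockDir(
    new File("/data/current/finalized"), 0x12345678L));
 }
}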
origin: org.apache.hadoop/hadoop-hdfs

void reportBadBlocks(ExtendedBlock block,
           String storageUuid, StorageType storageType) {
 checkBlock(block);
 for (BPServiceActor actor : bpServices) {
  ReportBadBlockAction rbbAction =
    new ReportBadBlockAction(block, storageUuid, storageType);
  actor.bpThreadEnqueue(rbbAction);
 }
}

origin: apache/hbase

LOG.info("killing datanode " + name + " / " + lookup);
ipcPort = dn.ipcServer.getListenerAddress().getPort();
dn.shutdown();
LOG.info("killed datanode " + name + " / " + lookup);
break;
origin: org.apache.hadoop/hadoop-hdfs

/**
 * A datanode is considered to be up if at least one of its block pool
 * (BP) services is alive.
 */
public boolean isDatanodeUp() {
 for (BPOfferService bp : blockPoolManager.getAllNamenodeThreads()) {
  if (bp.isAlive()) {
   return true;
  }
 }
 return false;
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Ask each of the actors to schedule a block report after
 * the specified delay.
 */
void scheduleBlockReport(long delay) {
 for (BPServiceActor actor : bpServices) {
  actor.getScheduler().scheduleBlockReport(delay);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

void joinAll() {
 for (BPOfferService bpos: this.getAllNamenodeThreads()) {
  bpos.join();
 }
}

origin: org.apache.hadoop/hadoop-hdfs

private void shutdownPeriodicScanners() {
 shutdownDirectoryScanner();
 blockScanner.removeAllVolumeScanners();
}
origin: org.apache.hadoop/hadoop-hdfs

@Override // ReplicaInPipeline
public synchronized ChunkChecksum getLastChecksumAndDataLen() {
 return new ChunkChecksum(getBytesOnDisk(), lastChecksum);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * This method arranges for the data node to send
 * the block report at the next heartbeat.
 */
public void scheduleAllBlockReport(long delay) {
 for(BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
  bpos.scheduleBlockReport(delay);
 }
}
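
A short sketch, again assuming a MiniDFSCluster handle named cluster: after mutating block state in a test, reports can be forced on every datanode with no delay.

for (DataNode dn : cluster.getDataNodes()) {
 dn.scheduleAllBlockReport(0);  // 0 ms delay: report at the next heartbeat
}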
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Report a bad block on another DN (e.g. if we received a corrupt replica
 * from a remote host).
 * @param srcDataNode the DN hosting the bad block
 * @param block the block itself
 */
public void reportRemoteBadBlock(DatanodeInfo srcDataNode, ExtendedBlock block)
  throws IOException {
 BPOfferService bpos = getBPOSForBlock(block);
 bpos.reportRemoteBadBlock(srcDataNode, block);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Called by the DN to report an error to the NNs.
 */
void trySendErrorReport(int errCode, String errMsg) {
 for (BPServiceActor actor : bpServices) {
  ErrorReportAction errorReportAction =
    new ErrorReportAction(errCode, errMsg);
  actor.bpThreadEnqueue(errorReportAction);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Cancels a running plan.
 * @param planID - Hash string that identifies a plan
 */
@Override
public void cancelDiskBalancePlan(String planID) throws IOException {
 checkSuperuserPrivilege();
 getDiskBalancer().cancelPlan(planID);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Returns the status of the currently running or most recently executed
 * work plan.
 * @return DiskBalancerWorkStatus
 * @throws IOException if the caller lacks superuser privilege or the
 *         query fails
 */
@Override
public DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException {
 checkSuperuserPrivilege();
 return getDiskBalancer().queryWorkStatus();
}
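
A polling sketch built on this method, assuming dn is a DataNode reference (e.g. from a MiniDFSCluster) with a disk-balancer plan already submitted; the Result enum values come from DiskBalancerWorkStatus.

// Poll until the plan leaves the in-progress state, then print the
// JSON status. The caller is assumed to allow InterruptedException.
DiskBalancerWorkStatus status = dn.queryDiskBalancerPlan();
while (status.getResult() == DiskBalancerWorkStatus.Result.PLAN_UNDER_PROGRESS) {
 Thread.sleep(1000);  // wait for the datanode to move more blocks
 status = dn.queryDiskBalancerPlan();
}
System.out.println(status.toJsonString());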
origin: org.apache.hadoop/hadoop-hdfs

@Override
public OutputStream getDataOutputStream(boolean append) throws IOException {
 return getFileIoProvider().getFileOutputStream(
   getVolume(), getBlockFile(), append);
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public boolean deleteMetadata() {
 return getFileIoProvider().fullyDelete(getVolume(), getMetaFile());
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Inflates bytesCopied by the block tolerance percentage and checks
 * whether it reaches bytesToCopy. This lets us stop copying once we have
 * gotten close enough to the target.
 *
 * @param item DiskBalancerWorkItem
 * @return false if we need to copy more, true if we are done
 */
private boolean isCloseEnough(DiskBalancerWorkItem item) {
 long temp = item.getBytesCopied() +
   ((item.getBytesCopied() * getBlockTolerancePercentage(item)) / 100);
 return item.getBytesToCopy() < temp;
}
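
A worked example of the tolerance arithmetic, assuming a 10% block tolerance:

long bytesCopied = 900;
long tolerancePct = 10;  // assumed getBlockTolerancePercentage(item) result
long inflated = bytesCopied + (bytesCopied * tolerancePct) / 100;  // 990
// bytesToCopy = 1000: 1000 >= 990 -> isCloseEnough returns false, keep copying
// bytesToCopy =  980:  980 <  990 -> isCloseEnough returns true, stop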
origin: org.apache.hadoop/hadoop-hdfs

@Override // DataNodeMXBean
public String getDiskBalancerStatus() {
 try {
  return getDiskBalancer().queryWorkStatus().toJsonString();
 } catch (IOException ex) {
  LOG.debug("Reading diskbalancer Status failed. ex:{}", ex);
  return "";
 }
}
origin: apache/hbase

public void waitForNumReplicas(int numReplicas) throws Exception {
 // Wait until the expected number of datanodes has registered...
 while (UTIL.getDFSCluster().getDataNodes().size() < numReplicas) {
  Thread.sleep(100);
 }

 // ...and each one is fully started.
 for (int i = 0; i < numReplicas; ++i) {
  for (DataNode dn : UTIL.getDFSCluster().getDataNodes()) {
   while (!dn.isDatanodeFullyStarted()) {
    Thread.sleep(100);
   }
  }
 }
}
org.apache.hadoop.hdfs.server.datanode

Most used classes

  • DataNode
    DataNode is a class (and program) that stores a set of blocks for a DFS deployment. A single deploym
  • DataStorage
    Data storage information file.
  • BlockMetadataHeader
    BlockMetadataHeader manages metadata for data blocks on Datanodes. This is not related to the Block
  • DataNode$BlockRecord
    A convenient class used in block recovery
  • DataXceiverServer
    Server used for receiving/sending a block of data. This is created to listen for requests from clien
  • BlockSender
  • ChunkChecksum
  • DataNode$DataTransfer
  • DataXceiverServer$BlockBalanceThrottler
  • ReplicaAlreadyExistsException
  • ReplicaBeingWritten
  • ReplicaNotFoundException
  • BlockReceiver$Packet
  • BlockReceiver$PacketResponder
  • BlockReceiver
  • CachingStrategy
  • DataXceiver
  • DirectoryScanner
  • FinalizedReplica