DataXceiver

How to use DataXceiver in org.apache.hadoop.hdfs.server.datanode

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.DataXceiver (showing the top 20 results out of 315)
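
Before the indexed snippets, here is a minimal usage sketch. It relies only on the DataXceiver.create(Peer, DataNode, DataXceiverServer) factory shown in the first snippet below; the class and method names in the sketch (DataXceiverUsageSketch, serve) are illustrative, not part of Hadoop. DataXceiver and DataXceiverServer are package-private, so code like this can only live inside org.apache.hadoop.hdfs.server.datanode, and the real server wraps each xceiver in a Hadoop Daemon rather than a plain Thread (see the DataXceiverServer excerpt further down).

package org.apache.hadoop.hdfs.server.datanode;

import java.io.IOException;

import org.apache.hadoop.hdfs.net.Peer;

// Illustrative only: one DataXceiver serves exactly one peer connection. It
// implements Runnable, so once created via the factory it can be started on
// its own daemon thread.
class DataXceiverUsageSketch {
  static void serve(Peer peer, DataNode datanode, DataXceiverServer server)
      throws IOException {
    Thread xceiver = new Thread(DataXceiver.create(peer, datanode, server));
    xceiver.setDaemon(true);
    xceiver.start();
  }
}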

origin: org.apache.hadoop/hadoop-hdfs

public static DataXceiver create(Peer peer, DataNode dn,
  DataXceiverServer dataXceiverServer) throws IOException {
 return new DataXceiver(peer, dn, dataXceiverServer);
}

origin: org.apache.hadoop/hadoop-hdfs

@Override
public void copyBlock(final ExtendedBlock block,
    final Token<BlockTokenIdentifier> blockToken) throws IOException {
  updateCurrentThreadName("Copying block " + block);
  DataOutputStream reply = getBufferedOutputStream();
  checkAccess(reply, true, block, blockToken, Op.COPY_BLOCK,
      BlockTokenIdentifier.AccessMode.COPY);

  // ... refuse the copy if the replica is pinned on this datanode:
    String msg = "Not able to copy block " + block.getBlockId() + " " +
        "to " + peer.getRemoteAddressString() + " because it's pinned ";
    LOG.info(msg);
    sendResponse(Status.ERROR_BLOCK_PINNED, msg);
    return;

  // ... refuse the copy if the balancer thread quota is exceeded:
    String msg = "Not able to copy block " + block.getBlockId() + " " +
        "to " + peer.getRemoteAddressString() + " because threads " +
        "quota is exceeded.";
    LOG.info(msg);
    sendResponse(ERROR, msg);
    return;

  try {
    // ... set up a BlockSender over the raw output stream:
    OutputStream baseStream = getOutputStream();
    writeSuccessWithChecksumInfo(blockSender, reply);
    // ... stream the block to the requester
  } catch (IOException ioe) {
    isOpSuccess = false;
    LOG.info("opCopyBlock {} received exception {}", block, ioe.toString());
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    // ... release the balancer throttler and close the BlockSender
    datanode.metrics.addCopyBlockOp(elapsed());
  }
}
origin: org.apache.hadoop/hadoop-hdfs

// checkAccess(...): tail of the access-check helper's signature (the leading
// parameters are elided in this excerpt)
    final StorageType[] storageTypes,
    final String[] storageIds) throws IOException {
  checkAndWaitForBP(blk);
  if (datanode.isBlockTokenEnabled) {
    LOG.debug("Checking block access token for block '{}' with mode '{}'",
        blk.getBlockId(), mode);
    // ... verify the block token and reply with an access-token error on failure
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Utility function for sending a response.
 * 
 * @param status status message to write
 * @param message message to send to the client or other DN
 */
private void sendResponse(Status status,
  String message) throws IOException {
 writeResponse(status, message, getOutputStream());
}
origin: org.apache.hadoop/hadoop-hdfs

@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes,
  final String[] targetStorageIds) throws IOException {
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 checkAccess(out, true, blk, blockToken, Op.TRANSFER_BLOCK,
   BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes,
   targetStorageIds);
 try {
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, targetStorageIds, clientName);
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  LOG.info("transferBlock {} received exception {}",
    blk, ioe.toString());
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  IOUtils.closeStream(out);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

// blockChecksum(...): computes a replicated block's checksum (the leading
// parameters of the signature are elided in this excerpt)
    BlockChecksumOptions blockChecksumOptions)
    throws IOException {
  updateCurrentThreadName("Getting checksum for block " + block);
  final DataOutputStream out = new DataOutputStream(
      getOutputStream());
  checkAccess(out, true, block, blockToken, Op.BLOCK_CHECKSUM,
      BlockTokenIdentifier.AccessMode.READ);
  try {
    BlockChecksumComputer maker = new ReplicatedBlockChecksumComputer(
        // ... computer arguments and checksum write-back elided
  } catch (IOException ioe) {
    LOG.info("blockChecksum {} received exception {}",
        block, ioe.toString());
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    datanode.metrics.addBlockChecksumOp(elapsed());
    // ...
  }
origin: org.apache.hadoop/hadoop-hdfs

// replaceBlock(...): receives a block from a proxy datanode on behalf of the
// balancer (the leading parameters of the signature are elided in this excerpt)
    final DatanodeInfo proxySource,
    final String storageId) throws IOException {
  updateCurrentThreadName("Replacing block " + block + " from " + delHint);
  DataOutputStream replyOut = new DataOutputStream(getOutputStream());
  checkAccess(replyOut, true, block, blockToken,
      Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE,
      new StorageType[]{storageType},
      new String[]{storageId});

  // ... refuse the request if the balancer thread quota is exceeded:
    LOG.warn(msg);
    sendResponse(ERROR, msg);
    return;

  try {
    // ... connect to proxySource, ask it to copy the block, and read back the
    // checksum header ...
        checksumInfo.getChecksum());
    // ... then feed the proxy's stream into a new BlockReceiver:
    setCurrentBlockReceiver(getBlockReceiver(block, storageType,
        proxyReply, proxySock.getRemoteSocketAddress().toString(),
        proxySock.getLocalSocketAddress().toString(),
        // ... remaining BlockReceiver arguments elided
  } catch (IOException ioe) {
    incrDatanodeNetworkErrors();
    // ...
  } finally {
    // ... send the final status back to the requester:
    try {
      sendResponse(opStatus, errMsg);
    } catch (IOException ioe) {
      LOG.warn("Error writing reply back to {}",
          peer.getRemoteAddressString());
      incrDatanodeNetworkErrors();
    }
    // ... close all opened streams
  }

  // update metrics
  datanode.metrics.addReplaceBlockOp(elapsed());
origin: org.apache.hadoop/hadoop-hdfs

// writeBlock(...): receives a block from a client or an upstream datanode and
// forwards it down the write pipeline (leading parameters elided in this excerpt)
    final String[] targetStorageIds) throws IOException {
  previousOpClientName = clientname;
  updateCurrentThreadName("Receiving block " + block);
  final boolean isDatanode = clientname.length() == 0;
  final boolean isClient = !isDatanode;
  final DataOutputStream replyOut = getBufferedOutputStream();
  checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
      BlockTokenIdentifier.AccessMode.WRITE,
      storageTypes, storageIds);
  try {
    // ... open a BlockReceiver for the incoming data:
    setCurrentBlockReceiver(getBlockReceiver(block, storageType, in,
        peer.getRemoteAddressString(),
        peer.getLocalAddressString(),
        // ... remaining BlockReceiver arguments elided

    // ... connect to the next datanode in the pipeline; on failure, log and
    // continue without the mirror:
      LOG.info("{}:Exception transfering {} to mirror {}- continuing " +
          "without the mirror", datanode, block, mirrorNode, e);
      incrDatanodeNetworkErrors();

    // ... for a transfer, send the close-ack:
    writeResponse(SUCCESS, null, replyOut);
  } catch (IOException ioe) {
    LOG.info("opWriteBlock {} received exception {}",
        block, ioe.toString());
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    IOUtils.closeSocket(mirrorSock);
    // ...
  }
origin: ch.cern.hadoop/hadoop-hdfs

// writeBlock(...) in the 2.7 line (ch.cern.hadoop): same structure as above,
// using BlockTokenSecretManager.AccessMode (leading parameters elided)
    final boolean[] targetPinnings) throws IOException {
  previousOpClientName = clientname;
  updateCurrentThreadName("Receiving block " + block);
  final boolean isDatanode = clientname.length() == 0;
  final boolean isClient = !isDatanode;
  long size = 0;
  final DataOutputStream replyOut = getBufferedOutputStream();
  checkAccess(replyOut, isClient, block, blockToken,
      Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
  try {
    // ... set up the BlockReceiver and the mirror connection; on mirror
    // failure, log and continue without the mirror:
          block + " to mirror " + mirrorNode +
          "- continuing without the mirror", e);
      incrDatanodeNetworkErrors();

    // ... for a transfer, send the close-ack:
    LOG.trace("TRANSFER: send close-ack");
    writeResponse(SUCCESS, null, replyOut);
  } catch (IOException ioe) {
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    // ... close all opened streams
  }

  // update metrics
  datanode.metrics.addWriteBlockOp(elapsed());
  datanode.metrics.incrWritesFromClient(peer.isLocal(), size);
origin: org.apache.hadoop/hadoop-hdfs

 updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));
   peer.setReadTimeout(dnConf.socketTimeout);
  op = readOp();
 } catch (InterruptedIOException ignored) {
  break;
 } catch (IOException err) {
  incrDatanodeNetworkErrors();
  throw err;
 processOp(op);
 ++opsProcessed;
} while ((peer != null) &&
collectThreadLocalStates();
LOG.debug("{}:Number of active connections is: {}",
  datanode.getDisplayName(), datanode.getXceiverCount());
updateCurrentThreadName("Cleaning up");
if (peer != null) {
 dataXceiverServer.closePeer(peer);
origin: com.facebook.hadoop/hadoop-core

// from the Facebook hadoop-core fork of DataXceiver.run(): opcode dispatch is
// a plain switch over DataTransferProtocol constants (surrounding setup elided)
getAddresses();
updateCurrentThreadName("waiting for operation");
// ...
switch ( op ) {
case DataTransferProtocol.OP_READ_BLOCK:
  readBlock( in, versionAndOpcode );
  datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
  if (local) { /* ... local vs. remote client read counters elided ... */ }
  break;
case DataTransferProtocol.OP_READ_BLOCK_ACCELERATOR:
  readBlockAccelerator( in );
  datanode.myMetrics.readBlockOp.inc(DataNode.now() - startTime);
  if (local) { /* ... */ }
  break;
case DataTransferProtocol.OP_WRITE_BLOCK:
  writeBlock( in, versionAndOpcode );
  datanode.myMetrics.writeBlockOp.inc(DataNode.now() - startTime);
  if (local) { /* ... local vs. remote client write counters elided ... */ }
  break;
case DataTransferProtocol.OP_READ_METADATA:
  readMetadata( in );
  datanode.myMetrics.readMetadataOp.inc(DataNode.now() - startTime);
  break;
case DataTransferProtocol.OP_REPLACE_BLOCK: // for balancing purpose; send to a destination
  replaceBlock(in);
  // ... remaining cases elided
origin: org.apache.hadoop/hadoop-hdfs

// requestShortCircuitFds(...): passes open file descriptors for a local block
// to a short-circuit-read client (leading parameters elided in this excerpt)
    SlotId slotId, int maxVersion, boolean supportsReceiptVerification)
    throws IOException {
  updateCurrentThreadName("Passing file descriptors for block " + blk);
  DataOutputStream out = getBufferedOutputStream();
  checkAccess(out, true, blk, token,
      Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ,
      null, null);
  // ...
origin: com.facebook.hadoop/hadoop-core

 LOG.warn("Not able to receive block " + blockId + " from " 
   + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
 sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR, 
   datanode.socketWriteTimeout);
 return;
long writeDuration;
updateCurrentThreadName("replacing block " + block + " from " + sourceID);
try {
  sendResponse(s, opStatus, datanode.socketWriteTimeout);
 } catch (IOException ioe) {
  LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
origin: com.facebook.hadoop/hadoop-core

// readMetadata(...) in the Facebook hadoop-core fork: streams a block's checksum
// metadata file back to the requester (body abridged)
MetaDataInputStream checksumIn = null;
DataOutputStream out = null;
updateCurrentThreadName("reading metadata for block " + block);
try {
  checksumIn = datanode.data.getMetaDataInputStream(namespaceId, block);
  // ...
origin: org.jvnet.hudson.hadoop/hadoop-core

LOG.warn("Not able to receive block " + blockId + " from " 
  + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
sendResponse(s, (short)DataTransferProtocol.OP_STATUS_ERROR, 
  datanode.socketWriteTimeout);
return;
 sendResponse(s, opStatus, datanode.socketWriteTimeout);
} catch (IOException ioe) {
 LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Separated for testing.
 * @return
 */
@VisibleForTesting
DataOutputStream getBufferedOutputStream() {
 return new DataOutputStream(
   new BufferedOutputStream(getOutputStream(), smallBufferSize));
}
origin: org.apache.hadoop/hadoop-hdfs

// in DataXceiverServer.run(): each accepted peer gets a DataXceiver on its own daemon thread
  new Daemon(datanode.threadGroup,
      DataXceiver.create(peer, datanode, this))
      .start();
} catch (SocketTimeoutException ignored) {
  // ...
origin: ch.cern.hadoop/hadoop-hdfs

// checkAccess(...) in the 2.7 line: tail of the signature (leading parameters
// elided in this excerpt)
    final Op op,
    final BlockTokenSecretManager.AccessMode mode) throws IOException {
  checkAndWaitForBP(blk);
  if (datanode.isBlockTokenEnabled) {
    if (LOG.isDebugEnabled()) {
      // ... log the token check, then verify the token
origin: org.apache.hadoop/hadoop-hdfs

// blockGroupChecksum(...): computes the checksum of an erasure-coded block
// group described by stripedBlockInfo (leading parameters elided in this excerpt)
    throws IOException {
  final ExtendedBlock block = stripedBlockInfo.getBlock();
  updateCurrentThreadName("Getting checksum for block group" +
      block);
  final DataOutputStream out = new DataOutputStream(getOutputStream());
  checkAccess(out, true, block, blockToken, Op.BLOCK_GROUP_CHECKSUM,
      BlockTokenIdentifier.AccessMode.READ);
  try {
    // ... compute the block-group checksum and write it back to the client
  } catch (IOException ioe) {
    LOG.info("blockChecksum {} received exception {}",
        stripedBlockInfo.getBlock(), ioe.toString());
    incrDatanodeNetworkErrors();
    throw ioe;
  } finally {
    datanode.metrics.addBlockChecksumOp(elapsed());
    // ...
  }
origin: ch.cern.hadoop/hadoop-hdfs

@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes) throws IOException {
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 checkAccess(out, true, blk, blockToken,
   Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
 try {
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, clientName);
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  LOG.info("transferBlock " + blk + " received exception " + ioe);
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  IOUtils.closeStream(out);
 }
}
org.apache.hadoop.hdfs.server.datanode.DataXceiver

Javadoc

Thread for processing incoming/outgoing data stream.

Most used methods

  • <init>
  • sendResponse
    Utility function for sending a response.
  • updateCurrentThreadName
    Update the current thread's name to contain the current status. Use this only after this receiver has started on its thread, i.e., outside the constructor.
  • checkAccess
  • checkAndWaitForBP
    Wait until the BP is registered, up to the configured amount of time. Throws an exception if it times out.
  • create
  • elapsed
  • getBufferedOutputStream
    Separated for testing.
  • getOutputStream
  • incrDatanodeNetworkErrors
  • processOp
  • readOp
  • releaseSocket
  • sendOOB
  • sendShmErrorResponse
  • sendShmSuccessResponse
  • writeResponse
  • writeSuccessWithChecksumInfo
  • calcPartialBlockChecksum
  • copyBlock
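
Taken together, these helpers give every op handler on this page the same shape. The skeleton below is not a real DataXceiver method; it is a hypothetical handler (handleSomeOp and doTheActualWork are made-up names) showing the recurring pattern from the snippets above: rename the thread, verify the block token, do the op-specific work, count network errors on failure, and record metrics in a finally block.

// Hypothetical handler illustrating the pattern shared by copyBlock, writeBlock,
// blockChecksum, replaceBlock and the other handlers shown above.
public void handleSomeOp(ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken) throws IOException {
  updateCurrentThreadName("Handling op for block " + block);  // name the thread after the op
  DataOutputStream reply = getBufferedOutputStream();         // buffered reply stream to the requester
  checkAccess(reply, true, block, blockToken,                 // verify the block token; the Op constant
      Op.COPY_BLOCK,                                          // and AccessMode differ per handler
      BlockTokenIdentifier.AccessMode.READ);
  try {
    doTheActualWork(block);                                   // made-up call standing in for the op body
    writeResponse(Status.SUCCESS, null, reply);               // ack the requester
  } catch (IOException ioe) {
    LOG.info("handleSomeOp {} received exception {}", block, ioe.toString());
    incrDatanodeNetworkErrors();                              // count the failure as a network error
    throw ioe;                                                // run() will tear down the connection
  } finally {
    datanode.metrics.addCopyBlockOp(elapsed());               // each real handler records its own metric
  }
}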
