Tabnine Logo
DataXceiver.getOutputStream
Code IndexAdd Tabnine to your IDE (free)

How to use
getOutputStream
method
in
org.apache.hadoop.hdfs.server.datanode.DataXceiver

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.DataXceiver.getOutputStream (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

/**
 * Wraps the raw reply stream in a buffered {@link DataOutputStream}.
 * Kept as its own method so tests can substitute the stream.
 *
 * @return a fresh buffered data output stream over {@link #getOutputStream()}
 */
@VisibleForTesting
DataOutputStream getBufferedOutputStream() {
 final BufferedOutputStream buffered =
   new BufferedOutputStream(getOutputStream(), smallBufferSize);
 return new DataOutputStream(buffered);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Convenience wrapper that writes a single status reply to the peer
 * over this xceiver's reply stream.
 *
 * @param status status code to send
 * @param message optional text for the client or other DN (may be null)
 * @throws IOException if the reply cannot be written
 */
private void sendResponse(Status status,
  String message) throws IOException {
 final OutputStream replyStream = getOutputStream();
 writeResponse(status, message, replyStream);
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Handles an OP_TRANSFER_BLOCK request: after a token check, asks the
 * datanode to transfer the replica of {@code blk} to the given targets
 * (pipeline recovery), then writes a SUCCESS reply to the requester.
 *
 * @param blk block whose replica is transferred
 * @param blockToken token checked for COPY access before the transfer
 * @param clientName requesting client; recorded for later op logging
 * @param targets datanodes that should receive the replica
 * @param targetStorageTypes requested storage type per target
 * @param targetStorageIds storage IDs per target
 * @throws IOException if the access check or the transfer fails
 */
@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes,
  final String[] targetStorageIds) throws IOException {
 // Remember which client issued this op for subsequent diagnostics.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Reply stream back to the requester; closed in the finally block.
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 // Token must grant COPY access; NOTE(review): presumably checkAccess
 // writes the failure reply to `out` itself — confirm in its definition.
 checkAccess(out, true, blk, blockToken, Op.TRANSFER_BLOCK,
   BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes,
   targetStorageIds);
 try {
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, targetStorageIds, clientName);
  // Acknowledge success; no message payload is needed.
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  // Log at info and count as a network error, but let the caller see it.
  LOG.info("transferBlock {} received exception {}",
    blk, ioe.toString());
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  // Always release the reply stream, success or failure.
  IOUtils.closeStream(out);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 final String storageId) throws IOException {
updateCurrentThreadName("Replacing block " + block + " from " + delHint);
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
checkAccess(replyOut, true, block, blockToken,
  Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE,
origin: org.apache.hadoop/hadoop-hdfs

long read = 0;
updateCurrentThreadName("Sending block " + block);
OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken, Op.READ_BLOCK,
 writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
origin: org.apache.hadoop/hadoop-hdfs

updateCurrentThreadName("Getting checksum for block " + block);
final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken, Op.BLOCK_CHECKSUM,
  BlockTokenIdentifier.AccessMode.READ);
origin: org.apache.hadoop/hadoop-hdfs

updateCurrentThreadName("Getting checksum for block group" +
  block);
final DataOutputStream out = new DataOutputStream(getOutputStream());
checkAccess(out, true, block, blockToken, Op.BLOCK_GROUP_CHECKSUM,
  BlockTokenIdentifier.AccessMode.READ);
origin: org.apache.hadoop/hadoop-hdfs

  null, CachingStrategy.newDropBehind());
OutputStream baseStream = getOutputStream();
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Builds a buffered {@link DataOutputStream} around the reply stream.
 * Separated into its own method so it can be overridden in tests.
 *
 * @return buffered data output stream wrapping {@link #getOutputStream()}
 */
DataOutputStream getBufferedOutputStream() {
 final BufferedOutputStream buffered = new BufferedOutputStream(
   getOutputStream(), HdfsConstants.SMALL_BUFFER_SIZE);
 return new DataOutputStream(buffered);
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Returns the reply stream wrapped in a {@link BufferedOutputStream}
 * and a {@link DataOutputStream}. Factored out to ease testing.
 *
 * @return new buffered data output stream over {@link #getOutputStream()}
 */
DataOutputStream getBufferedOutputStream() {
 return new DataOutputStream(new BufferedOutputStream(
   getOutputStream(), HdfsConstants.SMALL_BUFFER_SIZE));
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Sends a single status reply on this xceiver's reply stream.
 *
 * @param status status code to write
 * @param message optional text for the client or other DN (may be null)
 * @throws IOException if writing the reply fails
 */
private void sendResponse(Status status,
  String message) throws IOException {
 final OutputStream reply = getOutputStream();
 writeResponse(status, message, reply);
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Helper that emits one status response to the peer.
 *
 * @param status status code to transmit
 * @param message optional message for the client or other DN (may be null)
 * @throws IOException on a write failure
 */
private void sendResponse(Status status,
  String message) throws IOException {
 writeResponse(status, message,
   getOutputStream());
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Handles an OP_TRANSFER_BLOCK request: verifies the token's COPY
 * access, asks the datanode to transfer the replica of {@code blk} to
 * the given targets (pipeline recovery), and replies SUCCESS.
 *
 * @param blk block whose replica is transferred
 * @param blockToken token checked for COPY access before the transfer
 * @param clientName requesting client; recorded for later op logging
 * @param targets datanodes that should receive the replica
 * @param targetStorageTypes requested storage type per target
 * @throws IOException if the access check or the transfer fails
 */
@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes) throws IOException {
 // Remember which client issued this op for subsequent diagnostics.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Reply stream back to the requester; closed in the finally block.
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 // Token must grant COPY access; NOTE(review): presumably checkAccess
 // writes the failure reply to `out` itself — confirm in its definition.
 checkAccess(out, true, blk, blockToken,
   Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
 try {
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, clientName);
  // Acknowledge success; no message payload is needed.
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  // Log at info and count as a network error, but let the caller see it.
  LOG.info("transferBlock " + blk + " received exception " + ioe);
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  // Always release the reply stream, success or failure.
  IOUtils.closeStream(out);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Handles an OP_TRANSFER_BLOCK request: verifies the token's COPY
 * access, asks the datanode to transfer the replica of {@code blk} to
 * the given targets (pipeline recovery), and replies SUCCESS.
 *
 * @param blk block whose replica is transferred
 * @param blockToken token checked for COPY access before the transfer
 * @param clientName requesting client; recorded for later op logging
 * @param targets datanodes that should receive the replica
 * @param targetStorageTypes requested storage type per target
 * @throws IOException if the access check or the transfer fails
 */
@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes) throws IOException {
 // Remember which client issued this op for subsequent diagnostics.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Reply stream back to the requester; closed in the finally block.
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 // Token must grant COPY access; NOTE(review): presumably checkAccess
 // writes the failure reply to `out` itself — confirm in its definition.
 checkAccess(out, true, blk, blockToken,
   Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
 try {
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, clientName);
  // Acknowledge success; no message payload is needed.
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  // Log at info and count as a network error, but let the caller see it.
  LOG.info("transferBlock " + blk + " received exception " + ioe);
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  // Always release the reply stream, success or failure.
  IOUtils.closeStream(out);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

long read = 0;
updateCurrentThreadName("Sending block " + block);
OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken,
 writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
origin: ch.cern.hadoop/hadoop-hdfs

updateCurrentThreadName("Getting checksum for block " + block);
final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken,
  Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
origin: ch.cern.hadoop/hadoop-hdfs

  null, CachingStrategy.newDropBehind());
OutputStream baseStream = getOutputStream();
origin: io.prestosql.hadoop/hadoop-apache

updateCurrentThreadName("Getting checksum for block " + block);
final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken,
  Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
origin: io.prestosql.hadoop/hadoop-apache

long read = 0;
updateCurrentThreadName("Sending block " + block);
OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken,
 writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
origin: io.prestosql.hadoop/hadoop-apache

  null, CachingStrategy.newDropBehind());
OutputStream baseStream = getOutputStream();
org.apache.hadoop.hdfs.server.datanode.DataXceiver.getOutputStream

Popular methods of DataXceiver

  • <init>
  • sendResponse
    Utility function for sending a response.
  • updateCurrentThreadName
    Update the current thread's name to contain the current status. Use this only after this receiver has started on its thread.
  • checkAccess
  • checkAndWaitForBP
    Wait until the BP is registered, up to the configured amount of time. Throws an exception if it times out.
  • create
  • elapsed
  • getBufferedOutputStream
    Separated for testing.
  • incrDatanodeNetworkErrors
  • processOp
  • readOp
  • releaseSocket
  • readOp,
  • releaseSocket,
  • sendOOB,
  • sendShmErrorResponse,
  • sendShmSuccessResponse,
  • writeResponse,
  • writeSuccessWithChecksumInfo,
  • calcPartialBlockChecksum,
  • copyBlock

Popular in Java

  • Parsing JSON documents to java classes using gson
  • notifyDataSetChanged (ArrayAdapter)
  • requestLocationUpdates (LocationManager)
  • compareTo (BigDecimal)
  • EOFException (java.io)
    Thrown when a program encounters the end of a file or stream during an input operation.
  • InputStreamReader (java.io)
    A class for turning a byte stream into a character stream. Data read from the source input stream is
  • MalformedURLException (java.net)
    This exception is thrown when a program attempts to create an URL from an incorrect specification.
  • URLConnection (java.net)
    A connection to a URL for reading or writing. For HTTP connections, see HttpURLConnection for docume
  • Time (java.sql)
    Java representation of an SQL TIME value. Provides utilities to format and parse the time's represen
  • Map (java.util)
    A Map is a data structure consisting of a set of keys and values in which each key is mapped to a si
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now