congrats Icon
New! Tabnine Pro 14-day free trial
Start a free trial
Tabnine Logo
DataXceiver.checkAccess
Code IndexAdd Tabnine to your IDE (free)

How to use
checkAccess
method
in
org.apache.hadoop.hdfs.server.datanode.DataXceiver

Best Java code snippets using org.apache.hadoop.hdfs.server.datanode.DataXceiver.checkAccess (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes,
  final String[] targetStorageIds) throws IOException {
 // Remember which client initiated this op, then tag the worker thread
 // so thread dumps show what it is currently doing.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Response stream back to the requester; closed unconditionally below.
 final DataOutputStream response = new DataOutputStream(getOutputStream());
 // Validate the block token before doing any work; replies with an
 // access-token error on the stream itself when the check fails.
 checkAccess(response, true, blk, blockToken, Op.TRANSFER_BLOCK,
   BlockTokenIdentifier.AccessMode.COPY, targetStorageTypes,
   targetStorageIds);
 try {
  // Hand the replica off to the target datanodes for pipeline recovery.
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, targetStorageIds, clientName);
  writeResponse(Status.SUCCESS, null, response);
 } catch (IOException e) {
  // Log, count as a network error, and propagate to the caller.
  LOG.info("transferBlock {} received exception {}",
    blk, e.toString());
  incrDatanodeNetworkErrors();
  throw e;
 } finally {
  // closeStream swallows close failures; keep the finally so the
  // stream is released even when checkAccess/transfer throws.
  IOUtils.closeStream(response);
 }
}
origin: org.apache.hadoop/hadoop-hdfs

 private void checkAccess(OutputStream out, final boolean reply,
   final ExtendedBlock blk,
   final Token<BlockTokenIdentifier> t,
   final Op op,
   final BlockTokenIdentifier.AccessMode mode,
   final StorageType[] storageTypes,
   final String[] storageIds) throws IOException {
  checkAndWaitForBP(blk);
  if (datanode.isBlockTokenEnabled) {
   LOG.debug("Checking block access token for block '{}' with mode '{}'",
     blk.getBlockId(), mode);
   try {
    datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode,
      storageTypes, storageIds);
   } catch(InvalidToken e) {
    try {
     if (reply) {
      BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder()
       .setStatus(ERROR_ACCESS_TOKEN);
      if (mode == BlockTokenIdentifier.AccessMode.WRITE) {
       DatanodeRegistration dnR = 
        datanode.getDNRegistrationForBP(blk.getBlockPoolId());
       // NB: Unconditionally using the xfer addr w/o hostname
       resp.setFirstBadLink(dnR.getXferAddr());
      }
      resp.build().writeDelimitedTo(out);
      out.flush();
     }
     LOG.warn("Block token verification failed: op={}, " +
         "remoteAddress={}, message={}",
origin: org.apache.hadoop/hadoop-hdfs

updateCurrentThreadName("Replacing block " + block + " from " + delHint);
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
checkAccess(replyOut, true, block, blockToken,
  Op.REPLACE_BLOCK, BlockTokenIdentifier.AccessMode.REPLACE,
  new StorageType[]{storageType},
origin: org.apache.hadoop/hadoop-hdfs

final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken, Op.BLOCK_CHECKSUM,
  BlockTokenIdentifier.AccessMode.READ);
BlockChecksumComputer maker = new ReplicatedBlockChecksumComputer(
origin: org.apache.hadoop/hadoop-hdfs

updateCurrentThreadName("Passing file descriptors for block " + blk);
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, blk, token,
  Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ,
  null, null);
origin: org.apache.hadoop/hadoop-hdfs

  block);
final DataOutputStream out = new DataOutputStream(getOutputStream());
checkAccess(out, true, block, blockToken, Op.BLOCK_GROUP_CHECKSUM,
  BlockTokenIdentifier.AccessMode.READ);
origin: org.apache.hadoop/hadoop-hdfs

updateCurrentThreadName("Copying block " + block);
DataOutputStream reply = getBufferedOutputStream();
checkAccess(reply, true, block, blockToken, Op.COPY_BLOCK,
  BlockTokenIdentifier.AccessMode.COPY);
origin: org.apache.hadoop/hadoop-hdfs

OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken, Op.READ_BLOCK,
  BlockTokenIdentifier.AccessMode.READ);
origin: org.apache.hadoop/hadoop-hdfs

 storageIds = new String[0];
checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
  BlockTokenIdentifier.AccessMode.WRITE,
  storageTypes, storageIds);
origin: ch.cern.hadoop/hadoop-hdfs

@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes) throws IOException {
 // Record the requesting client and label this worker thread for
 // diagnostics before any I/O happens.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Stream used for the status reply; released in the finally block.
 final DataOutputStream reply = new DataOutputStream(
   getOutputStream());
 // Token check first — on failure it writes the error response itself.
 checkAccess(reply, true, blk, blockToken,
   Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
 try {
  // Ship the replica to the given targets as part of pipeline recovery.
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, clientName);
  writeResponse(Status.SUCCESS, null, reply);
 } catch (IOException ioe) {
  // NOTE: concatenation kept as-is — this Hadoop line logs via an API
  // without parameterized messages.
  LOG.info("transferBlock " + blk + " received exception " + ioe);
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  // Best-effort close; closeStream suppresses close-time exceptions.
  IOUtils.closeStream(reply);
 }
}
origin: io.prestosql.hadoop/hadoop-apache

@Override
public void transferBlock(final ExtendedBlock blk,
  final Token<BlockTokenIdentifier> blockToken,
  final String clientName,
  final DatanodeInfo[] targets,
  final StorageType[] targetStorageTypes) throws IOException {
 // Track the initiating client and rename the worker thread so the
 // current operation is visible in thread dumps.
 previousOpClientName = clientName;
 updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
 // Reply stream to the requester; always closed in the finally block.
 final DataOutputStream out = new DataOutputStream(
   getOutputStream());
 // Verify the block token (COPY mode) before transferring; on an invalid
 // token checkAccess writes the error reply and throws.
 checkAccess(out, true, blk, blockToken,
   Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
 try {
  // Transfer the replica to the target datanodes for pipeline recovery.
  datanode.transferReplicaForPipelineRecovery(blk, targets,
    targetStorageTypes, clientName);
  writeResponse(Status.SUCCESS, null, out);
 } catch (IOException ioe) {
  // Count the failure as a datanode network error, then rethrow.
  LOG.info("transferBlock " + blk + " received exception " + ioe);
  incrDatanodeNetworkErrors();
  throw ioe;
 } finally {
  // closeStream is exception-safe (swallows close failures).
  IOUtils.closeStream(out);
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken,
  Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
origin: ch.cern.hadoop/hadoop-hdfs

OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken,
  Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
origin: ch.cern.hadoop/hadoop-hdfs

updateCurrentThreadName("Passing file descriptors for block " + blk);
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, blk, token,
  Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenSecretManager.AccessMode.READ);
BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
origin: ch.cern.hadoop/hadoop-hdfs

updateCurrentThreadName("Copying block " + block);
DataOutputStream reply = getBufferedOutputStream();
checkAccess(reply, true, block, blockToken,
  Op.COPY_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
origin: io.prestosql.hadoop/hadoop-apache

updateCurrentThreadName("Replacing block " + block + " from " + delHint);
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
checkAccess(replyOut, true, block, blockToken,
  Op.REPLACE_BLOCK, BlockTokenSecretManager.AccessMode.REPLACE);
origin: io.prestosql.hadoop/hadoop-apache

final DataOutputStream out = new DataOutputStream(
  getOutputStream());
checkAccess(out, true, block, blockToken,
  Op.BLOCK_CHECKSUM, BlockTokenSecretManager.AccessMode.READ);
origin: io.prestosql.hadoop/hadoop-apache

updateCurrentThreadName("Passing file descriptors for block " + blk);
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, blk, token,
  Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenSecretManager.AccessMode.READ);
BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
origin: io.prestosql.hadoop/hadoop-apache

OutputStream baseStream = getOutputStream();
DataOutputStream out = getBufferedOutputStream();
checkAccess(out, true, block, blockToken,
  Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
origin: io.prestosql.hadoop/hadoop-apache

updateCurrentThreadName("Copying block " + block);
DataOutputStream reply = getBufferedOutputStream();
checkAccess(reply, true, block, blockToken,
  Op.COPY_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
org.apache.hadoop.hdfs.server.datanodeDataXceivercheckAccess

Popular methods of DataXceiver

  • <init>
  • sendResponse
    Utility function for sending a response.
  • updateCurrentThreadName
    Update the current thread's name to contain the current status. Use this only after this receiver has started its connection.
  • checkAndWaitForBP
    Wait until the BP is registered, up to the configured amount of time. Throws an exception if it times out.
  • create
  • elapsed
  • getBufferedOutputStream
    Separated for testing.
  • getOutputStream
  • incrDatanodeNetworkErrors
  • processOp
  • readOp
  • releaseSocket
  • readOp,
  • releaseSocket,
  • sendOOB,
  • sendShmErrorResponse,
  • sendShmSuccessResponse,
  • writeResponse,
  • writeSuccessWithChecksumInfo,
  • calcPartialBlockChecksum,
  • copyBlock

Popular in Java

  • Finding current android device location
  • getExternalFilesDir (Context)
  • notifyDataSetChanged (ArrayAdapter)
  • compareTo (BigDecimal)
  • BufferedWriter (java.io)
    Wraps an existing Writer and buffers the output. Expensive interaction with the underlying writer is minimized.
  • File (java.io)
    An "abstract" representation of a file system entity identified by a pathname. The pathname may be absolute or relative.
  • HashMap (java.util)
    HashMap is an implementation of Map. All optional operations are supported. All elements are permitted as keys or values, including null.
  • Iterator (java.util)
    An iterator over a sequence of objects, such as a collection. If a collection has been changed since the iterator was created, iteration methods may throw a ConcurrentModificationException.
  • TimerTask (java.util)
    The TimerTask class represents a task to run at a specified time. The task may be run once or repeatedly.
  • JLabel (javax.swing)
  • Top 12 Jupyter Notebook Extensions
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyStudentsTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now