org.apache.hadoop.hdfs.protocol.proto

How to use org.apache.hadoop.hdfs.protocol.proto

Best Java code snippets using org.apache.hadoop.hdfs.protocol.proto (Showing top 20 results out of 315)
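All of the classes in this package are generated by protoc, so they share the same builder / serialize / parse pattern that the snippets below rely on. As a minimal, hedged sketch of that pattern (class names are the real generated ones; the stream handling and values are only illustrative, assuming the unshaded com.google.protobuf runtime these snippets use):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;

import com.google.protobuf.ByteString;

public class ProtoRoundTripSketch {
 public static void main(String[] args) throws IOException {
  // Build an immutable message through the generated Builder.
  DataTransferEncryptorMessageProto msg = DataTransferEncryptorMessageProto.newBuilder()
    .setStatus(DataTransferEncryptorStatus.SUCCESS)
    .setPayload(ByteString.copyFrom(new byte[] { 1, 2, 3 }))
    .build();

  // Write it length-delimited (varint32 size prefix), the framing the snippets below use on the wire.
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  msg.writeDelimitedTo(out);

  // Parse it back from the delimited form.
  DataTransferEncryptorMessageProto parsed =
    DataTransferEncryptorMessageProto.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
  System.out.println(parsed.getStatus());
 }
}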

origin: apache/hbase

private void check(DataTransferEncryptorMessageProto proto) throws IOException {
 if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
  dfsClient.clearDataEncryptionKey();
  throw new InvalidEncryptionKeyException(proto.getMessage());
 } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
  throw new IOException(proto.getMessage());
 }
}
origin: apache/hbase

private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload,
  List<CipherOption> options) throws IOException {
 DataTransferEncryptorMessageProto.Builder builder =
   DataTransferEncryptorMessageProto.newBuilder();
 builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
 if (payload != null) {
  // Was ByteStringer; fix w/o using ByteStringer. It's in hbase-protocol
  // and we want to keep that out of hbase-server.
  builder.setPayload(ByteString.copyFrom(payload));
 }
 if (options != null) {
  builder.addAllCipherOption(PB_HELPER.convertCipherOptions(options));
 }
 DataTransferEncryptorMessageProto proto = builder.build();
 int size = proto.getSerializedSize();
 size += CodedOutputStream.computeRawVarint32Size(size);
 ByteBuf buf = ctx.alloc().buffer(size);
 proto.writeDelimitedTo(new ByteBufOutputStream(buf));
 ctx.write(buf);
}
origin: apache/hbase

private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto,
  boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
 List<CipherOption> cipherOptions =
   PB_HELPER.convertCipherOptionProtos(proto.getCipherOptionList());
 if (cipherOptions == null || cipherOptions.isEmpty()) {
  return null;
 }
 CipherOption cipherOption = cipherOptions.get(0);
 return isNegotiatedQopPrivacy ? unwrap(cipherOption, saslClient) : cipherOption;
}
origin: apache/hbase

ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
blockCopy.setNumBytes(locatedBlock.getBlockSize());
ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
  .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
    .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
  .setClientName(clientName).build();
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
  .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
  .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
  .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
  .setRequestedChecksum(checksumProto)
  .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
for (int i = 0; i < datanodeInfos.length; i++) {
 // ... loop body truncated in this snippet
}
origin: org.apache.hadoop/hadoop-hdfs

private void initFields() {
 cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
 balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
 blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
 recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
 finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
 keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
 registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
 blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
 blkECReconstructionCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
origin: apache/hbase

@Override
protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
  throws Exception {
 Status pipelineStatus = resp.getStatus();
 if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
  throw new IOException("datanode " + dnInfo + " is restarting");
 }
 String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
 if (resp.getStatus() != Status.SUCCESS) {
  if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
   throw new InvalidBlockTokenException("Got access token error" + ", status message " +
     resp.getMessage() + ", " + logInfo);
  } else {
   throw new IOException("Got error" + ", status=" + resp.getStatus().name() +
     ", status message " + resp.getMessage() + ", " + logInfo);
  }
 }
 // ... rest of the handler truncated in this snippet
}
origin: apache/hbase

private static void requestWriteBlock(Channel channel, Enum<?> storageType,
  OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
 OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
 int protoLen = proto.getSerializedSize();
 ByteBuf buffer =
   channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
 buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
 buffer.writeByte(Op.WRITE_BLOCK.code);
 proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
 channel.writeAndFlush(buffer);
}
origin: org.apache.hadoop/hadoop-hdfs

private void initFields() {
 newGenStamp_ = 0L;
 block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
 truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
 ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
 blockIndices_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
origin: org.apache.hadoop/hadoop-hdfs

public final boolean isInitialized() {
 if (hasMarker()) {
  if (!getMarker().isInitialized()) {
   
   return false;
  }
 }
 return true;
}
origin: org.apache.hadoop/hadoop-hdfs

public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto other) {
 if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance()) return this;
 if (other.hasBlockPoolId()) {
  bitField0_ |= 0x00000001;
  blockPoolId_ = other.blockPoolId_;
  onChanged();
 }
 this.mergeUnknownFields(other.getUnknownFields());
 return this;
}
origin: apache/hbase

@Override
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
 Status reply = getStatus(ack);
 if (reply != Status.SUCCESS) {
  failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " +
   block + " from datanode " + ctx.channel().remoteAddress()));
  return;
 }
 if (PipelineAck.isRestartOOBStatus(reply)) {
  failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " +
   block + " from datanode " + ctx.channel().remoteAddress()));
  return;
 }
 if (ack.getSeqno() == HEART_BEAT_SEQNO) {
  return;
 }
 completed(ctx.channel());
}
origin: apache/hbase

private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs,
  String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
  DFSClient dfsClient) {
 try {
  channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
   new ProtobufVarint32FrameDecoder(),
   new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()),
   new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise,
     dfsClient));
 } catch (SaslException e) {
  saslPromise.tryFailure(e);
 }
}
origin: apache/hbase

channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
 new ProtobufVarint32FrameDecoder(),
 new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
 new SimpleChannelInboundHandler<BlockOpResponseProto>() {
  // ... anonymous handler body truncated in this snippet
 });
origin: apache/hbase

private void setupReceiver(int timeoutMs) {
 AckHandler ackHandler = new AckHandler(timeoutMs);
 for (Channel ch : datanodeList) {
  ch.pipeline().addLast(
   new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS),
   new ProtobufVarint32FrameDecoder(),
   new ProtobufDecoder(PipelineAckProto.getDefaultInstance()), ackHandler);
  ch.config().setAutoRead(true);
 }
}
origin: apache/hbase

DataTransferEncryptorMessageProto proto = (DataTransferEncryptorMessageProto) msg;
check(proto);
byte[] challenge = proto.getPayload().toByteArray();
byte[] response = saslClient.evaluateChallenge(challenge);
switch (step) {
 // ... SASL negotiation steps truncated in this snippet
}
origin: org.apache.hadoop/hadoop-hdfs

public final boolean isInitialized() {
 if (!hasKeyValuePair()) {
  
  return false;
 }
 if (!getKeyValuePair().isInitialized()) {
  
  return false;
 }
 return true;
}
origin: org.apache.hadoop/hadoop-hdfs

public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto build() {
 org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto result = buildPartial();
 if (!result.isInitialized()) {
  throw newUninitializedMessageException(result);
 }
 return result;
}
origin: org.apache.hadoop/hadoop-hdfs

public final boolean isInitialized() {
 if (hasValue()) {
  if (!getValue().isInitialized()) {
   
   return false;
  }
 }
 return true;
}
origin: org.apache.hadoop/hadoop-hdfs

public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto build() {
 org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto result = buildPartial();
 if (!result.isInitialized()) {
  throw newUninitializedMessageException(result);
 }
 return result;
}
origin: apache/hbase

private static StorageTypeSetter createStorageTypeSetter() throws NoSuchMethodException {
 Method setStorageTypeMethod =
   OpWriteBlockProto.Builder.class.getMethod("setStorageType", StorageTypeProto.class);
 ImmutableMap.Builder<String, StorageTypeProto> builder = ImmutableMap.builder();
 for (StorageTypeProto storageTypeProto : StorageTypeProto.values()) {
  builder.put(storageTypeProto.name(), storageTypeProto);
 }
 ImmutableMap<String, StorageTypeProto> name2ProtoEnum = builder.build();
 return new StorageTypeSetter() {
  @Override
  public OpWriteBlockProto.Builder set(OpWriteBlockProto.Builder builder, Enum<?> storageType) {
   Object protoEnum = name2ProtoEnum.get(storageType.name());
   try {
    setStorageTypeMethod.invoke(builder, protoEnum);
   } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    throw new RuntimeException(e);
   }
   return builder;
  }
 };
}
org.apache.hadoop.hdfs.protocol.proto

Most used classes

  • DataTransferProtos$BlockOpResponseProto
    Protobuf type hadoop.hdfs.BlockOpResponseProto
  • ClientDatanodeProtocolProtos$ClientDatanodeProtocolService
    Protobuf service hadoop.hdfs.ClientDatanodeProtocolService. Protocol used from client to the Datanode.
  • ClientDatanodeProtocolProtos$GetReplicaVisibleLengthRequestProto
    Protobuf type hadoop.hdfs.GetReplicaVisibleLengthRequestProto. block - block for which visible length is requested.
  • ClientDatanodeProtocolProtos$GetReplicaVisibleLengthResponseProto$Builder
    Protobuf type hadoop.hdfs.GetReplicaVisibleLengthResponseProto. length - visible length of the block.
  • ClientDatanodeProtocolProtos$GetReplicaVisibleLengthResponseProto
    Protobuf type hadoop.hdfs.GetReplicaVisibleLengthResponseProto. length - visible length of the block.
  • DataTransferProtos$BlockOpResponseProto$Builder
  • DataTransferProtos$CachingStrategyProto
  • DataTransferProtos$ClientOperationHeaderProto
  • DataTransferProtos$OpWriteBlockProto
  • DataTransferProtos$PipelineAckProto
  • DataTransferProtos$ReadOpChecksumInfoProto$Builder
  • DataTransferProtos$ReadOpChecksumInfoProto
  • DataTransferProtos$Status
  • HdfsProtos$BlockProto$Builder
  • HdfsProtos$BlockProto
  • HdfsProtos$ExtendedBlockProto
  • HdfsProtos$LocatedBlockProto
  • HdfsProtos$StorageTypeProto
  • AclProtos$AclEntryProto$Builder
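To show how the request/response pairs above fit together, here is a hedged sketch that builds a GetReplicaVisibleLengthRequestProto around an ExtendedBlockProto and reads the length back from a response. The pool id, block id, generation stamp, and length values are made-up example numbers; only the message and field names come from the generated classes, and in real use the request would travel over the ClientDatanodeProtocolService RPC rather than being answered locally.

import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetReplicaVisibleLengthResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

public class VisibleLengthSketch {
 public static void main(String[] args) {
  // Identify the replica being asked about (example values, not a real block).
  ExtendedBlockProto block = ExtendedBlockProto.newBuilder()
    .setPoolId("BP-example-pool")
    .setBlockId(1073741825L)
    .setGenerationStamp(1001L)
    .build();

  // Request: the block for which the visible length is requested.
  GetReplicaVisibleLengthRequestProto request =
    GetReplicaVisibleLengthRequestProto.newBuilder().setBlock(block).build();

  // A fabricated response, only to show the accessor; a datanode would normally produce this.
  GetReplicaVisibleLengthResponseProto response =
    GetReplicaVisibleLengthResponseProto.newBuilder().setLength(134217728L).build();

  System.out.println("visible length of block " + request.getBlock().getBlockId() +
    " = " + response.getLength());
 }
}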