private void check(DataTransferEncryptorMessageProto proto) throws IOException {
  // Translate SASL negotiation error statuses reported by the datanode into exceptions.
  // An unknown-key error means our cached data encryption key is stale, so drop it
  // before failing; the caller can then fetch a fresh key and retry.
  if (proto.getStatus() == DataTransferEncryptorStatus.ERROR_UNKNOWN_KEY) {
    dfsClient.clearDataEncryptionKey();
    throw new InvalidEncryptionKeyException(proto.getMessage());
  } else if (proto.getStatus() == DataTransferEncryptorStatus.ERROR) {
    throw new IOException(proto.getMessage());
  }
}
private void sendSaslMessage(ChannelHandlerContext ctx, byte[] payload,
    List<CipherOption> options) throws IOException {
  DataTransferEncryptorMessageProto.Builder builder =
      DataTransferEncryptorMessageProto.newBuilder();
  builder.setStatus(DataTransferEncryptorStatus.SUCCESS);
  if (payload != null) {
    // Was ByteStringer; fix w/o using ByteStringer. It's in hbase-protocol
    // and we want to keep that out of hbase-server.
    builder.setPayload(ByteString.copyFrom(payload));
  }
  if (options != null) {
    builder.addAllCipherOption(PB_HELPER.convertCipherOptions(options));
  }
  DataTransferEncryptorMessageProto proto = builder.build();
  // Size the buffer for the varint length prefix that writeDelimitedTo emits,
  // plus the serialized message itself.
  int size = proto.getSerializedSize();
  size += CodedOutputStream.computeRawVarint32Size(size);
  ByteBuf buf = ctx.alloc().buffer(size);
  proto.writeDelimitedTo(new ByteBufOutputStream(buf));
  ctx.write(buf);
}
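// ---- Illustrative sketch (not in the original source) ----
// A minimal, self-contained check of the sizing logic above: the delimited form is
// exactly computeRawVarint32Size(n) + n bytes for an n-byte message. Uses only the
// protobuf 2.x APIs already referenced by this class; the method name is hypothetical.
static void assertDelimitedSizing(com.google.protobuf.Message msg) throws java.io.IOException {
  int bodyLen = msg.getSerializedSize();
  int framedLen = com.google.protobuf.CodedOutputStream.computeRawVarint32Size(bodyLen) + bodyLen;
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  msg.writeDelimitedTo(out); // varint length prefix followed by the message bytes
  assert out.size() == framedLen : "framing size mismatch";
}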
private CipherOption getCipherOption(DataTransferEncryptorMessageProto proto,
    boolean isNegotiatedQopPrivacy, SaslClient saslClient) throws IOException {
  List<CipherOption> cipherOptions =
      PB_HELPER.convertCipherOptionProtos(proto.getCipherOptionList());
  if (cipherOptions == null || cipherOptions.isEmpty()) {
    return null;
  }
  CipherOption cipherOption = cipherOptions.get(0);
  return isNegotiatedQopPrivacy ? unwrap(cipherOption, saslClient) : cipherOption;
}
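// ---- Illustrative sketch (not in the original source) ----
// The unwrap helper referenced above is not shown in this excerpt. A plausible
// reconstruction, following the pattern of HDFS's SaslDataTransferClient: when the
// negotiated QOP is auth-conf (privacy), the datanode sends the cipher keys wrapped
// by the SASL layer, so they must be unwrapped before use. Treat this as an
// assumption, not the verbatim original.
private static CipherOption unwrap(CipherOption option, SaslClient saslClient)
    throws IOException {
  byte[] inKey = option.getInKey();
  if (inKey != null) {
    inKey = saslClient.unwrap(inKey, 0, inKey.length);
  }
  byte[] outKey = option.getOutKey();
  if (outKey != null) {
    outKey = saslClient.unwrap(outKey, 0, outKey.length);
  }
  return new CipherOption(option.getCipherSuite(), inKey, option.getInIv(), outKey,
      option.getOutIv());
}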
ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
blockCopy.setNumBytes(locatedBlock.getBlockSize());
ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
    .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PB_HELPER.convert(blockCopy))
        .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
    .setClientName(clientName).build();
ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
    .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
    .setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
    .setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS)
    .setRequestedChecksum(checksumProto)
    .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
for (int i = 0; i < datanodeInfos.length; i++) {
private void initFields() {
  cmdType_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto.Type.BalancerBandwidthCommand;
  balancerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto.getDefaultInstance();
  blkCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto.getDefaultInstance();
  recoveryCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto.getDefaultInstance();
  finalizeCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto.getDefaultInstance();
  keyUpdateCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto.getDefaultInstance();
  registerCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto.getDefaultInstance();
  blkIdCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto.getDefaultInstance();
  blkECReconstructionCmd_ = org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
@Override
protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp)
    throws Exception {
  Status pipelineStatus = resp.getStatus();
  if (PipelineAck.isRestartOOBStatus(pipelineStatus)) {
    throw new IOException("datanode " + dnInfo + " is restarting");
  }
  String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink();
  if (resp.getStatus() != Status.SUCCESS) {
    if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) {
      throw new InvalidBlockTokenException("Got access token error" + ", status message "
          + resp.getMessage() + ", " + logInfo);
    } else {
      throw new IOException("Got error" + ", status=" + resp.getStatus().name()
          + ", status message " + resp.getMessage() + ", " + logInfo);
    }
  }
private static void requestWriteBlock(Channel channel, Enum<?> storageType,
    OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
  OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
  int protoLen = proto.getSerializedSize();
  // Frame layout: 2-byte transfer version + 1-byte opcode (hence the 3), followed by
  // the varint-delimited request proto.
  ByteBuf buffer =
      channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
  buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
  buffer.writeByte(Op.WRITE_BLOCK.code);
  proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
  channel.writeAndFlush(buffer);
}
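// ---- Illustrative sketch (not in the original source) ----
// The matching read side of the framing above, written against a plain DataInputStream
// purely to document the wire layout; the real receiver is the HDFS DataNode. The
// method name is hypothetical.
static OpWriteBlockProto readWriteBlockRequest(java.io.DataInputStream in)
    throws java.io.IOException {
  short version = in.readShort(); // expected: DataTransferProtocol.DATA_TRANSFER_VERSION
  byte opcode = in.readByte();    // expected: Op.WRITE_BLOCK.code
  if (version != DataTransferProtocol.DATA_TRANSFER_VERSION || opcode != Op.WRITE_BLOCK.code) {
    throw new java.io.IOException("Unexpected version/op: " + version + "/" + opcode);
  }
  return OpWriteBlockProto.parseDelimitedFrom(in);
}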
private void initFields() {
  newGenStamp_ = 0L;
  block_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.getDefaultInstance();
  truncateBlock_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto.getDefaultInstance();
  ecPolicy_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto.getDefaultInstance();
  blockIndices_ = com.google.protobuf.ByteString.EMPTY;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
  if (hasMarker()) {
    if (!getMarker().isInitialized()) {
      return false;
    }
  }
  return true;
}
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto other) {
  if (other == org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolResponseProto.getDefaultInstance()) return this;
  if (other.hasBlockPoolId()) {
    bitField0_ |= 0x00000001;
    blockPoolId_ = other.blockPoolId_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception {
  Status reply = getStatus(ack);
  if (reply != Status.SUCCESS) {
    failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block
        + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (PipelineAck.isRestartOOBStatus(reply)) {
    failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + block
        + " from datanode " + ctx.channel().remoteAddress()));
    return;
  }
  if (ack.getSeqno() == HEART_BEAT_SEQNO) {
    // Heartbeat acks carry the sentinel seqno and acknowledge no data; ignore them.
    return;
  }
  completed(ctx.channel());
}
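// ---- Illustrative sketch (not in the original source) ----
// getStatus(ack) above is not shown in this excerpt. A plausible reconstruction: newer
// datanodes pack per-replica status into the header-flag list, older ones report it in
// the plain reply list, so consult the flag list first. An assumption, not the verbatim
// helper.
private Status getStatus(PipelineAckProto ack) {
  List<Integer> flagList = ack.getFlagList();
  if (flagList.isEmpty()) {
    return ack.getReply(0);
  }
  return PipelineAck.getStatusFromHeader(flagList.get(0));
}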
private static void doSaslNegotiation(Configuration conf, Channel channel, int timeoutMs,
    String username, char[] password, Map<String, String> saslProps, Promise<Void> saslPromise,
    DFSClient dfsClient) {
  try {
    channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
      new ProtobufVarint32FrameDecoder(),
      new ProtobufDecoder(DataTransferEncryptorMessageProto.getDefaultInstance()),
      new SaslNegotiateHandler(conf, username, password, saslProps, timeoutMs, saslPromise,
          dfsClient));
  } catch (SaslException e) {
    saslPromise.tryFailure(e);
  }
}
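// ---- Illustrative usage sketch (not in the original source) ----
// The handshake is asynchronous: callers learn the outcome through saslPromise rather
// than a return value. A hypothetical caller might wire it up like this:
Promise<Void> saslPromise = channel.eventLoop().newPromise();
doSaslNegotiation(conf, channel, timeoutMs, username, password, saslProps, saslPromise,
  dfsClient);
saslPromise.addListener(f -> {
  if (f.isSuccess()) {
    // the negotiation handlers are expected to remove themselves from the pipeline;
    // the channel is now ready for data transfer
  } else {
    channel.close();
  }
});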
channel.pipeline().addLast(new IdleStateHandler(timeoutMs, 0, 0, TimeUnit.MILLISECONDS),
  new ProtobufVarint32FrameDecoder(),
  new ProtobufDecoder(BlockOpResponseProto.getDefaultInstance()),
  new SimpleChannelInboundHandler<BlockOpResponseProto>() {
private void setupReceiver(int timeoutMs) {
  AckHandler ackHandler = new AckHandler(timeoutMs);
  for (Channel ch : datanodeList) {
    // IdleStateHandler(readerIdleTime = timeoutMs, writerIdleTime = timeoutMs / 2,
    // allIdleTime = 0): idle events fire if no ack is read in time, or if nothing has
    // been written for half the timeout.
    ch.pipeline().addLast(
      new IdleStateHandler(timeoutMs, timeoutMs / 2, 0, TimeUnit.MILLISECONDS),
      new ProtobufVarint32FrameDecoder(),
      new ProtobufDecoder(PipelineAckProto.getDefaultInstance()), ackHandler);
    ch.config().setAutoRead(true);
  }
}
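// ---- Illustrative sketch (not in the original source) ----
// How the IdleStateHandler installed above typically surfaces timeouts: it fires an
// IdleStateEvent through userEventTriggered, which an ack handler can turn into a
// failure (reader idle) or a keep-alive (writer idle). A plausible companion override
// for a handler like AckHandler, shown as an assumption rather than the original body;
// failed(...) and timeoutMs come from the surrounding class, and IdleStateEvent /
// IdleState are io.netty.handler.timeout types.
@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
  if (evt instanceof IdleStateEvent) {
    IdleStateEvent e = (IdleStateEvent) evt;
    if (e.state() == IdleState.READER_IDLE) {
      failed(ctx.channel(),
        () -> new IOException("Timeout " + timeoutMs + "ms waiting for response"));
    } else if (e.state() == IdleState.WRITER_IDLE) {
      // hypothetical hook: send a heartbeat to keep the pipeline alive
    }
  } else {
    super.userEventTriggered(ctx, evt);
  }
}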
DataTransferEncryptorMessageProto proto = (DataTransferEncryptorMessageProto) msg;
check(proto);
byte[] challenge = proto.getPayload().toByteArray();
byte[] response = saslClient.evaluateChallenge(challenge);
switch (step) {
public final boolean isInitialized() {
  if (!hasKeyValuePair()) {
    return false;
  }
  if (!getKeyValuePair().isInitialized()) {
    return false;
  }
  return true;
}
public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto build() {
  org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
public final boolean isInitialized() {
  if (hasValue()) {
    if (!getValue().isInitialized()) {
      return false;
    }
  }
  return true;
}
public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto build() {
  org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto result = buildPartial();
  if (!result.isInitialized()) {
    throw newUninitializedMessageException(result);
  }
  return result;
}
private static StorageTypeSetter createStorageTypeSetter() throws NoSuchMethodException {
  // Look the setter up reflectively and bridge enums by constant name, presumably so
  // this code tolerates StorageTypeProto living in different classes across Hadoop
  // versions on the classpath.
  Method setStorageTypeMethod =
      OpWriteBlockProto.Builder.class.getMethod("setStorageType", StorageTypeProto.class);
  ImmutableMap.Builder<String, StorageTypeProto> builder = ImmutableMap.builder();
  for (StorageTypeProto storageTypeProto : StorageTypeProto.values()) {
    builder.put(storageTypeProto.name(), storageTypeProto);
  }
  ImmutableMap<String, StorageTypeProto> name2ProtoEnum = builder.build();
  return new StorageTypeSetter() {
    @Override
    public OpWriteBlockProto.Builder set(OpWriteBlockProto.Builder builder, Enum<?> storageType) {
      Object protoEnum = name2ProtoEnum.get(storageType.name());
      try {
        setStorageTypeMethod.invoke(builder, protoEnum);
      } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
        throw new RuntimeException(e);
      }
      return builder;
    }
  };
}
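// ---- Illustrative usage sketch (not in the original source) ----
// Hypothetical caller of the setter created above: it passes whichever StorageType enum
// its Hadoop version exposes, and the setter resolves the matching StorageTypeProto
// constant by name before invoking the builder method reflectively.
StorageTypeSetter setter = createStorageTypeSetter();
OpWriteBlockProto.Builder b = OpWriteBlockProto.newBuilder();
setter.set(b, org.apache.hadoop.fs.StorageType.DISK); // resolves StorageTypeProto.DISK by name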