/**
 * Issues the acceptRecovery RPC against the remote journal.
 *
 * @param reqInfo request metadata forwarded to the server (converted to its proto form)
 * @param stateToAccept details on the segment to recover
 * @param fromUrl location the segment can be fetched from, sent in external form
 * @throws IOException if the underlying protobuf RPC fails; the ServiceException
 *     is unwrapped into the remote exception it carries
 */
@Override
public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto stateToAccept,
    URL fromUrl) throws IOException {
  // Build the request eagerly so the try block covers only the RPC itself.
  AcceptRecoveryRequestProto request = AcceptRecoveryRequestProto.newBuilder()
      .setReqInfo(convert(reqInfo))
      .setStateToAccept(stateToAccept)
      .setFromURL(fromUrl.toExternalForm())
      .build();
  try {
    rpcProxy.acceptRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Lazily creates the single-field builder for {@code stateToAccept},
 * handing ownership of the current message value to the builder.
 */
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
    getStateToAcceptFieldBuilder() {
  if (stateToAcceptBuilder_ != null) {
    return stateToAcceptBuilder_;
  }
  stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
          stateToAccept_, getParentForChildren(), isClean());
  // The builder now owns the value; drop the direct reference.
  stateToAccept_ = null;
  return stateToAcceptBuilder_;
}
/**
 * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
 *
 * Marks the field as present and returns its nested builder for in-place edits.
 */
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder
    getReqInfoBuilder() {
  bitField0_ |= 0x00000001;  // record that reqInfo has been set
  onChanged();
  return getReqInfoFieldBuilder().getBuilder();
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Lazily creates the single-field builder for {@code stateToAccept},
 * handing ownership of the current message value to the builder.
 */
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
    getStateToAcceptFieldBuilder() {
  if (stateToAcceptBuilder_ != null) {
    return stateToAcceptBuilder_;
  }
  stateToAcceptBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
          stateToAccept_, getParentForChildren(), isClean());
  // The builder now owns the value; drop the direct reference.
  stateToAccept_ = null;
  return stateToAcceptBuilder_;
}
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Returns the lazily-constructed single-field builder for {@code stateToAccept}.
 */
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>
    getStateToAcceptFieldBuilder() {
  if (stateToAcceptBuilder_ == null) {
    stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder,
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
            stateToAccept_, getParentForChildren(), isClean());
    stateToAccept_ = null;  // ownership transferred to the builder
  }
  return stateToAcceptBuilder_;
}
/**
 * Returns true only when all three required fields are present and the two
 * nested message fields are themselves fully initialized. Checks run in the
 * same short-circuit order as the generated original.
 */
public final boolean isInitialized() {
  return hasReqInfo()
      && hasStateToAccept()
      && hasFromURL()
      && getReqInfo().isInitialized()
      && getStateToAccept().isInitialized();
}
/**
 * Returns true only when all three required fields are present and the two
 * nested message fields are themselves fully initialized. Checks run in the
 * same short-circuit order as the generated original.
 */
public final boolean isInitialized() {
  return hasReqInfo()
      && hasStateToAccept()
      && hasFromURL()
      && getReqInfo().isInitialized()
      && getStateToAccept().isInitialized();
}
/**
 * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
 *
 * Lazily creates the single-field builder for {@code reqInfo},
 * handing ownership of the current message value to the builder.
 */
private com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
    getReqInfoFieldBuilder() {
  if (reqInfoBuilder_ != null) {
    return reqInfoBuilder_;
  }
  reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
          reqInfo_, getParentForChildren(), isClean());
  // The builder now owns the value; drop the direct reference.
  reqInfo_ = null;
  return reqInfoBuilder_;
}
/**
 * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
 *
 * Lazily creates the single-field builder for {@code reqInfo},
 * handing ownership of the current message value to the builder.
 */
private io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder,
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
    getReqInfoFieldBuilder() {
  if (reqInfoBuilder_ != null) {
    return reqInfoBuilder_;
  }
  reqInfoBuilder_ = new io.prestosql.hadoop.$internal.com.google.protobuf.SingleFieldBuilder<
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder,
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
          reqInfo_, getParentForChildren(), isClean());
  // The builder now owns the value; drop the direct reference.
  reqInfo_ = null;
  return reqInfoBuilder_;
}
// Factory for a fresh top-level Builder (no parent attached).
private static Builder create() { return new Builder(); }
// Factory for a fresh top-level Builder (no parent attached).
private static Builder create() { return new Builder(); }
/**
 * Issues the acceptRecovery RPC against the remote journal.
 *
 * @param reqInfo request metadata forwarded to the server (converted to its proto form)
 * @param stateToAccept details on the segment to recover
 * @param fromUrl location the segment can be fetched from, sent in external form
 * @throws IOException if the underlying protobuf RPC fails; the ServiceException
 *     is unwrapped into the remote exception it carries
 */
@Override
public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto stateToAccept,
    URL fromUrl) throws IOException {
  // Build the request eagerly so the try block covers only the RPC itself.
  AcceptRecoveryRequestProto request = AcceptRecoveryRequestProto.newBuilder()
      .setReqInfo(convert(reqInfo))
      .setStateToAccept(stateToAccept)
      .setFromURL(fromUrl.toExternalForm())
      .build();
  try {
    rpcProxy.acceptRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
/**
 * Returns a copy of this builder, produced by merging a partial build of the
 * current state into a fresh builder.
 */
public Builder clone() {
  Builder copy = create();
  // mergeFrom returns the receiver, so returning copy matches the original.
  copy.mergeFrom(buildPartial());
  return copy;
}
/**
 * Issues the acceptRecovery RPC against the remote journal.
 *
 * @param reqInfo request metadata forwarded to the server (converted to its proto form)
 * @param stateToAccept details on the segment to recover
 * @param fromUrl location the segment can be fetched from, sent in external form
 * @throws IOException if the underlying protobuf RPC fails; the ServiceException
 *     is unwrapped into the remote exception it carries
 */
@Override
public void acceptRecovery(RequestInfo reqInfo, SegmentStateProto stateToAccept,
    URL fromUrl) throws IOException {
  // Build the request eagerly so the try block covers only the RPC itself.
  AcceptRecoveryRequestProto request = AcceptRecoveryRequestProto.newBuilder()
      .setReqInfo(convert(reqInfo))
      .setStateToAccept(stateToAccept)
      .setFromURL(fromUrl.toExternalForm())
      .build();
  try {
    rpcProxy.acceptRecovery(NULL_CONTROLLER, request);
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
/**
 * Returns a copy of this builder, produced by merging a partial build of the
 * current state into a fresh builder.
 */
public Builder clone() {
  Builder copy = create();
  // mergeFrom returns the receiver, so returning copy matches the original.
  copy.mergeFrom(buildPartial());
  return copy;
}
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Marks the field as present and returns its nested builder for in-place edits.
 */
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder
    getStateToAcceptBuilder() {
  bitField0_ |= 0x00000002;  // record that stateToAccept has been set
  onChanged();
  return getStateToAcceptFieldBuilder().getBuilder();
}
/**
/**
 * Creates a Builder attached to the given parent.
 */
@java.lang.Override
protected Builder newBuilderForType(
    io.prestosql.hadoop.$internal.com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  // Return directly; the intermediate local in the generated code adds nothing.
  return new Builder(parent);
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
 *
 * Marks the field as present and returns its nested builder for in-place edits.
 */
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder
    getReqInfoBuilder() {
  bitField0_ |= 0x00000001;  // record that reqInfo has been set
  onChanged();
  return getReqInfoFieldBuilder().getBuilder();
}
/**
/**
 * Creates a Builder attached to the given parent.
 */
@java.lang.Override
protected Builder newBuilderForType(
    com.google.protobuf.GeneratedMessage.BuilderParent parent) {
  // Return directly; the intermediate local in the generated code adds nothing.
  return new Builder(parent);
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Marks the field as present and returns its nested builder for in-place edits.
 */
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder
    getStateToAcceptBuilder() {
  bitField0_ |= 0x00000002;  // record that stateToAccept has been set
  onChanged();
  return getStateToAcceptFieldBuilder().getBuilder();
}
/**