/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) { if (reqInfoBuilder_ == null) { if (value == null) { throw new NullPointerException(); } reqInfo_ = value; onChanged(); } else { reqInfoBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) { if (stateToAcceptBuilder_ == null) { if (value == null) { throw new NullPointerException(); } stateToAccept_ = value; onChanged(); } else { stateToAcceptBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /**
/**
 * <code>required string fromURL = 3;</code>
 *
 * <pre>
 ** The URL from which the log may be copied
 * </pre>
 *
 * Sets fromURL from its raw byte representation.
 *
 * @throws NullPointerException if {@code value} is null
 */
public Builder setFromURLBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  // Store the raw bytes, then flag field 3 as present.
  fromURL_ = value;
  bitField0_ |= 0x00000004;
  onChanged();
  return this;
}
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStateToAcceptFieldBuilder().getBuilder(); } /**
/** * <code>required string fromURL = 3;</code> * * <pre> ** The URL from which the log may be copied * </pre> */ public Builder clearFromURL() { bitField0_ = (bitField0_ & ~0x00000004); fromURL_ = getDefaultInstance().getFromURL(); onChanged(); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /**
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStateToAcceptFieldBuilder().getBuilder(); } /**
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 *
 * Merges {@code value} into the current stateToAccept following protobuf
 * merge semantics: if the field is already set to a non-default message,
 * the two messages are merged field-by-field; otherwise {@code value}
 * simply replaces the current contents.
 */
public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (stateToAcceptBuilder_ == null) {
    // Merge rather than overwrite only when the field is both flagged
    // present (bit 2) and holds something other than the default instance.
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      // buildPartial() is used because the merged message may still be
      // missing required fields at this point.
      stateToAccept_ =
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
    } else {
      stateToAccept_ = value;
    }
    onChanged();
  } else {
    // A nested builder is active; it performs the merge itself.
    stateToAcceptBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
 * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
 *
 * Merges {@code value} into the current reqInfo following protobuf merge
 * semantics: if the field is already set to a non-default message, the two
 * messages are merged field-by-field; otherwise {@code value} simply
 * replaces the current contents.
 */
public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
  if (reqInfoBuilder_ == null) {
    // Merge rather than overwrite only when the field is both flagged
    // present (bit 1) and holds something other than the default instance.
    if (((bitField0_ & 0x00000001) == 0x00000001) &&
        reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
      // buildPartial() tolerates missing required fields mid-merge.
      reqInfo_ =
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
    } else {
      reqInfo_ = value;
    }
    onChanged();
  } else {
    // A nested builder is active; it performs the merge itself.
    reqInfoBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public Builder setStateToAccept( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) { if (stateToAcceptBuilder_ == null) { stateToAccept_ = builderForValue.build(); onChanged(); } else { stateToAcceptBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /**
/** * <code>required string fromURL = 3;</code> * * <pre> ** The URL from which the log may be copied * </pre> */ public Builder setFromURL( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; fromURL_ = value; onChanged(); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public Builder clearStateToAccept() { if (stateToAcceptBuilder_ == null) { stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance(); onChanged(); } else { stateToAcceptBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /**
/**
 * Merges every field set on {@code other} into this builder, following
 * protobuf merge semantics: message fields are merged recursively, the
 * string field is overwritten, and unknown fields are carried over.
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
  // Merging the default instance is a no-op by contract.
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
  if (other.hasReqInfo()) {
    mergeReqInfo(other.getReqInfo());
  }
  if (other.hasStateToAccept()) {
    mergeStateToAccept(other.getStateToAccept());
  }
  if (other.hasFromURL()) {
    // Strings are replaced, not merged; copy the (possibly lazily
    // decoded) internal representation directly and set bit 3.
    bitField0_ |= 0x00000004;
    fromURL_ = other.fromURL_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public Builder setReqInfo( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) { if (reqInfoBuilder_ == null) { reqInfo_ = builderForValue.build(); onChanged(); } else { reqInfoBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public Builder clearReqInfo() { if (reqInfoBuilder_ == null) { reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance(); onChanged(); } else { reqInfoBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /**
/** * <code>required string fromURL = 3;</code> * * <pre> ** The URL from which the log may be copied * </pre> */ public Builder clearFromURL() { bitField0_ = (bitField0_ & ~0x00000004); fromURL_ = getDefaultInstance().getFromURL(); onChanged(); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code> * * <pre> ** Details on the segment to recover * </pre> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStateToAcceptFieldBuilder().getBuilder(); } /**
/** * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() { bitField0_ |= 0x00000001; onChanged(); return getReqInfoFieldBuilder().getBuilder(); } /**