/**
 * Resets the builder to its pristine state: both fields return to their
 * protobuf defaults and the corresponding has-bits in {@code bitField0_}
 * are cleared.
 */
public Builder clear() {
  super.clear();
  // Field 1 (segmentState): reset the plain field, or delegate to the nested builder.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  // Field 2 (acceptedInEpoch): scalar default.
  acceptedInEpoch_ = 0L;
  bitField0_ &= ~0x00000002;
  return this;
}
/**
 * Resets the builder to its pristine state: all four fields return to their
 * protobuf defaults and every has-bit in {@code bitField0_} is cleared.
 */
public Builder clear() {
  super.clear();
  // Field 1 (segmentState): reset the plain field, or delegate to the nested builder.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  // Scalar fields 2-4 fall back to zero.
  acceptedInEpoch_ = 0L;
  bitField0_ &= ~0x00000002;
  lastWriterEpoch_ = 0L;
  bitField0_ &= ~0x00000004;
  lastCommittedTxId_ = 0L;
  bitField0_ &= ~0x00000008;
  return this;
}
/**
 * Resets the builder to its pristine state: clears reqInfo and stateToAccept
 * (or their nested builders), restores fromURL to the empty string, and
 * drops every has-bit in {@code bitField0_}.
 */
public Builder clear() {
  super.clear();
  // Field 1 (reqInfo).
  if (reqInfoBuilder_ == null) {
    reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
  } else {
    reqInfoBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  // Field 2 (stateToAccept).
  if (stateToAcceptBuilder_ == null) {
    stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  } else {
    stateToAcceptBuilder_.clear();
  }
  bitField0_ &= ~0x00000002;
  // Field 3 (fromURL): string default is empty.
  fromURL_ = "";
  bitField0_ &= ~0x00000004;
  return this;
}
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
  lastWriterEpoch_ = 0L;
  lastCommittedTxId_ = 0L;
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * Resets the builder to its pristine state: both fields return to their
 * protobuf defaults and the corresponding has-bits in {@code bitField0_}
 * are cleared.
 */
public Builder clear() {
  super.clear();
  // Field 1 (segmentState): reset the plain field, or delegate to the nested builder.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  // Field 2 (acceptedInEpoch): scalar default.
  acceptedInEpoch_ = 0L;
  bitField0_ &= ~0x00000002;
  return this;
}
/**
 * Resets the builder to its pristine state: both fields return to their
 * protobuf defaults and the corresponding has-bits in {@code bitField0_}
 * are cleared.
 */
public Builder clear() {
  super.clear();
  // Field 1 (segmentState): reset the plain field, or delegate to the nested builder.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  // Field 2 (acceptedInEpoch): scalar default.
  acceptedInEpoch_ = 0L;
  bitField0_ &= ~0x00000002;
  return this;
}
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
  stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  fromURL_ = "";
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 */
public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (stateToAcceptBuilder_ == null) {
    // When a non-default message is already present, merge field-by-field;
    // otherwise adopt the incoming message wholesale.
    if ((bitField0_ & 0x00000002) != 0
        && stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto
          .newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
    } else {
      stateToAccept_ = value;
    }
    onChanged();
  } else {
    stateToAcceptBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (segmentStateBuilder_ == null) {
    // When a non-default message is already present, merge field-by-field;
    // otherwise adopt the incoming message wholesale.
    if ((bitField0_ & 0x00000001) != 0
        && segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto
          .newBuilder(segmentState_).mergeFrom(value).buildPartial();
    } else {
      segmentState_ = value;
    }
    onChanged();
  } else {
    segmentStateBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
 *
 * <pre>
 ** Details on the segment to recover
 * </pre>
 */
public Builder clearStateToAccept() {
  // Restore the field (or its nested builder) to the default and drop the has-bit.
  if (stateToAcceptBuilder_ == null) {
    stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
    onChanged();
  } else {
    stateToAcceptBuilder_.clear();
  }
  bitField0_ &= ~0x00000002;
  return this;
}
/**
/**
 * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
  if (segmentStateBuilder_ == null) {
    // When a non-default message is already present, merge field-by-field;
    // otherwise adopt the incoming message wholesale.
    if ((bitField0_ & 0x00000001) != 0
        && segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
      segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto
          .newBuilder(segmentState_).mergeFrom(value).buildPartial();
    } else {
      segmentState_ = value;
    }
    onChanged();
  } else {
    segmentStateBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
/**
 * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder clearSegmentState() {
  // Restore the field (or its nested builder) to the default and drop the has-bit.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
    onChanged();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  return this;
}
/**
/** Returns the shared default instance for this message type. */
public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
}
/**
 * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
 */
public Builder clearSegmentState() {
  // Restore the field (or its nested builder) to the default and drop the has-bit.
  if (segmentStateBuilder_ == null) {
    segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
    onChanged();
  } else {
    segmentStateBuilder_.clear();
  }
  bitField0_ &= ~0x00000001;
  return this;
}
/**
/**
 * Merges {@code other} into this builder: every field present on
 * {@code other} overwrites the corresponding field here, and unknown
 * fields are merged as well. Merging the default instance is a no-op.
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
    return this;
  }
  if (other.hasStartTxId()) {
    setStartTxId(other.getStartTxId());
  }
  if (other.hasEndTxId()) {
    setEndTxId(other.getEndTxId());
  }
  if (other.hasIsInProgress()) {
    setIsInProgress(other.getIsInProgress());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
  lastWriterEpoch_ = 0L;
  lastCommittedTxId_ = 0L;
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  acceptedInEpoch_ = 0L;
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;
// Seeds each field with its protobuf default for a newly constructed message.
private void initFields() {
  reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
  stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
  fromURL_ = "";
}
// Cache for isInitialized(): -1 = not yet computed, 0 = false, 1 = true.
private byte memoizedIsInitialized = -1;