/** * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code> */ public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (value == null) { throw new NullPointerException(); } jid_ = value; onChanged(); } else { jidBuilder_.setMessage(value); } bitField0_ |= 0x00000001; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code> */ public Builder setPrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (prevStorageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } prevStorage_ = value; onChanged(); } else { prevStorageBuilder_.setMessage(value); } bitField0_ |= 0x00000004; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code> */ public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (storageBuilder_ == null) { if (value == null) { throw new NullPointerException(); } storage_ = value; onChanged(); } else { storageBuilder_.setMessage(value); } bitField0_ |= 0x00000002; return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code> */ public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) { if (jidBuilder_ == null) { if (((bitField0_ & 0x00000001) == 0x00000001) && jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial(); } else { jid_ = value; } onChanged(); } else { jidBuilder_.mergeFrom(value); } bitField0_ |= 0x00000001; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code> */ public Builder mergePrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (prevStorageBuilder_ == null) { if (((bitField0_ & 0x00000004) == 0x00000004) && prevStorage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) { prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(prevStorage_).mergeFrom(value).buildPartial(); } else { prevStorage_ = value; } onChanged(); } else { prevStorageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000004; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code> */ public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto value) { if (storageBuilder_ == null) { if (((bitField0_ & 0x00000002) == 0x00000002) && storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance()) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.newBuilder(storage_).mergeFrom(value).buildPartial(); } else { storage_ = value; } onChanged(); } else { storageBuilder_.mergeFrom(value); } bitField0_ |= 0x00000002; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code> */ public Builder setPrevStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) { if (prevStorageBuilder_ == null) { prevStorage_ = builderForValue.build(); onChanged(); } else { prevStorageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000004; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code> */ public Builder setStorage( org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder builderForValue) { if (storageBuilder_ == null) { storage_ = builderForValue.build(); onChanged(); } else { storageBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000002; return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code> */ public Builder setJid( org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) { if (jidBuilder_ == null) { jid_ = builderForValue.build(); onChanged(); } else { jidBuilder_.setMessage(builderForValue.build()); } bitField0_ |= 0x00000001; return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code> */ public Builder clearPrevStorage() { if (prevStorageBuilder_ == null) { prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { prevStorageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000004); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code> */ public Builder clearJid() { if (jidBuilder_ == null) { jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance(); onChanged(); } else { jidBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); return this; } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code> */ public Builder clearStorage() { if (storageBuilder_ == null) { storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.getDefaultInstance(); onChanged(); } else { storageBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); return this; } /**
/**
 * Sets the optional {@code nameServiceId} field from its raw byte form.
 *
 * <code>optional string nameServiceId = 5;</code>
 */
public Builder setNameServiceIdBytes(
    com.google.protobuf.ByteString value) {
  if (value == null) {
    throw new NullPointerException();
  }
  bitField0_ |= 0x00000010; // mark 'nameServiceId' as present
  nameServiceId_ = value;
  onChanged();
  return this;
}
/** * <code>optional string nameServiceId = 5;</code> */ public Builder setNameServiceId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000010; nameServiceId_ = value; onChanged(); return this; } /**
/** * <code>required int32 targetLayoutVersion = 4;</code> */ public Builder setTargetLayoutVersion(int value) { bitField0_ |= 0x00000008; targetLayoutVersion_ = value; onChanged(); return this; } /**
/** * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code> */ public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() { bitField0_ |= 0x00000001; onChanged(); return getJidFieldBuilder().getBuilder(); } /**
/** * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code> */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getStorageBuilder() { bitField0_ |= 0x00000002; onChanged(); return getStorageFieldBuilder().getBuilder(); } /**
/** * <code>optional string nameServiceId = 5;</code> */ public Builder clearNameServiceId() { bitField0_ = (bitField0_ & ~0x00000010); nameServiceId_ = getDefaultInstance().getNameServiceId(); onChanged(); return this; } /**
/**
 * Clears the {@code targetLayoutVersion} field back to zero and marks it
 * unset.
 *
 * <code>required int32 targetLayoutVersion = 4;</code>
 */
public Builder clearTargetLayoutVersion() {
  bitField0_ = (bitField0_ & ~0x00000008); // drop the has-bit for 'targetLayoutVersion'
  targetLayoutVersion_ = 0;
  onChanged();
  return this;
}
/** * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code> */ public org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto.Builder getPrevStorageBuilder() { bitField0_ |= 0x00000004; onChanged(); return getPrevStorageFieldBuilder().getBuilder(); } /**