/**
 * Builds a message from this builder's current state without enforcing
 * that required fields are set.
 *
 * @return a possibly-uninitialized {@code ListRequestProto}
 */
public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto buildPartial() {
  org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto result =
      new org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto(this);
  int resultBits = 0;
  // Bit 0x1 records whether the marker field has been set.
  if ((bitField0_ & 0x00000001) == 0x00000001) {
    resultBits |= 0x00000001;
  }
  // Take the marker from the nested builder when one is in use,
  // otherwise copy the raw field directly.
  result.marker_ = (markerBuilder_ == null) ? marker_ : markerBuilder_.build();
  result.bitField0_ = resultBits;
  onBuilt();
  return result;
}
@java.lang.Override
public int hashCode() {
  // Serve the cached hash when it was already computed (0 means "unset").
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int h = 41;
  h = h * 19 + getDescriptorForType().hashCode();
  // Only a present marker contributes its field number and value.
  if (hasMarker()) {
    h = h * 37 + MARKER_FIELD_NUMBER;
    h = h * 53 + getMarker().hashCode();
  }
  h = h * 29 + getUnknownFields().hashCode();
  memoizedHashCode = h;
  return h;
}
/**
 * Serializes this message to the given stream.
 *
 * @param output destination stream
 * @throws java.io.IOException if the underlying stream fails
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  // Called for its side effect: populates the memoized serialized size.
  getSerializedSize();
  boolean markerPresent = (bitField0_ & 0x00000001) == 0x00000001;
  if (markerPresent) {
    output.writeMessage(1, marker_);
  }
  getUnknownFields().writeTo(output);
}
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = break; default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { done = true; } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable();
/**
 * Lists file regions held by the in-memory alias map, optionally resuming
 * iteration from the marker block carried in the request.
 *
 * @param controller RPC controller (unused)
 * @param request request that may carry a marker block to resume from
 * @return response containing the converted file regions and, when more
 *         results remain, the next marker
 * @throws ServiceException wrapping any {@link IOException} from the alias map
 */
@Override
public ListResponseProto list(RpcController controller,
    ListRequestProto request) throws ServiceException {
  try {
    IterationResult iterationResult;
    // Fix: use hasMarker() rather than getMarker().isInitialized().
    // getMarker() on an unset field returns the default instance, so
    // isInitialized() is not a reliable "was a marker supplied?" test;
    // hasMarker() is the presence check protobuf provides for this.
    if (request.hasMarker()) {
      iterationResult =
          aliasMap.list(Optional.of(PBHelperClient.convert(request.getMarker())));
    } else {
      iterationResult = aliasMap.list(Optional.empty());
    }
    ListResponseProto.Builder responseBuilder = ListResponseProto.newBuilder();
    List<FileRegion> fileRegions = iterationResult.getFileRegions();
    List<KeyValueProto> keyValueProtos = fileRegions.stream()
        .map(PBHelper::convert).collect(Collectors.toList());
    responseBuilder.addAllFileRegions(keyValueProtos);
    // Fix: ifPresent, not map — the previous Optional.map() discarded its
    // result and was used purely for the side effect of setting the marker.
    iterationResult.getNextBlock().ifPresent(
        m -> responseBuilder.setNextMarker(PBHelperClient.convert(m)));
    return responseBuilder.build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
/**
 * Returns the default-instance prototype of the request message for the
 * given method of this service (indexes 0..3).
 *
 * @throws java.lang.IllegalArgumentException if the descriptor belongs to
 *         a different service
 */
public final com.google.protobuf.Message getRequestPrototype(
    com.google.protobuf.Descriptors.MethodDescriptor method) {
  if (method.getService() != getDescriptor()) {
    throw new java.lang.IllegalArgumentException(
        "Service.getRequestPrototype() given method descriptor for wrong service type.");
  }
  switch (method.getIndex()) {
    case 0:
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance();
    case 1:
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance();
    case 2:
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance();
    case 3:
      return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance();
    default:
      throw new java.lang.AssertionError("Can't get here.");
  }
}
/**
 * Maps a method descriptor of this service to the default instance of its
 * request message type.
 *
 * @throws java.lang.IllegalArgumentException when the method belongs to
 *         another service
 */
public final com.google.protobuf.Message getRequestPrototype(
    com.google.protobuf.Descriptors.MethodDescriptor method) {
  if (method.getService() != getDescriptor()) {
    throw new java.lang.IllegalArgumentException(
        "Service.getRequestPrototype() given method descriptor for wrong service type.");
  }
  final int index = method.getIndex();
  if (index == 0) {
    return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.WriteRequestProto.getDefaultInstance();
  } else if (index == 1) {
    return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ReadRequestProto.getDefaultInstance();
  } else if (index == 2) {
    return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance();
  } else if (index == 3) {
    return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.BlockPoolRequestProto.getDefaultInstance();
  }
  throw new java.lang.AssertionError("Can't get here.");
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto that =
      (org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto) obj;
  // Marker presence must agree, and when present the values must agree.
  if (hasMarker() != that.hasMarker()) {
    return false;
  }
  if (hasMarker() && !getMarker().equals(that.getMarker())) {
    return false;
  }
  return getUnknownFields().equals(that.getUnknownFields());
}
/** Returns the shared default (empty) {@code ListRequestProto} instance. */
public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto getDefaultInstanceForType() {
  return org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto
      .getDefaultInstance();
}
/**
 * Computes (and memoizes) the serialized byte size of this message.
 *
 * @return total size in bytes of the wire encoding
 */
public int getSerializedSize() {
  // -1 marks "not yet computed"; anything else is the cached answer.
  int cached = memoizedSerializedSize;
  if (cached != -1) {
    return cached;
  }
  int size = 0;
  if ((bitField0_ & 0x00000001) == 0x00000001) {
    size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, marker_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
/**
 * Builds the message, throwing if it is missing required fields.
 *
 * @return a fully-initialized {@code ListRequestProto}
 */
public org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto build() {
  org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto built =
      buildPartial();
  if (built.isInitialized()) {
    return built;
  }
  throw newUninitializedMessageException(built);
}
/**
 * Merges the set fields of {@code other} into this builder.
 *
 * @return this builder, for chaining
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto other) {
  // Merging the default instance is a no-op.
  if (other != org.apache.hadoop.hdfs.protocol.proto.AliasMapProtocolProtos.ListRequestProto.getDefaultInstance()) {
    if (other.hasMarker()) {
      mergeMarker(other.getMarker());
    }
    this.mergeUnknownFields(other.getUnknownFields());
  }
  return this;
}