// Start building a protobuf ServerLoad from the region server's metrics wrapper:
// per-second request rate and lifetime request count.
// NOTE(review): getRequestsPerSecond() is narrowed with an (int) cast here —
// TODO confirm truncation of the fractional rate is intended.
ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder(); serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond()); serverLoad.setTotalNumberOfRequests(regionServerWrapper.getTotalRequestCount());
// Generated protobuf Builder.mergeFrom(ServerLoad): for each optional field that
// is present on "other", copies its value into this builder (messages such as
// repl_load_sink are merged rather than overwritten).
// NOTE(review): this excerpt appears truncated — the hasX() blocks read as nested
// because their closing braces are cut off; in protoc output each block is a
// closed sibling. Verify against the regenerated source before editing.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad other) { if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this; if (other.hasNumberOfRequests()) { setNumberOfRequests(other.getNumberOfRequests()); if (other.hasTotalNumberOfRequests()) { setTotalNumberOfRequests(other.getTotalNumberOfRequests()); if (other.hasUsedHeapMB()) { setUsedHeapMB(other.getUsedHeapMB()); if (other.hasMaxHeapMB()) { setMaxHeapMB(other.getMaxHeapMB()); if (other.hasReportStartTime()) { setReportStartTime(other.getReportStartTime()); if (other.hasReportEndTime()) { setReportEndTime(other.getReportEndTime()); if (other.hasInfoServerPort()) { setInfoServerPort(other.getInfoServerPort()); if (other.hasReplLoadSink()) { mergeReplLoadSink(other.getReplLoadSink());
// Generated hashCode() body: folds each present optional field (and each
// non-empty repeated field) into the hash using protobuf's standard
// 37/53 multiplier scheme keyed by the field number.
// NOTE(review): excerpt is truncated — the if-blocks appear unclosed; in
// protoc output each block is closed before the next. Do not hand-edit.
hash = (19 * hash) + getDescriptor().hashCode(); if (hasNumberOfRequests()) { hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong( getNumberOfRequests()); if (hasTotalNumberOfRequests()) { hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong( getTotalNumberOfRequests()); if (hasUsedHeapMB()) { hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER; hash = (53 * hash) + getUsedHeapMB(); if (hasMaxHeapMB()) { hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER; hash = (53 * hash) + getMaxHeapMB(); if (getRegionLoadsCount() > 0) { hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; hash = (53 * hash) + getRegionLoadsList().hashCode(); if (getCoprocessorsCount() > 0) { hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; hash = (53 * hash) + getCoprocessorsList().hashCode(); if (hasReportStartTime()) { hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER;
// Generated protobuf Builder.mergeFrom(ServerLoad) — identical in shape to the
// other shaded copy of this method: copies every field present on "other" into
// this builder, merging (not replacing) the repl_load_sink sub-message.
// NOTE(review): the excerpt is truncated; the hasX() blocks only look nested
// because their closing braces were cut. Regenerate rather than hand-edit.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad other) { if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this; if (other.hasNumberOfRequests()) { setNumberOfRequests(other.getNumberOfRequests()); if (other.hasTotalNumberOfRequests()) { setTotalNumberOfRequests(other.getTotalNumberOfRequests()); if (other.hasUsedHeapMB()) { setUsedHeapMB(other.getUsedHeapMB()); if (other.hasMaxHeapMB()) { setMaxHeapMB(other.getMaxHeapMB()); if (other.hasReportStartTime()) { setReportStartTime(other.getReportStartTime()); if (other.hasReportEndTime()) { setReportEndTime(other.getReportEndTime()); if (other.hasInfoServerPort()) { setInfoServerPort(other.getInfoServerPort()); if (other.hasReplLoadSink()) { mergeReplLoadSink(other.getReplLoadSink());
// Serialized-size contribution of the repl_load_sink sub-message (field 11).
.computeMessageSize(11, getReplLoadSink());
// Renders the report's load as a short human-readable string, then falls
// through to the next recognized message type in the instanceof chain.
" load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; } else if (m instanceof RegionServerStartupRequest) {
// Generated hashCode() body (duplicate shaded copy): same 37/53 field-number
// mixing scheme over each present optional / non-empty repeated field.
// NOTE(review): excerpt truncated — closing braces for the if-blocks are cut
// off; each block is a closed sibling in the real generated file.
hash = (19 * hash) + getDescriptor().hashCode(); if (hasNumberOfRequests()) { hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong( getNumberOfRequests()); if (hasTotalNumberOfRequests()) { hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER; hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong( getTotalNumberOfRequests()); if (hasUsedHeapMB()) { hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER; hash = (53 * hash) + getUsedHeapMB(); if (hasMaxHeapMB()) { hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER; hash = (53 * hash) + getMaxHeapMB(); if (getRegionLoadsCount() > 0) { hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER; hash = (53 * hash) + getRegionLoadsList().hashCode(); if (getCoprocessorsCount() > 0) { hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER; hash = (53 * hash) + getCoprocessorsList().hashCode(); if (hasReportStartTime()) { hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER;
// Generated equals() body: two ServerLoads are equal when each optional field
// has matching presence AND matching value, and the repeated region-load /
// coprocessor lists are element-wise equal. Excerpt is truncated mid-method.
result = result && (hasNumberOfRequests() == other.hasNumberOfRequests()); if (hasNumberOfRequests()) { result = result && (getNumberOfRequests() == other.getNumberOfRequests()); result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests()); if (hasTotalNumberOfRequests()) { result = result && (getTotalNumberOfRequests() == other.getTotalNumberOfRequests()); result = result && (hasUsedHeapMB() == other.hasUsedHeapMB()); if (hasUsedHeapMB()) { result = result && (getUsedHeapMB() == other.getUsedHeapMB()); result = result && (hasMaxHeapMB() == other.hasMaxHeapMB()); if (hasMaxHeapMB()) { result = result && (getMaxHeapMB() == other.getMaxHeapMB()); result = result && getRegionLoadsList() .equals(other.getRegionLoadsList()); result = result && getCoprocessorsList() .equals(other.getCoprocessorsList()); result = result && (hasReportStartTime() == other.hasReportStartTime()); if (hasReportStartTime()) { result = result && (getReportStartTime() == other.getReportStartTime());
// Serializes the repl_load_sink sub-message under field number 11 — must stay
// in sync with the computeMessageSize(11, ...) call in getSerializedSize().
output.writeMessage(11, getReplLoadSink());
// Test fixture: finishes building a second replication load source (shipped-op
// timestamp and log-queue size each bumped by one), then wraps both sources in
// a ServerLoad attached to the region server report request.
.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp + 1) .setSizeOfLogQueue(sizeOfLogQueue + 1).build(); ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() .addReplLoadSource(rload1).addReplLoadSource(rload2).build(); request.setLoad(sl);
// Generated isInitialized() body: a ServerLoad is initialized only if every
// nested region load, coprocessor, and replication source/sink message is
// itself initialized; failures are memoized before returning false.
// NOTE(review): excerpt truncated — loop/if closing braces are cut off here.
if (isInitialized == 0) return false; for (int i = 0; i < getRegionLoadsCount(); i++) { if (!getRegionLoads(i).isInitialized()) { memoizedIsInitialized = 0; return false; for (int i = 0; i < getCoprocessorsCount(); i++) { if (!getCoprocessors(i).isInitialized()) { memoizedIsInitialized = 0; return false; for (int i = 0; i < getReplLoadSourceCount(); i++) { if (!getReplLoadSource(i).isInitialized()) { memoizedIsInitialized = 0; return false; if (hasReplLoadSink()) { if (!getReplLoadSink().isInitialized()) { memoizedIsInitialized = 0; return false;
// Test fixture: a ServerLoad carrying only the expected total request count.
// NOTE(review): sl is built twice with identical settings — the second
// assignment looks redundant; confirm whether the test deliberately rebuilds
// between report cycles or this is leftover duplication.
ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder() .setTotalNumberOfRequests(expectedRequestNumber) .build(); sl = ClusterStatusProtos.ServerLoad.newBuilder() .setTotalNumberOfRequests(expectedRequestNumber) .build();
/**
 * <pre>
 ** load the server is under
 * </pre>
 *
 * <code>optional .hbase.pb.ServerLoad load = 2;</code>
 *
 * Merges {@code value} into the current load field. If the field is already
 * set (presence bit 0x00000002) and holds a non-default message, the two
 * messages are merged field-by-field; otherwise {@code value} replaces it.
 */
public Builder mergeLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
  if (loadBuilder_ == null) {
    // No nested builder in use: merge directly into the stored message.
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        load_ != null &&
        load_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
      load_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
    } else {
      // Field unset (or default): take the incoming message as-is.
      load_ = value;
    }
    onChanged();
  } else {
    // A nested builder exists; delegate the merge to it.
    loadBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002; // mark the load field as present
  return this;
}
/**
/**
 * Converts a {@link ServerMetrics} snapshot into its protobuf
 * {@code ClusterStatusProtos.ServerLoad} form. All scalar metrics, region
 * loads, coprocessor names, and replication sources are always written; the
 * replication sink is written only when the snapshot carries one.
 */
public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
  ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder();
  // Scalar request / heap / port metrics. Heap sizes are narrowed to whole MB.
  builder.setNumberOfRequests(metrics.getRequestCountPerSecond());
  builder.setTotalNumberOfRequests(metrics.getRequestCount());
  builder.setInfoServerPort(metrics.getInfoServerPort());
  builder.setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE));
  builder.setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE));
  // Repeated fields: coprocessors, per-region loads, replication sources.
  builder.addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames()));
  builder.addAllRegionLoads(metrics.getRegionMetrics().values().stream()
      .map(RegionMetricsBuilder::toRegionLoad)
      .collect(Collectors.toList()));
  builder.addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream()
      .map(ProtobufUtil::toReplicationLoadSource)
      .collect(Collectors.toList()));
  // Report window boundaries.
  builder.setReportStartTime(metrics.getLastReportTimestamp());
  builder.setReportEndTime(metrics.getReportTimestamp());
  // The sink is optional in the proto; only set it when present.
  if (metrics.getReplicationLoadSink() != null) {
    builder.setReplLoadSink(ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink()));
  }
  return builder.build();
}
/**
 * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
 *
 * Merges {@code value} into the current server_load field. If the field is
 * already set (presence bit 0x00000002) and holds a non-default message, the
 * two are merged field-by-field; otherwise {@code value} replaces it.
 */
public Builder mergeServerLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
  if (serverLoadBuilder_ == null) {
    // No nested builder in use: merge directly into the stored message.
    if (((bitField0_ & 0x00000002) == 0x00000002) &&
        serverLoad_ != null &&
        serverLoad_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
      serverLoad_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(serverLoad_).mergeFrom(value).buildPartial();
    } else {
      // Field unset (or default): take the incoming message as-is.
      serverLoad_ = value;
    }
    onChanged();
  } else {
    // A nested builder exists; delegate the merge to it.
    serverLoadBuilder_.mergeFrom(value);
  }
  bitField0_ |= 0x00000002; // mark the server_load field as present
  return this;
}
/**
/**
 * Builds a canned ServerLoad protobuf carrying two fixed RegionLoad entries,
 * used as deterministic input by the tests in this class.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Two region specifiers identified by fixed encoded names.
  HBaseProtos.RegionSpecifier specA = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specB = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // First region: fixed metric values, request counters pinned at int max.
  ClusterStatusProtos.RegionLoad loadA = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specA)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  // Second region: a smaller set of fixed values, same pinned counters.
  ClusterStatusProtos.RegionLoad loadB = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specB)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadA)
      .addRegionLoads(loadB)
      .build();
}
/**
 * Assembles the fixed two-region ServerLoad fixture this test class feeds
 * through the code under test. Values are constants so assertions elsewhere
 * can check exact aggregates.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Region specifiers keyed by fixed encoded names.
  HBaseProtos.RegionSpecifier firstSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier secondSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // First region's load: fixed sizes/counts, request counters at Integer.MAX_VALUE.
  ClusterStatusProtos.RegionLoad firstRegion = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(firstSpec)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  // Second region's load: distinct fixed values, same pinned counters.
  ClusterStatusProtos.RegionLoad secondRegion = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(secondSpec)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(firstRegion)
      .addRegionLoads(secondRegion)
      .build();
}
/**
 * Builds a {@link ServerMetrics} from a protobuf
 * {@code ClusterStatusProtos.ServerLoad} — the inverse of the toServerLoad
 * conversion. The replication sink is mapped to {@code null} when the proto
 * does not carry one.
 */
public static ServerMetrics toServerMetrics(ServerName serverName,
    ClusterStatusProtos.ServerLoad serverLoadPB) {
  ServerMetricsBuilder builder = ServerMetricsBuilder.newBuilder(serverName);
  // Scalar request / heap / port metrics.
  builder.setRequestCountPerSecond(serverLoadPB.getNumberOfRequests());
  builder.setRequestCount(serverLoadPB.getTotalNumberOfRequests());
  builder.setInfoServerPort(serverLoadPB.getInfoServerPort());
  builder.setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE));
  builder.setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE));
  // Repeated fields: coprocessor names, per-region metrics, replication sources.
  builder.setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream()
      .map(HBaseProtos.Coprocessor::getName)
      .collect(Collectors.toList()));
  builder.setRegionMetrics(serverLoadPB.getRegionLoadsList().stream()
      .map(RegionMetricsBuilder::toRegionMetrics)
      .collect(Collectors.toList()));
  builder.setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream()
      .map(ProtobufUtil::toReplicationLoadSource)
      .collect(Collectors.toList()));
  // Optional sink: absent in the proto becomes null on the metrics object.
  builder.setReplicationLoadSink(serverLoadPB.hasReplLoadSink()
      ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink())
      : null);
  // Report window: proto end time is the "current" report timestamp.
  builder.setReportTimestamp(serverLoadPB.getReportEndTime());
  builder.setLastReportTimestamp(serverLoadPB.getReportStartTime());
  return builder.build();
}
// Generated factory plumbing: newBuilderForType() simply delegates to the
// static newBuilder(). The trailing declaration is the start of that static
// factory, truncated in this excerpt.
public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() {
/**
 * Builds a RegionServerReportRequest for {@code sn} whose ServerLoad contains
 * one (otherwise empty) RegionLoad entry per supplied region, each keyed by
 * the region's full region name.
 */
private RegionServerStatusProtos.RegionServerReportRequest.Builder
    makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
  ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
  // Index-based loop so each RegionLoad is inserted at a deterministic position.
  for (int idx = 0; idx < regions.length; idx++) {
    HBaseProtos.RegionSpecifier.Builder regionSpec = HBaseProtos.RegionSpecifier.newBuilder();
    regionSpec.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME);
    // Wrap without copying; the region name byte[] is not mutated afterwards.
    regionSpec.setValue(UnsafeByteOperations.unsafeWrap(regions[idx].getRegionName()));
    ClusterStatusProtos.RegionLoad.Builder regionLoad =
        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(regionSpec.build());
    serverLoad.addRegionLoads(idx, regionLoad.build());
  }
  return RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
      .setServer(ProtobufUtil.toServerName(sn))
      .setLoad(serverLoad);
}