Tabnine Logo
ClusterStatusProtos$RegionLoad$Builder
Code IndexAdd Tabnine to your IDE (free)

How to use
ClusterStatusProtos$RegionLoad$Builder
in
org.apache.hadoop.hbase.shaded.protobuf.generated

Best Java code snippets using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$RegionLoad$Builder (Showing top 20 results out of 315)

origin: apache/hbase

/**
 * Inserts a RegionLoad message, built from {@code builderForValue}, at
 * position {@code index} of the repeated region_loads field.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // When a nested repeated-field builder exists, delegate to it; otherwise
  // mutate the local list directly and signal the change.
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(index, builderForValue.build());
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, builderForValue.build());
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-protocol-shaded

// NOTE(review): this snippet is truncated by the code-index scrape — the
// method body continues past the last line shown and closing braces are
// missing; do not copy it as-is.
// Merges all fields that are set on {@code other} into this builder.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
 // Merging the default instance is a no-op.
 if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
 if (other.hasRegionSpecifier()) {
  mergeRegionSpecifier(other.getRegionSpecifier());
  // NOTE(review): in stock protobuf-generated code each setter below is
  // guarded by its own hasXxx() check; here they all appear nested under
  // hasRegionSpecifier(), which looks like an indentation artifact of the
  // scrape — confirm against the real generated source before relying on it.
  setStores(other.getStores());
  setStorefiles(other.getStorefiles());
  setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
  setStorefileSizeMB(other.getStorefileSizeMB());
  setMemStoreSizeMB(other.getMemStoreSizeMB());
  setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
  setReadRequestsCount(other.getReadRequestsCount());
  setWriteRequestsCount(other.getWriteRequestsCount());
  setTotalCompactingKVs(other.getTotalCompactingKVs());
  setCurrentCompactedKVs(other.getCurrentCompactedKVs());
  setRootIndexSizeKB(other.getRootIndexSizeKB());
  setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
  setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
origin: com.aliyun.hbase/alihbase-client

// Converts a client-side RegionMetrics view into the protobuf RegionLoad
// message used on the wire.
// NOTE(review): snippet is truncated by the scrape — the method's closing
// brace is missing below.
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
 return ClusterStatusProtos.RegionLoad.newBuilder()
   // Identify the region by its full region name.
   .setRegionSpecifier(HBaseProtos.RegionSpecifier
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
    .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
    .build())
   // Size metrics are narrowed to int (KB/MB fields in the proto);
   // values beyond Integer.MAX_VALUE would wrap.
   .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize()
    .get(Size.Unit.KILOBYTE))
   .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
   .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
   .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
   .setDataLocality(regionMetrics.getDataLocality())
   .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
   .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
   .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
   .setReadRequestsCount(regionMetrics.getReadRequestCount())
   .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
   // Unlike the other KB fields, this one is kept as long.
   .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setStores(regionMetrics.getStoreCount())
   // NOTE(review): storefiles is populated from getStoreCount(), not a
   // store-file count — looks suspicious, but matches the shown source.
   .setStorefiles(regionMetrics.getStoreCount())
   .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE))
   .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
   .setStoreUncompressedSizeMB(
    (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
   .build();
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Replaces the RegionLoad entry at {@code index} with the message produced
 * by {@code builderForValue}.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder setRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  final org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad msg =
      builderForValue.build();
  // Route through the nested builder when present; otherwise edit the
  // backing list in place and notify listeners.
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.setMessage(index, msg);
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, msg);
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Builds {@code builderForValue} and inserts the resulting RegionLoad at
 * {@code index} in the repeated region_loads field.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  final org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad msg =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(index, msg);
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, msg);
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Appends the RegionLoad built from {@code builderForValue} to the end of
 * the repeated region_loads field.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  final org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad msg =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(msg);
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(msg);
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-server

/**
 * Builds a ServerLoad proto containing two fixed RegionLoad entries, used
 * as a test fixture. Read-request and write-request counts are pinned at
 * Integer.MAX_VALUE.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  // Two fake regions identified by their encoded names.
  HBaseProtos.RegionSpecifier specFirst = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specSecond = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  ClusterStatusProtos.RegionLoad loadFirst = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specFirst)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad loadSecond = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specSecond)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadFirst)
      .addRegionLoads(loadSecond)
      .build();
}
origin: org.apache.hbase/hbase-server

/**
 * Test fixture: returns a ServerLoad proto with two hard-coded RegionLoad
 * entries ("ASDFGQWERT" and "QWERTYUIOP" encoded region names).
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  // Both regions report saturated request counters (Integer.MAX_VALUE).
  ClusterStatusProtos.RegionLoad loadOne = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specOne)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad loadTwo = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specTwo)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadOne)
      .addRegionLoads(loadTwo)
      .build();
}
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Replaces the RegionLoad at {@code index} with the message built from
 * {@code builderForValue}.
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder setRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.setMessage(index, builderForValue.build());
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, builderForValue.build());
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-server

/**
 * Builds a RegionServerReportRequest for server {@code sn} whose ServerLoad
 * carries one minimal RegionLoad (region specifier only) per given region.
 */
private RegionServerStatusProtos.RegionServerReportRequest.Builder
    makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
  ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
  int idx = 0;
  for (HRegionInfo region : regions) {
    // Each load entry identifies its region by full region name.
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(UnsafeByteOperations.unsafeWrap(region.getRegionName()))
        .build();
    serverLoad.addRegionLoads(idx++,
        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec).build());
  }
  return RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
      .setServer(ProtobufUtil.toServerName(sn))
      .setLoad(serverLoad);
}
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Inserts the RegionLoad built from {@code builderForValue} at position
 * {@code index} in the repeated region_loads field.
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder addRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(index, builderForValue.build());
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(index, builderForValue.build());
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * Appends the RegionLoad built from {@code builderForValue} to the repeated
 * region_loads field.
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 1;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  final org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad msg =
      builderForValue.build();
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(msg);
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(msg);
    onChanged();
  }
  return this;
}
/**
origin: org.apache.hbase/hbase-client

// Converts a RegionMetrics snapshot into the wire-format RegionLoad proto.
// NOTE(review): snippet is truncated by the scrape — the method's closing
// brace is missing below.
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
 return ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(HBaseProtos.RegionSpecifier
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
    .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
    .build())
   // KB/MB-sized metrics are narrowed to int for the proto fields.
   .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize()
    .get(Size.Unit.KILOBYTE))
   .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
   .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
   .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
   .setDataLocality(regionMetrics.getDataLocality())
   .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
   .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
   .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
   .setReadRequestsCount(regionMetrics.getReadRequestCount())
   .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
   // This KB field alone is kept as long rather than narrowed to int.
   .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setStores(regionMetrics.getStoreCount())
   // NOTE(review): storefiles is set from getStoreCount(), not a store-file
   // count — matches the shown source, but verify upstream.
   .setStorefiles(regionMetrics.getStoreCount())
   .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE))
   .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
   .setStoreUncompressedSizeMB(
    (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
   .build();
origin: apache/hbase

// NOTE(review): truncated by the code-index scrape — the body continues past
// the last line shown and closing braces are missing.
// Merges the set fields of {@code other} into this builder.
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
 // Default instance carries no data; nothing to merge.
 if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
 if (other.hasRegionSpecifier()) {
  mergeRegionSpecifier(other.getRegionSpecifier());
  // NOTE(review): generated protobuf code normally guards each setter with
  // its own hasXxx() check; the nesting under hasRegionSpecifier() here is
  // likely a scrape artifact — confirm against the generated source.
  setStores(other.getStores());
  setStorefiles(other.getStorefiles());
  setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
  setStorefileSizeMB(other.getStorefileSizeMB());
  setMemStoreSizeMB(other.getMemStoreSizeMB());
  setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
  setReadRequestsCount(other.getReadRequestsCount());
  setWriteRequestsCount(other.getWriteRequestsCount());
  setTotalCompactingKVs(other.getTotalCompactingKVs());
  setCurrentCompactedKVs(other.getCurrentCompactedKVs());
  setRootIndexSizeKB(other.getRootIndexSizeKB());
  setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
  setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
origin: apache/hbase

// NOTE(review): fragment of a larger method — regionLoadBldr, regionSpecifier,
// r and the local metric variables are declared above the scraped excerpt.
// Populates a RegionLoad builder from locally computed metrics plus counters
// read off the region object {@code r}, then finalizes and returns the proto.
regionLoadBldr.setRegionSpecifier(regionSpecifier.build())
 .setStores(stores)
 .setStorefiles(storefiles)
 .setStoreUncompressedSizeMB(storeUncompressedSizeMB)
 .setStorefileSizeMB(storefileSizeMB)
 .setMemStoreSizeMB(memstoreSizeMB)
 .setStorefileIndexSizeKB(storefileIndexSizeKB)
 .setRootIndexSizeKB(rootLevelIndexSizeKB)
 .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
 .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
 .setReadRequestsCount(r.getReadRequestsCount())
 .setCpRequestsCount(r.getCpRequestsCount())
 .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
 .setWriteRequestsCount(r.getWriteRequestsCount())
 .setTotalCompactingKVs(totalCompactingKVs)
 .setCurrentCompactedKVs(currentCompactedKVs)
 .setDataLocality(dataLocality)
 // Last-major-compaction timestamp is derived from the oldest HFile age.
 .setLastMajorCompactionTs(r.getOldestHfileTs(true));
// The region fills in its own complete_sequence_id before the build.
r.setCompleteSequenceId(regionLoadBldr);
return regionLoadBldr.build();
origin: apache/hbase

// Converts RegionMetrics to the RegionLoad proto; this variant additionally
// carries the coprocessor request count (cp_requests_count).
// NOTE(review): truncated by the scrape — the trailing .build() and closing
// brace are missing below.
public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
 return ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(HBaseProtos.RegionSpecifier
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
    .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName()))
    .build())
   // KB/MB metrics are narrowed to int for the proto fields.
   .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize()
    .get(Size.Unit.KILOBYTE))
   .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount())
   .setTotalCompactingKVs(regionMetrics.getCompactingCellCount())
   .setCompleteSequenceId(regionMetrics.getCompletedSequenceId())
   .setDataLocality(regionMetrics.getDataLocality())
   .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount())
   .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
   .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
   .setReadRequestsCount(regionMetrics.getReadRequestCount())
   .setCpRequestsCount(regionMetrics.getCpRequestCount())
   .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
   // Kept as long, unlike the int-narrowed KB fields above.
   .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize()
    .get(Size.Unit.KILOBYTE))
   .setStores(regionMetrics.getStoreCount())
   // NOTE(review): storefiles is populated from getStoreCount() — matches
   // the shown source, but verify upstream.
   .setStorefiles(regionMetrics.getStoreCount())
   .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE))
   .addAllStoreCompleteSequenceId(toStoreSequenceId(regionMetrics.getStoreSequenceId()))
   .setStoreUncompressedSizeMB(
    (int) regionMetrics.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))
origin: apache/hbase

/**
 * Appends a RegionLoad, built from {@code builderForValue}, to the repeated
 * region_loads field.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder addRegionLoads(
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  // Delegate to the nested repeated-field builder when one exists.
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.addMessage(builderForValue.build());
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.add(builderForValue.build());
    onChanged();
  }
  return this;
}
/**
origin: apache/hbase

/**
 * Test fixture: a ServerLoad proto with two hard-coded RegionLoad entries;
 * the second entry additionally sets cp_requests_count = 100.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier specA = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier specB = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  ClusterStatusProtos.RegionLoad loadA = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specA)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad loadB = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(specB)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .setCpRequestsCount(100)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(loadA)
      .addRegionLoads(loadB)
      .build();
}
origin: apache/hbase

/**
 * Overwrites the RegionLoad at position {@code index} with the message
 * produced by {@code builderForValue}.
 *
 * <pre>
 ** Information on the load of individual regions. 
 * </pre>
 *
 * <code>repeated .hbase.pb.RegionLoad region_loads = 5;</code>
 */
public Builder setRegionLoads(
    int index, org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
  if (regionLoadsBuilder_ != null) {
    regionLoadsBuilder_.setMessage(index, builderForValue.build());
  } else {
    ensureRegionLoadsIsMutable();
    regionLoads_.set(index, builderForValue.build());
    onChanged();
  }
  return this;
}
/**
origin: apache/hbase

/**
 * Builds a two-region ServerLoad test fixture; both regions report
 * Integer.MAX_VALUE read and write request counts.
 */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
  HBaseProtos.RegionSpecifier firstSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
      .build();
  HBaseProtos.RegionSpecifier secondSpec = HBaseProtos.RegionSpecifier.newBuilder()
      .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
      .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
      .build();
  ClusterStatusProtos.RegionLoad firstLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(firstSpec)
      .setStores(10)
      .setStorefiles(101)
      .setStoreUncompressedSizeMB(106)
      .setStorefileSizeMB(520)
      .setFilteredReadRequestsCount(100)
      .setStorefileIndexSizeKB(42)
      .setRootIndexSizeKB(201)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  ClusterStatusProtos.RegionLoad secondLoad = ClusterStatusProtos.RegionLoad.newBuilder()
      .setRegionSpecifier(secondSpec)
      .setStores(3)
      .setStorefiles(13)
      .setStoreUncompressedSizeMB(23)
      .setStorefileSizeMB(300)
      .setFilteredReadRequestsCount(200)
      .setStorefileIndexSizeKB(40)
      .setRootIndexSizeKB(303)
      .setReadRequestsCount(Integer.MAX_VALUE)
      .setWriteRequestsCount(Integer.MAX_VALUE)
      .build();
  return ClusterStatusProtos.ServerLoad.newBuilder()
      .addRegionLoads(firstLoad)
      .addRegionLoads(secondLoad)
      .build();
}
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$RegionLoad$Builder

Javadoc

Protobuf type hbase.pb.RegionLoad

Most used methods

  • build
  • setFilteredReadRequestsCount
    the current total filtered read requests made to region optional uint64 filtered_read_requests
  • setReadRequestsCount
    the current total read requests made to region optional uint64 read_requests_count = 8;
  • setRootIndexSizeKB
    The current total size of root-level indexes for the region, in KB. optional uint32 root_index
  • setStoreUncompressedSizeMB
    the total size of the store files for the region, uncompressed, in MB optional uint32 store_un
  • setStorefileIndexSizeKB
    The current total size of root-level store file indexes for the region, in KB. The same as {@link
  • setStorefileSizeMB
    the current total size of the store files for the region, in MB optional uint32 storefile_size
  • setStorefiles
    the number of storefiles for the region optional uint32 storefiles = 3;
  • setStores
    the number of stores for the region optional uint32 stores = 2;
  • setWriteRequestsCount
    the current total write requests made to region optional uint64 write_requests_count = 9;
  • setCompleteSequenceId
    the most recent sequence Id from cache flush optional uint64 complete_sequence_id = 15;
  • setCpRequestsCount
    the current total coprocessor requests made to region optional uint64 cp_requests_count = 20;
  • setCompleteSequenceId,
  • setCpRequestsCount,
  • setCurrentCompactedKVs,
  • setDataLocality,
  • setLastMajorCompactionTs,
  • setMemStoreSizeMB,
  • setRegionSpecifier,
  • setTotalCompactingKVs,
  • setTotalStaticBloomSizeKB,
  • setTotalStaticIndexSizeKB

Popular in Java

  • Running tasks concurrently on multiple threads
  • getOriginalFilename (MultipartFile)
    Return the original filename in the client's filesystem.This may contain path information depending
  • getResourceAsStream (ClassLoader)
  • findViewById (Activity)
  • Path (java.nio.file)
  • ResultSet (java.sql)
    An interface for an object which represents a database table entry, returned as the result of the qu
  • BlockingQueue (java.util.concurrent)
    A java.util.Queue that additionally supports operations that wait for the queue to become non-empty
  • HttpServlet (javax.servlet.http)
    Provides an abstract class to be subclassed to create an HTTP servlet suitable for a Web site. A sub
  • Table (org.hibernate.mapping)
    A relational table
  • Runner (org.openjdk.jmh.runner)
  • Github Copilot alternatives
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About Us · Contact Us · Careers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now