Tabnine Logo
ClusterStatusProtos$ServerLoad.newBuilder
Code Index — Add Tabnine to your IDE (free)

How to use
newBuilder
method
in
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$ServerLoad

Best Java code snippets using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$ServerLoad.newBuilder (Showing top 20 results out of 315)

origin: apache/hbase

/**
 * Converts a {@code ServerMetrics} snapshot into its protobuf {@code ServerLoad} form.
 */
public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
 ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder();
 builder.setNumberOfRequests(metrics.getRequestCountPerSecond());
 builder.setTotalNumberOfRequests(metrics.getRequestCount());
 builder.setInfoServerPort(metrics.getInfoServerPort());
 // Heap figures are carried in whole megabytes, truncated to int.
 builder.setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE));
 builder.setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE));
 builder.addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames()));
 // Per-region and per-source replication metrics are mapped element-wise.
 builder.addAllRegionLoads(metrics.getRegionMetrics().values().stream()
   .map(RegionMetricsBuilder::toRegionLoad)
   .collect(Collectors.toList()));
 builder.addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream()
   .map(ProtobufUtil::toReplicationLoadSource)
   .collect(Collectors.toList()));
 builder.setReportStartTime(metrics.getLastReportTimestamp());
 builder.setReportEndTime(metrics.getReportTimestamp());
 // The replication sink is optional; only attach it when present.
 if (metrics.getReplicationLoadSink() != null) {
  builder.setReplLoadSink(
    ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink()));
 }
 return builder.build();
}
origin: apache/hbase

// Start a ServerLoad report and copy request throughput from the wrapper:
// the per-second rate is truncated to int, the running total stays a long.
// NOTE(review): regionServerWrapper is defined outside this snippet.
ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
serverLoad.setTotalNumberOfRequests(regionServerWrapper.getTotalRequestCount());
origin: apache/hbase

    .setTimeStampOfLastShippedOp(timeStampOfLastShippedOp + 1)
    .setSizeOfLogQueue(sizeOfLogQueue + 1).build();
ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
  .addReplLoadSource(rload1).addReplLoadSource(rload2).build();
request.setLoad(sl);
origin: org.apache.hbase/hbase-server

// Build a ServerLoad carrying only the expected total request count.
ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
                    .setTotalNumberOfRequests(expectedRequestNumber)
                    .build();
// Messages are immutable, so a changed count requires building a new one.
sl = ClusterStatusProtos.ServerLoad.newBuilder()
  .setTotalNumberOfRequests(expectedRequestNumber)
  .build();
origin: org.apache.hbase/hbase-server

    .setTimeStampOfLastShippedOp(timeStampOfLastShippedOp + 1)
    .setSizeOfLogQueue(sizeOfLogQueue + 1).build();
ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
  .addReplLoadSource(rload1).addReplLoadSource(rload2).build();
request.setLoad(sl);
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * <pre>
 ** load the server is under 
 * </pre>
 *
 * <code>optional .hbase.pb.ServerLoad load = 2;</code>
 */
public Builder mergeLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
 if (loadBuilder_ == null) {
  // No nested builder exists yet: operate on the message field directly.
  if (((bitField0_ & 0x00000002) == 0x00000002) &&
    load_ != null &&
    load_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
   // Field already holds a non-default value: merge the incoming message into it.
   load_ =
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
  } else {
   // Field is unset or still the default instance: adopt the incoming message as-is.
   load_ = value;
  }
  onChanged();
 } else {
  // A nested builder is active; delegate the merge to it.
  loadBuilder_.mergeFrom(value);
 }
 // Record the 'load' field (bit 0x2 of bitField0_) as present.
 bitField0_ |= 0x00000002;
 return this;
}
/**
origin: org.apache.hbase/hbase-protocol-shaded

/**
 * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
 */
public Builder mergeServerLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
 if (serverLoadBuilder_ == null) {
  // No nested builder exists yet: operate on the message field directly.
  if (((bitField0_ & 0x00000002) == 0x00000002) &&
    serverLoad_ != null &&
    serverLoad_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
   // Field already holds a non-default value: merge the incoming message into it.
   serverLoad_ =
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(serverLoad_).mergeFrom(value).buildPartial();
  } else {
   // Field is unset or still the default instance: adopt the incoming message as-is.
   serverLoad_ = value;
  }
  onChanged();
 } else {
  // A nested builder is active; delegate the merge to it.
  serverLoadBuilder_.mergeFrom(value);
 }
 // Record the 'server_load' field (bit 0x2 of bitField0_) as present.
 bitField0_ |= 0x00000002;
 return this;
}
/**
origin: org.apache.hbase/hbase-server

/** Fabricates a ServerLoad message carrying two synthetic region loads for test use. */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
   .build();
 HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
   .build();
 // Region one: larger store counts; request counters pinned at Integer.MAX_VALUE.
 ClusterStatusProtos.RegionLoad.Builder loadOne = ClusterStatusProtos.RegionLoad.newBuilder();
 loadOne.setRegionSpecifier(specOne);
 loadOne.setStores(10);
 loadOne.setStorefiles(101);
 loadOne.setStoreUncompressedSizeMB(106);
 loadOne.setStorefileSizeMB(520);
 loadOne.setFilteredReadRequestsCount(100);
 loadOne.setStorefileIndexSizeKB(42);
 loadOne.setRootIndexSizeKB(201);
 loadOne.setReadRequestsCount(Integer.MAX_VALUE);
 loadOne.setWriteRequestsCount(Integer.MAX_VALUE);
 // Region two: smaller store counts; same extreme request counters.
 ClusterStatusProtos.RegionLoad.Builder loadTwo = ClusterStatusProtos.RegionLoad.newBuilder();
 loadTwo.setRegionSpecifier(specTwo);
 loadTwo.setStores(3);
 loadTwo.setStorefiles(13);
 loadTwo.setStoreUncompressedSizeMB(23);
 loadTwo.setStorefileSizeMB(300);
 loadTwo.setFilteredReadRequestsCount(200);
 loadTwo.setStorefileIndexSizeKB(40);
 loadTwo.setRootIndexSizeKB(303);
 loadTwo.setReadRequestsCount(Integer.MAX_VALUE);
 loadTwo.setWriteRequestsCount(Integer.MAX_VALUE);
 return ClusterStatusProtos.ServerLoad.newBuilder()
   .addRegionLoads(loadOne.build())
   .addRegionLoads(loadTwo.build())
   .build();
}
origin: org.apache.hbase/hbase-server

/** Fabricates a ServerLoad message carrying two synthetic region loads for test use. */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
  .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
  .build();
 HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
  .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
  .build();
 // Region one: larger store counts; request counters pinned at Integer.MAX_VALUE.
 ClusterStatusProtos.RegionLoad.Builder loadOne = ClusterStatusProtos.RegionLoad.newBuilder();
 loadOne.setRegionSpecifier(specOne);
 loadOne.setStores(10);
 loadOne.setStorefiles(101);
 loadOne.setStoreUncompressedSizeMB(106);
 loadOne.setStorefileSizeMB(520);
 loadOne.setFilteredReadRequestsCount(100);
 loadOne.setStorefileIndexSizeKB(42);
 loadOne.setRootIndexSizeKB(201);
 loadOne.setReadRequestsCount(Integer.MAX_VALUE);
 loadOne.setWriteRequestsCount(Integer.MAX_VALUE);
 // Region two: smaller store counts; same extreme request counters.
 ClusterStatusProtos.RegionLoad.Builder loadTwo = ClusterStatusProtos.RegionLoad.newBuilder();
 loadTwo.setRegionSpecifier(specTwo);
 loadTwo.setStores(3);
 loadTwo.setStorefiles(13);
 loadTwo.setStoreUncompressedSizeMB(23);
 loadTwo.setStorefileSizeMB(300);
 loadTwo.setFilteredReadRequestsCount(200);
 loadTwo.setStorefileIndexSizeKB(40);
 loadTwo.setRootIndexSizeKB(303);
 loadTwo.setReadRequestsCount(Integer.MAX_VALUE);
 loadTwo.setWriteRequestsCount(Integer.MAX_VALUE);
 return ClusterStatusProtos.ServerLoad.newBuilder()
  .addRegionLoads(loadOne.build())
  .addRegionLoads(loadTwo.build())
  .build();
}
origin: com.aliyun.hbase/alihbase-client

/**
 * Converts a {@code ServerMetrics} snapshot into its protobuf {@code ServerLoad} form.
 */
public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
 ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder();
 builder.setNumberOfRequests(metrics.getRequestCountPerSecond());
 builder.setTotalNumberOfRequests(metrics.getRequestCount());
 builder.setInfoServerPort(metrics.getInfoServerPort());
 // Heap figures are carried in whole megabytes, truncated to int.
 builder.setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE));
 builder.setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE));
 builder.addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames()));
 // Per-region and per-source replication metrics are mapped element-wise.
 builder.addAllRegionLoads(metrics.getRegionMetrics().values().stream()
   .map(RegionMetricsBuilder::toRegionLoad)
   .collect(Collectors.toList()));
 builder.addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream()
   .map(ProtobufUtil::toReplicationLoadSource)
   .collect(Collectors.toList()));
 builder.setReportStartTime(metrics.getLastReportTimestamp());
 builder.setReportEndTime(metrics.getReportTimestamp());
 // The replication sink is optional; only attach it when present.
 if (metrics.getReplicationLoadSink() != null) {
  builder.setReplLoadSink(
    ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink()));
 }
 return builder.build();
}
origin: org.apache.hbase/hbase-protocol-shaded

public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
origin: org.apache.hbase/hbase-server

/** Assembles a region-server report request for {@code sn} listing each given region. */
private RegionServerStatusProtos.RegionServerReportRequest.Builder
  makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
 ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
 int index = 0;
 for (HRegionInfo region : regions) {
  // Identify each region by its full region name (not the encoded form).
  HBaseProtos.RegionSpecifier.Builder spec = HBaseProtos.RegionSpecifier.newBuilder();
  spec.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME);
  spec.setValue(UnsafeByteOperations.unsafeWrap(region.getRegionName()));
  serverLoad.addRegionLoads(index++,
    ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec.build()).build());
 }
 RegionServerStatusProtos.RegionServerReportRequest.Builder request =
   RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
 request.setServer(ProtobufUtil.toServerName(sn));
 request.setLoad(serverLoad);
 return request;
}
origin: org.apache.hbase/hbase-client

/**
 * Converts a {@code ServerMetrics} snapshot into its protobuf {@code ServerLoad} form.
 */
public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) {
 ClusterStatusProtos.ServerLoad.Builder builder = ClusterStatusProtos.ServerLoad.newBuilder();
 builder.setNumberOfRequests(metrics.getRequestCountPerSecond());
 builder.setTotalNumberOfRequests(metrics.getRequestCount());
 builder.setInfoServerPort(metrics.getInfoServerPort());
 // Heap figures are carried in whole megabytes, truncated to int.
 builder.setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE));
 builder.setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE));
 builder.addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames()));
 // Per-region and per-source replication metrics are mapped element-wise.
 builder.addAllRegionLoads(metrics.getRegionMetrics().values().stream()
   .map(RegionMetricsBuilder::toRegionLoad)
   .collect(Collectors.toList()));
 builder.addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream()
   .map(ProtobufUtil::toReplicationLoadSource)
   .collect(Collectors.toList()));
 builder.setReportStartTime(metrics.getLastReportTimestamp());
 builder.setReportEndTime(metrics.getReportTimestamp());
 // The replication sink is optional; only attach it when present.
 if (metrics.getReplicationLoadSink() != null) {
  builder.setReplLoadSink(
    ProtobufUtil.toReplicationLoadSink(metrics.getReplicationLoadSink()));
 }
 return builder.build();
}
origin: apache/hbase

/**
 * <pre>
 ** load the server is under 
 * </pre>
 *
 * <code>optional .hbase.pb.ServerLoad load = 2;</code>
 */
public Builder mergeLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
 if (loadBuilder_ == null) {
  // No nested builder exists yet: operate on the message field directly.
  if (((bitField0_ & 0x00000002) == 0x00000002) &&
    load_ != null &&
    load_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
   // Field already holds a non-default value: merge the incoming message into it.
   load_ =
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
  } else {
   // Field is unset or still the default instance: adopt the incoming message as-is.
   load_ = value;
  }
  onChanged();
 } else {
  // A nested builder is active; delegate the merge to it.
  loadBuilder_.mergeFrom(value);
 }
 // Record the 'load' field (bit 0x2 of bitField0_) as present.
 bitField0_ |= 0x00000002;
 return this;
}
/**
origin: apache/hbase

/** Fabricates a ServerLoad message carrying two synthetic region loads for test use. */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
   .build();
 HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
   .build();
 // Region one: larger store counts; request counters pinned at Integer.MAX_VALUE.
 ClusterStatusProtos.RegionLoad.Builder loadOne = ClusterStatusProtos.RegionLoad.newBuilder();
 loadOne.setRegionSpecifier(specOne);
 loadOne.setStores(10);
 loadOne.setStorefiles(101);
 loadOne.setStoreUncompressedSizeMB(106);
 loadOne.setStorefileSizeMB(520);
 loadOne.setFilteredReadRequestsCount(100);
 loadOne.setStorefileIndexSizeKB(42);
 loadOne.setRootIndexSizeKB(201);
 loadOne.setReadRequestsCount(Integer.MAX_VALUE);
 loadOne.setWriteRequestsCount(Integer.MAX_VALUE);
 // Region two: smaller store counts; also carries a coprocessor request count.
 ClusterStatusProtos.RegionLoad.Builder loadTwo = ClusterStatusProtos.RegionLoad.newBuilder();
 loadTwo.setRegionSpecifier(specTwo);
 loadTwo.setStores(3);
 loadTwo.setStorefiles(13);
 loadTwo.setStoreUncompressedSizeMB(23);
 loadTwo.setStorefileSizeMB(300);
 loadTwo.setFilteredReadRequestsCount(200);
 loadTwo.setStorefileIndexSizeKB(40);
 loadTwo.setRootIndexSizeKB(303);
 loadTwo.setReadRequestsCount(Integer.MAX_VALUE);
 loadTwo.setWriteRequestsCount(Integer.MAX_VALUE);
 loadTwo.setCpRequestsCount(100);
 return ClusterStatusProtos.ServerLoad.newBuilder()
   .addRegionLoads(loadOne.build())
   .addRegionLoads(loadTwo.build())
   .build();
}
origin: apache/hbase

/** Fabricates a ServerLoad message carrying two synthetic region loads for test use. */
private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 HBaseProtos.RegionSpecifier specOne = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
  .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
  .build();
 HBaseProtos.RegionSpecifier specTwo = HBaseProtos.RegionSpecifier.newBuilder()
  .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
  .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
  .build();
 // Region one: larger store counts; request counters pinned at Integer.MAX_VALUE.
 ClusterStatusProtos.RegionLoad.Builder loadOne = ClusterStatusProtos.RegionLoad.newBuilder();
 loadOne.setRegionSpecifier(specOne);
 loadOne.setStores(10);
 loadOne.setStorefiles(101);
 loadOne.setStoreUncompressedSizeMB(106);
 loadOne.setStorefileSizeMB(520);
 loadOne.setFilteredReadRequestsCount(100);
 loadOne.setStorefileIndexSizeKB(42);
 loadOne.setRootIndexSizeKB(201);
 loadOne.setReadRequestsCount(Integer.MAX_VALUE);
 loadOne.setWriteRequestsCount(Integer.MAX_VALUE);
 // Region two: smaller store counts; same extreme request counters.
 ClusterStatusProtos.RegionLoad.Builder loadTwo = ClusterStatusProtos.RegionLoad.newBuilder();
 loadTwo.setRegionSpecifier(specTwo);
 loadTwo.setStores(3);
 loadTwo.setStorefiles(13);
 loadTwo.setStoreUncompressedSizeMB(23);
 loadTwo.setStorefileSizeMB(300);
 loadTwo.setFilteredReadRequestsCount(200);
 loadTwo.setStorefileIndexSizeKB(40);
 loadTwo.setRootIndexSizeKB(303);
 loadTwo.setReadRequestsCount(Integer.MAX_VALUE);
 loadTwo.setWriteRequestsCount(Integer.MAX_VALUE);
 return ClusterStatusProtos.ServerLoad.newBuilder()
  .addRegionLoads(loadOne.build())
  .addRegionLoads(loadTwo.build())
  .build();
}
origin: apache/hbase

/**
 * <code>required .hbase.pb.ServerLoad server_load = 2;</code>
 */
public Builder mergeServerLoad(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
 if (serverLoadBuilder_ == null) {
  // No nested builder exists yet: operate on the message field directly.
  if (((bitField0_ & 0x00000002) == 0x00000002) &&
    serverLoad_ != null &&
    serverLoad_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
   // Field already holds a non-default value: merge the incoming message into it.
   serverLoad_ =
    org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(serverLoad_).mergeFrom(value).buildPartial();
  } else {
   // Field is unset or still the default instance: adopt the incoming message as-is.
   serverLoad_ = value;
  }
  onChanged();
 } else {
  // A nested builder is active; delegate the merge to it.
  serverLoadBuilder_.mergeFrom(value);
 }
 // Record the 'server_load' field (bit 0x2 of bitField0_) as present.
 bitField0_ |= 0x00000002;
 return this;
}
/**
origin: apache/hbase

@Test
public void testClusterRequests() throws Exception {
 // Report fabricated request totals to the master and check that the
 // cluster_requests metric tracks each reported value.
 RegionServerStatusProtos.RegionServerReportRequest.Builder report =
  RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
 ServerName serverName = cluster.getMaster(0).getServerName();
 report.setServer(ProtobufUtil.toServerName(serverName));
 MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
 long expectedRequestNumber = 10000;
 report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
  .setTotalNumberOfRequests(expectedRequestNumber)
  .build());
 master.getMasterRpcServices().regionServerReport(null, report.build());
 metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
 // A second report with a new total must move the counter accordingly.
 expectedRequestNumber = 15000;
 report.setLoad(ClusterStatusProtos.ServerLoad.newBuilder()
  .setTotalNumberOfRequests(expectedRequestNumber)
  .build());
 master.getMasterRpcServices().regionServerReport(null, report.build());
 metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
}
origin: apache/hbase

/** Assembles a region-server report request for {@code sn} listing each given region. */
private RegionServerStatusProtos.RegionServerReportRequest.Builder
  makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
 ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
 int index = 0;
 for (HRegionInfo region : regions) {
  // Identify each region by its full region name (not the encoded form).
  HBaseProtos.RegionSpecifier.Builder spec = HBaseProtos.RegionSpecifier.newBuilder();
  spec.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME);
  spec.setValue(UnsafeByteOperations.unsafeWrap(region.getRegionName()));
  serverLoad.addRegionLoads(index++,
    ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec.build()).build());
 }
 RegionServerStatusProtos.RegionServerReportRequest.Builder request =
   RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
 request.setServer(ProtobufUtil.toServerName(sn));
 request.setLoad(serverLoad);
 return request;
}
origin: apache/hbase

public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$ServerLoad.newBuilder

Popular methods of ClusterStatusProtos$ServerLoad

  • getTotalNumberOfRequests
    Total Number of requests from the start of the region server. optional uint64 total_number_of_
  • getCoprocessorsList
    Regionserver-level coprocessors, e.g., WALObserver implementations. Region-level coprocessors, on
  • getInfoServerPort
    The port number that this region server is hosting an info server on. optional uint32 info_server_port
  • getMaxHeapMB
    the maximum allowable size of the heap, in MB. optional uint32 max_heap_MB = 4;
  • getNumberOfRequests
    Number of requests since last report. optional uint64 number_of_requests = 1;
  • getRegionLoadsList
    Information on the load of individual regions. repeated .hbase.pb.RegionLoad region_loads = 5;
  • getReplLoadSink
    The replicationLoadSink for the replication Sink status of this region server. optional .hbase.
  • getReplLoadSourceList
    The replicationLoadSource for the replication Source status of this region server. repeated .hb
  • getReportEndTime
    Time when report was generated. time is measured as the difference, measured in milliseconds, bet
  • getReportStartTime
    Time when incremental (non-total) counts began being calculated (e.g. number_of_requests) time is
  • getUsedHeapMB
    the amount of used heap, in MB. optional uint32 used_heap_MB = 3;
  • hasReplLoadSink
    The replicationLoadSink for the replication Sink status of this region server. optional .hbase.
  • getUsedHeapMB,
  • hasReplLoadSink,
  • <init>,
  • equals,
  • getCoprocessors,
  • getCoprocessorsCount,
  • getDefaultInstance,
  • getDescriptor,
  • getRegionLoads

Popular in Java

  • Finding current android device location
  • findViewById (Activity)
  • putExtra (Intent)
  • notifyDataSetChanged (ArrayAdapter)
  • Container (java.awt)
    A generic Abstract Window Toolkit(AWT) container object is a component that can contain other AWT co
  • SocketException (java.net)
    This SocketException may be thrown during socket creation or setting options, and is the superclass
  • PriorityQueue (java.util)
    A PriorityQueue holds elements on a priority heap, which orders the elements according to their natu
  • ResourceBundle (java.util)
    ResourceBundle is an abstract class which is the superclass of classes which provide Locale-specifi
  • JCheckBox (javax.swing)
  • Option (scala)
  • Best plugins for Eclipse
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now