Tabnine Logo
ClusterStatusProtos$RegionLoad
Code IndexAdd Tabnine to your IDE (free)

How to use
ClusterStatusProtos$RegionLoad
in
org.apache.hadoop.hbase.shaded.protobuf.generated

Best Java code snippets using org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$RegionLoad (Showing top 20 results out of 315)

origin: apache/hbase

 serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
RegionLoad.Builder regionLoadBldr = RegionLoad.newBuilder();
RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
for (HRegion region : regions) {
origin: apache/hbase

  String compactTime = "";
  if  (load != null) {
   if (load.getTotalCompactingKVs() > 0) {
    percentDone = String.format("%.2f", 100 *
      ((float) load.getCurrentCompactedKVs() / load.getTotalCompactingKVs())) + "%";
   if (load.getLastMajorCompactionTs() > 0) {
    FastDateFormat fdf = FastDateFormat.getInstance("yyyy-MM-dd HH:mm ZZ");
    compactTime = fdf.format(load.getLastMajorCompactionTs());
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getTotalCompactingKVs()), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getCurrentCompactedKVs()), jamonWriter);
origin: apache/hbase

org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStores()), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getStorefiles()), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(load.getStoreUncompressedSizeMB()
    * TraditionalBinaryPrefix.MEGA.value, "B", 1)), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(load.getStorefileSizeMB()
    * TraditionalBinaryPrefix.MEGA.value, "B", 1)), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(load.getTotalStaticIndexSizeKB()
    * TraditionalBinaryPrefix.KILO.value, "B", 1)), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(load.getTotalStaticBloomSizeKB()
    * TraditionalBinaryPrefix.KILO.value, "B", 1)), jamonWriter);
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(load.getDataLocality()), jamonWriter);
origin: org.apache.hbase/hbase-protocol-shaded

result = result && (hasRegionSpecifier() == other.hasRegionSpecifier());
if (hasRegionSpecifier()) {
 result = result && getRegionSpecifier()
   .equals(other.getRegionSpecifier());
result = result && (hasStores() == other.hasStores());
if (hasStores()) {
 result = result && (getStores()
   == other.getStores());
result = result && (hasStorefiles() == other.hasStorefiles());
if (hasStorefiles()) {
 result = result && (getStorefiles()
   == other.getStorefiles());
result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB());
if (hasStoreUncompressedSizeMB()) {
 result = result && (getStoreUncompressedSizeMB()
   == other.getStoreUncompressedSizeMB());
result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB());
if (hasStorefileSizeMB()) {
 result = result && (getStorefileSizeMB()
   == other.getStorefileSizeMB());
result = result && (hasMemStoreSizeMB() == other.hasMemStoreSizeMB());
if (hasMemStoreSizeMB()) {
 result = result && (getMemStoreSizeMB()
   == other.getMemStoreSizeMB());
origin: org.apache.hbase/hbase-protocol-shaded

hash = (19 * hash) + getDescriptor().hashCode();
if (hasRegionSpecifier()) {
 hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER;
 hash = (53 * hash) + getRegionSpecifier().hashCode();
if (hasStores()) {
 hash = (37 * hash) + STORES_FIELD_NUMBER;
 hash = (53 * hash) + getStores();
if (hasStorefiles()) {
 hash = (37 * hash) + STOREFILES_FIELD_NUMBER;
 hash = (53 * hash) + getStorefiles();
if (hasStoreUncompressedSizeMB()) {
 hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getStoreUncompressedSizeMB();
if (hasStorefileSizeMB()) {
 hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getStorefileSizeMB();
if (hasMemStoreSizeMB()) {
 hash = (37 * hash) + MEM_STORE_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getMemStoreSizeMB();
if (hasStorefileIndexSizeKB()) {
 hash = (37 * hash) + STOREFILE_INDEX_SIZE_KB_FIELD_NUMBER;
 hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong(
   getStorefileIndexSizeKB());
origin: org.apache.hbase/hbase-protocol-shaded

public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
 if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
 if (other.hasRegionSpecifier()) {
  mergeRegionSpecifier(other.getRegionSpecifier());
 if (other.hasStores()) {
  setStores(other.getStores());
 if (other.hasStorefiles()) {
  setStorefiles(other.getStorefiles());
 if (other.hasStoreUncompressedSizeMB()) {
  setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
 if (other.hasStorefileSizeMB()) {
  setStorefileSizeMB(other.getStorefileSizeMB());
 if (other.hasMemStoreSizeMB()) {
  setMemStoreSizeMB(other.getMemStoreSizeMB());
 if (other.hasStorefileIndexSizeKB()) {
  setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
 if (other.hasReadRequestsCount()) {
  setReadRequestsCount(other.getReadRequestsCount());
 if (other.hasWriteRequestsCount()) {
  setWriteRequestsCount(other.getWriteRequestsCount());
 if (other.hasTotalCompactingKVs()) {
origin: com.aliyun.hbase/alihbase-client

public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
 return ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(HBaseProtos.RegionSpecifier
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
origin: com.aliyun.hbase/alihbase-client

public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) {
 // Region identity: the raw region name bytes carried inside the region specifier.
 byte[] regionName = regionLoadPB.getRegionSpecifier().getValue().toByteArray();
 // Pre-compute the Size-typed metrics so the builder chain below stays flat.
 Size bloomFilterSize =
   new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE);
 Size uncompressedDataIndexSize =
   new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE);
 Size memStoreSize = new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE);
 Size storeFileIndexSize =
   new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE);
 Size rootLevelIndexSize = new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE);
 Size storeFileSize = new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE);
 Size uncompressedStoreFileSize =
   new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE);
 return RegionMetricsBuilder
   .newBuilder(regionName)
   .setBloomFilterSize(bloomFilterSize)
   .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs())
   .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
   .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
   // Locality defaults to 0.0f when the field was never populated by the server.
   .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f)
   .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
   .setStoreFileUncompressedDataIndexSize(uncompressedDataIndexSize)
   .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
   .setMemStoreSize(memStoreSize)
   .setReadRequestCount(regionLoadPB.getReadRequestsCount())
   .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
   .setStoreFileIndexSize(storeFileIndexSize)
   .setStoreFileRootLevelIndexSize(rootLevelIndexSize)
   .setStoreCount(regionLoadPB.getStores())
   .setStoreFileCount(regionLoadPB.getStorefiles())
   .setStoreFileSize(storeFileSize)
   // Per-family completed sequence ids, keyed by the raw family-name bytes.
   .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream()
    .collect(Collectors.toMap(
     (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(),
      ClusterStatusProtos.StoreSequenceId::getSequenceId)))
   .setUncompressedStoreFileSize(uncompressedStoreFileSize)
   .build();
}
origin: org.apache.hbase/hbase-server

private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 // One shared specifier builder: the type stays constant, only the value changes.
 HBaseProtos.RegionSpecifier.Builder specBuilder = HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME);
 HBaseProtos.RegionSpecifier firstSpec =
   specBuilder.setValue(ByteString.copyFromUtf8("ASDFGQWERT")).build();
 HBaseProtos.RegionSpecifier secondSpec =
   specBuilder.setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
 // Two synthetic region loads; request counters are pinned to Integer.MAX_VALUE
 // to exercise large-value handling downstream.
 ClusterStatusProtos.RegionLoad firstLoad = ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(firstSpec)
   .setStores(10)
   .setStorefiles(101)
   .setStoreUncompressedSizeMB(106)
   .setStorefileSizeMB(520)
   .setFilteredReadRequestsCount(100)
   .setStorefileIndexSizeKB(42)
   .setRootIndexSizeKB(201)
   .setReadRequestsCount(Integer.MAX_VALUE)
   .setWriteRequestsCount(Integer.MAX_VALUE)
   .build();
 ClusterStatusProtos.RegionLoad secondLoad = ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(secondSpec)
   .setStores(3)
   .setStorefiles(13)
   .setStoreUncompressedSizeMB(23)
   .setStorefileSizeMB(300)
   .setFilteredReadRequestsCount(200)
   .setStorefileIndexSizeKB(40)
   .setRootIndexSizeKB(303)
   .setReadRequestsCount(Integer.MAX_VALUE)
   .setWriteRequestsCount(Integer.MAX_VALUE)
   .build();
 // Assemble both region loads into a single server-load report.
 return ClusterStatusProtos.ServerLoad.newBuilder()
   .addRegionLoads(firstLoad)
   .addRegionLoads(secondLoad)
   .build();
}
origin: org.apache.hbase/hbase-server

private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 // Build the server-load report incrementally on a single ServerLoad builder.
 ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
 // First synthetic region: encoded name "ASDFGQWERT".
 serverLoad.addRegionLoads(ClusterStatusProtos.RegionLoad.newBuilder()
  .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("ASDFGQWERT"))
   .build())
  .setStores(10).setStorefiles(101)
  .setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
  .setFilteredReadRequestsCount(100)
  .setStorefileIndexSizeKB(42).setRootIndexSizeKB(201)
  // Max-int request counts exercise large-value handling in consumers.
  .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
  .build());
 // Second synthetic region: encoded name "QWERTYUIOP".
 serverLoad.addRegionLoads(ClusterStatusProtos.RegionLoad.newBuilder()
  .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder()
   .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
   .setValue(ByteString.copyFromUtf8("QWERTYUIOP"))
   .build())
  .setStores(3).setStorefiles(13)
  .setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
  .setFilteredReadRequestsCount(200)
  .setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
  .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
  .build());
 return serverLoad.build();
}
origin: org.apache.hbase/hbase-server

private RegionServerStatusProtos.RegionServerReportRequest.Builder
  makeRSReportRequestWithRegions(final ServerName sn, HRegionInfo... regions) {
 // One (empty) RegionLoad entry per region, keyed only by its region-name specifier.
 ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder();
 int index = 0;
 for (HRegionInfo region : regions) {
  HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
    .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
    // unsafeWrap avoids copying the region-name byte array.
    .setValue(UnsafeByteOperations.unsafeWrap(region.getRegionName()))
    .build();
  serverLoad.addRegionLoads(index++,
    ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec).build());
 }
 // Attach the server identity and the assembled load to the report request.
 return RegionServerStatusProtos.RegionServerReportRequest.newBuilder()
   .setServer(ProtobufUtil.toServerName(sn))
   .setLoad(serverLoad);
}
origin: org.apache.hbase/hbase-protocol-shaded

// Generated protobuf boilerplate: a fresh Builder for this message type.
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder() {
origin: com.aliyun.hbase/alihbase-client

// Sum of read and write request counters; a missing load contributes zero.
public static long getTotalRequestsCount(RegionLoad rl) {
 return rl == null ? 0 : rl.getReadRequestsCount() + rl.getWriteRequestsCount();
}
origin: org.apache.hbase/hbase-client

public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) {
 return ClusterStatusProtos.RegionLoad.newBuilder()
   .setRegionSpecifier(HBaseProtos.RegionSpecifier
    .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
origin: org.apache.hbase/hbase-client

public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regionLoadPB) {
 // Per-family completed sequence ids, keyed by the raw family-name bytes.
 Map<byte[], Long> storeSequenceIds = regionLoadPB.getStoreCompleteSequenceIdList().stream()
   .collect(Collectors.toMap(
    (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(),
     ClusterStatusProtos.StoreSequenceId::getSequenceId));
 // Locality defaults to 0.0f when the server never populated the field.
 float dataLocality =
   regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f;
 return RegionMetricsBuilder
   .newBuilder(regionLoadPB.getRegionSpecifier().getValue().toByteArray())
   // Counters first, then the Size-typed storage metrics.
   .setCompactedCellCount(regionLoadPB.getCurrentCompactedKVs())
   .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
   .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
   .setDataLocality(dataLocality)
   .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
   .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
   .setReadRequestCount(regionLoadPB.getReadRequestsCount())
   .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
   .setStoreCount(regionLoadPB.getStores())
   .setStoreFileCount(regionLoadPB.getStorefiles())
   .setBloomFilterSize(
    new Size(regionLoadPB.getTotalStaticBloomSizeKB(), Size.Unit.KILOBYTE))
   .setStoreFileUncompressedDataIndexSize(
    new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE))
   .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
   .setStoreFileIndexSize(
    new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE))
   .setStoreFileRootLevelIndexSize(
    new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE))
   .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE))
   .setUncompressedStoreFileSize(
    new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE))
   .setStoreSequenceIds(storeSequenceIds)
   .build();
}
origin: org.apache.hbase/hbase-client

public static long getTotalRequestsCount(RegionLoad rl) {
 // No load report means no requests to count.
 if (rl == null) {
  return 0;
 }
 long reads = rl.getReadRequestsCount();
 long writes = rl.getWriteRequestsCount();
 return reads + writes;
}
origin: apache/hbase

result = result && (hasRegionSpecifier() == other.hasRegionSpecifier());
if (hasRegionSpecifier()) {
 result = result && getRegionSpecifier()
   .equals(other.getRegionSpecifier());
result = result && (hasStores() == other.hasStores());
if (hasStores()) {
 result = result && (getStores()
   == other.getStores());
result = result && (hasStorefiles() == other.hasStorefiles());
if (hasStorefiles()) {
 result = result && (getStorefiles()
   == other.getStorefiles());
result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB());
if (hasStoreUncompressedSizeMB()) {
 result = result && (getStoreUncompressedSizeMB()
   == other.getStoreUncompressedSizeMB());
result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB());
if (hasStorefileSizeMB()) {
 result = result && (getStorefileSizeMB()
   == other.getStorefileSizeMB());
result = result && (hasMemStoreSizeMB() == other.hasMemStoreSizeMB());
if (hasMemStoreSizeMB()) {
 result = result && (getMemStoreSizeMB()
   == other.getMemStoreSizeMB());
origin: apache/hbase

hash = (19 * hash) + getDescriptor().hashCode();
if (hasRegionSpecifier()) {
 hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER;
 hash = (53 * hash) + getRegionSpecifier().hashCode();
if (hasStores()) {
 hash = (37 * hash) + STORES_FIELD_NUMBER;
 hash = (53 * hash) + getStores();
if (hasStorefiles()) {
 hash = (37 * hash) + STOREFILES_FIELD_NUMBER;
 hash = (53 * hash) + getStorefiles();
if (hasStoreUncompressedSizeMB()) {
 hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getStoreUncompressedSizeMB();
if (hasStorefileSizeMB()) {
 hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getStorefileSizeMB();
if (hasMemStoreSizeMB()) {
 hash = (37 * hash) + MEM_STORE_SIZE_MB_FIELD_NUMBER;
 hash = (53 * hash) + getMemStoreSizeMB();
if (hasStorefileIndexSizeKB()) {
 hash = (37 * hash) + STOREFILE_INDEX_SIZE_KB_FIELD_NUMBER;
 hash = (53 * hash) + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong(
   getStorefileIndexSizeKB());
origin: apache/hbase

  r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
if (regionLoadBldr == null) {
 regionLoadBldr = RegionLoad.newBuilder();
origin: apache/hbase

public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
 if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
 if (other.hasRegionSpecifier()) {
  mergeRegionSpecifier(other.getRegionSpecifier());
 if (other.hasStores()) {
  setStores(other.getStores());
 if (other.hasStorefiles()) {
  setStorefiles(other.getStorefiles());
 if (other.hasStoreUncompressedSizeMB()) {
  setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
 if (other.hasStorefileSizeMB()) {
  setStorefileSizeMB(other.getStorefileSizeMB());
 if (other.hasMemStoreSizeMB()) {
  setMemStoreSizeMB(other.getMemStoreSizeMB());
 if (other.hasStorefileIndexSizeKB()) {
  setStorefileIndexSizeKB(other.getStorefileIndexSizeKB());
 if (other.hasReadRequestsCount()) {
  setReadRequestsCount(other.getReadRequestsCount());
 if (other.hasWriteRequestsCount()) {
  setWriteRequestsCount(other.getWriteRequestsCount());
 if (other.hasTotalCompactingKVs()) {
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos$RegionLoad

Javadoc

Protobuf type hbase.pb.RegionLoad

Most used methods

  • newBuilder
  • getCurrentCompactedKVs
    the completed count of key values in currently running compaction optional uint64 current_compacted_KVs
  • getDataLocality
    The current data locality for region in the regionserver optional float data_locality = 16;
  • getFilteredReadRequestsCount
    the current total filtered read requests made to region optional uint64 filtered_read_requests_count
  • getLastMajorCompactionTs
    optional uint64 last_major_compaction_ts = 17 [default = 0];
  • getMemStoreSizeMB
    the current size of the memstore for the region, in MB optional uint32 mem_store_size_MB = 6;
  • getReadRequestsCount
    the current total read requests made to region optional uint64 read_requests_count = 8;
  • getStoreUncompressedSizeMB
    the total size of the store files for the region, uncompressed, in MB optional uint32 store_uncompressed_size_MB
  • getStorefileSizeMB
    the current total size of the store files for the region, in MB optional uint32 storefile_size_MB
  • getStorefiles
    the number of storefiles for the region optional uint32 storefiles = 3;
  • getStores
    the number of stores for the region optional uint32 stores = 2;
  • getTotalCompactingKVs
    the total compacting key values in currently running compaction optional uint64 total_compacting_KVs
  • getStores,
  • getTotalCompactingKVs,
  • getTotalStaticBloomSizeKB,
  • getTotalStaticIndexSizeKB,
  • getWriteRequestsCount,
  • getCompleteSequenceId,
  • getRegionSpecifier,
  • getRootIndexSizeKB,
  • getStoreCompleteSequenceIdList,
  • getStorefileIndexSizeKB

Popular in Java

  • Updating database using SQL prepared statement
  • orElseThrow (Optional)
    Return the contained value, if present, otherwise throw an exception to be created by the provided s
  • getSystemService (Context)
  • getOriginalFilename (MultipartFile)
    Return the original filename in the client's filesystem.This may contain path information depending
  • BorderLayout (java.awt)
    A border layout lays out a container, arranging and resizing its components to fit in five regions:
  • FileWriter (java.io)
    A specialized Writer that writes to a file in the file system. All write requests made by calling me
  • ArrayList (java.util)
    ArrayList is an implementation of List, backed by an array. All optional operations including adding
  • Date (java.util)
    A specific moment in time, with millisecond precision. Values typically come from System#currentTime
  • DateTimeFormat (org.joda.time.format)
    Factory that creates instances of DateTimeFormatter from patterns and styles. Datetime formatting i
  • Reflections (org.reflections)
    Reflections one-stop-shop objectReflections scans your classpath, indexes the metadata, allows you t
  • Top Sublime Text plugins
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now