Tabnine Logo
FsImageProto$INodeSection$INodeFile.getBlocksCount
Code Index — Add Tabnine to your IDE (free)

How to use
getBlocksCount
method
in
org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile

Best Java code snippets using org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.getBlocksCount (Showing top 14 results out of 315)

origin: org.apache.hadoop/hadoop-hdfs

++totalFiles;
INodeSection.INodeFile f = p.getFile();
totalBlocks += f.getBlocksCount();
long fileSize = 0;
for (BlockProto b : f.getBlocksList()) {
origin: org.apache.hadoop/hadoop-hdfs

if (f.getBlocksCount() > 0) {
 out.print("<" + INODE_SECTION_BLOCKS + ">");
 for (BlockProto b : f.getBlocksList()) {
origin: org.apache.hadoop/hadoop-hdfs

append(buffer, formatDate(file.getAccessTime()));
append(buffer, file.getPreferredBlockSize());
append(buffer, file.getBlocksCount());
append(buffer, FSImageLoader.getFileSize(file));
append(buffer, 0);  // NS_QUOTA
origin: io.prestosql.hadoop/hadoop-apache

append(buffer, formatDate(file.getAccessTime()));
append(buffer, file.getPreferredBlockSize());
append(buffer, file.getBlocksCount());
append(buffer, FSImageLoader.getFileSize(file));
append(buffer, 0);  // NS_QUOTA
origin: ch.cern.hadoop/hadoop-hdfs

append(buffer, formatDate(file.getAccessTime()));
append(buffer, file.getPreferredBlockSize());
append(buffer, file.getBlocksCount());
append(buffer, FSImageLoader.getFileSize(file));
append(buffer, 0);  // NS_QUOTA
origin: io.prestosql.hadoop/hadoop-apache

++totalFiles;
INodeSection.INodeFile f = p.getFile();
totalBlocks += f.getBlocksCount();
long fileSize = 0;
for (BlockProto b : f.getBlocksList()) {
origin: ch.cern.hadoop/hadoop-hdfs

++totalFiles;
INodeSection.INodeFile f = p.getFile();
totalBlocks += f.getBlocksCount();
long fileSize = 0;
for (BlockProto b : f.getBlocksList()) {
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Writes the XML dump for a single file inode: scalar attributes first,
 * then the block list (when non-empty), then the under-construction
 * lease information (when present).
 *
 * @param f the protobuf file inode taken from the fsimage INode section
 */
private void dumpINodeFile(INodeSection.INodeFile f) {
 // NOTE(review): "perferredBlockSize" is misspelled, but it is part of the
 // established XML output format that downstream consumers parse -- do not
 // "fix" the spelling here.
 o("replication", f.getReplication()).o("mtime", f.getModificationTime())
   .o("atime", f.getAccessTime())
   .o("perferredBlockSize", f.getPreferredBlockSize())
   .o("permission", dumpPermission(f.getPermission()));
 // One <block> element per block; files without blocks skip the
 // <blocks> wrapper entirely.
 if (f.getBlocksCount() > 0) {
  out.print("<" + INODE_SECTION_BLOCKS + ">");
  for (BlockProto b : f.getBlocksList()) {
   out.print("<block>");
   o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
     b.getNumBytes());
   out.print("</block>\n");
  }
  out.print("</blocks>\n");
 }
 // Files still open for write carry lease info (client name/machine).
 if (f.hasFileUC()) {
  INodeSection.FileUnderConstructionFeature u = f.getFileUC();
  out.print("<file-under-construction>");
  o("clientName", u.getClientName()).o("clientMachine",
    u.getClientMachine());
  out.print("</file-under-construction>\n");
 }
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Serializes one file inode to the XML dump stream: fixed attributes,
 * then its blocks, then any file-under-construction lease data.
 *
 * @param file the file inode message from the fsimage INode section
 */
private void dumpINodeFile(INodeSection.INodeFile file) {
 // The misspelled "perferredBlockSize" tag is kept deliberately so the
 // emitted XML stays identical to the historical output format.
 o("replication", file.getReplication())
   .o("mtime", file.getModificationTime())
   .o("atime", file.getAccessTime())
   .o("perferredBlockSize", file.getPreferredBlockSize())
   .o("permission", dumpPermission(file.getPermission()));

 if (file.getBlocksCount() > 0) {
  out.print("<blocks>");
  for (BlockProto block : file.getBlocksList()) {
   out.print("<block>");
   o("id", block.getBlockId())
     .o("genstamp", block.getGenStamp())
     .o("numBytes", block.getNumBytes());
   out.print("</block>\n");
  }
  out.print("</blocks>\n");
 }

 // Lease information is only present while a client holds the file open.
 if (file.hasFileUC()) {
  INodeSection.FileUnderConstructionFeature lease = file.getFileUC();
  out.print("<file-under-construction>");
  o("clientName", lease.getClientName())
    .o("clientMachine", lease.getClientMachine());
  out.print("</file-under-construction>\n");
 }
}
origin: marcelmay/hadoop-hdfs-fsimage-exporter

/**
 * Accumulates per-file metrics into the overall, per-group and per-user
 * aggregates for a single file inode visited during the fsimage walk.
 *
 * @param inode the visited inode (a FILE-type inode)
 * @param path  the inode's path within the namespace (unused here)
 */
@Override
public void onFile(FsImageProto.INodeSection.INode inode, String path) {
    final FsImageProto.INodeSection.INodeFile file = inode.getFile();
    final PermissionStatus perms = loader.getPermissionStatus(file.getPermission());
    final long size = FSImageLoader.getFileSize(file);
    final long blocks = file.getBlocksCount();

    // Namespace-wide totals.
    overallStats.sumBlocks.add(blocks);
    overallStats.fileSize.observe(size);
    overallStats.replication.observe(file.getReplication());

    // Per-group aggregates, creating the bucket on first sight.
    final GroupStats byGroup =
            report.groupStats.computeIfAbsent(perms.getGroupName(), report.createGroupStats);
    byGroup.sumBlocks.add(blocks);
    byGroup.fileSize.observe(size);

    // Per-user aggregates, creating the bucket on first sight.
    final UserStats byUser =
            report.userStats.computeIfAbsent(perms.getUserName(), report.createUserStat);
    byUser.sumBlocks.add(blocks);
    byUser.fileSize.observe(size);
    byUser.replication.observe(file.getReplication());
}
origin: io.prestosql.hadoop/hadoop-apache

/**
 * Protobuf-generated validity check for this INodeFile message.
 * Returns true only when every repeated block sub-message and the
 * optional XAttrs sub-message are themselves initialized. The result is
 * memoized in {@code memoizedIsInitialized} (-1 = unknown, 0 = no, 1 = yes).
 *
 * @return true if all nested messages are fully initialized
 */
public final boolean isInitialized() {
 byte isInitialized = memoizedIsInitialized;
 // Fast path: reuse the cached answer from a previous call.
 if (isInitialized != -1) return isInitialized == 1;
 for (int i = 0; i < getBlocksCount(); i++) {
  if (!getBlocks(i).isInitialized()) {
   memoizedIsInitialized = 0;
   return false;
  }
 }
 if (hasXAttrs()) {
  if (!getXAttrs().isInitialized()) {
   memoizedIsInitialized = 0;
   return false;
  }
 }
 memoizedIsInitialized = 1;
 return true;
}
origin: ch.cern.hadoop/hadoop-hdfs

/**
 * Protobuf-generated initialization check: verifies that every nested
 * block message and the optional XAttrs message are themselves
 * initialized, caching the outcome in {@code memoizedIsInitialized}
 * (-1 = not yet computed, 0 = false, 1 = true).
 *
 * @return true if all nested messages are fully initialized
 */
public final boolean isInitialized() {
 final byte cached = memoizedIsInitialized;
 if (cached != -1) {
  return cached == 1;
 }
 // The message is immutable, so the block count is loop-invariant.
 final int blockCount = getBlocksCount();
 for (int idx = 0; idx < blockCount; idx++) {
  if (getBlocks(idx).isInitialized()) {
   continue;
  }
  memoizedIsInitialized = 0;
  return false;
 }
 if (hasXAttrs() && !getXAttrs().isInitialized()) {
  memoizedIsInitialized = 0;
  return false;
 }
 memoizedIsInitialized = 1;
 return true;
}
origin: marcelmay/hadoop-hdfs-fsimage-exporter

/**
 * Records the block count and computed size of one visited file inode
 * into the per-path aggregate statistics.
 *
 * @param inode the visited inode (a FILE-type inode)
 * @param path  the inode's path within the namespace (unused here)
 */
@Override
public void onFile(FsImageProto.INodeSection.INode inode, String path) {
    final FsImageProto.INodeSection.INodeFile file = inode.getFile();
    pathStats.sumBlocks.add(file.getBlocksCount());
    pathStats.fileSize.observe(FSImageLoader.getFileSize(file));
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Protobuf-generated check that this INodeFile message is fully
 * initialized: every repeated block sub-message and the optional XAttrs
 * sub-message must themselves be initialized. Memoizes the result in
 * {@code memoizedIsInitialized} (-1 = unknown, 0 = no, 1 = yes) so
 * repeat calls are O(1).
 *
 * @return true if all nested messages are fully initialized
 */
public final boolean isInitialized() {
 byte isInitialized = memoizedIsInitialized;
 // Return the memoized answer when one exists.
 if (isInitialized != -1) return isInitialized == 1;
 for (int i = 0; i < getBlocksCount(); i++) {
  if (!getBlocks(i).isInitialized()) {
   memoizedIsInitialized = 0;
   return false;
  }
 }
 if (hasXAttrs()) {
  if (!getXAttrs().isInitialized()) {
   memoizedIsInitialized = 0;
   return false;
  }
 }
 memoizedIsInitialized = 1;
 return true;
}
org.apache.hadoop.hdfs.server.namenode.FsImageProto$INodeSection$INodeFile.getBlocksCount

Javadoc

repeated .hadoop.hdfs.BlockProto blocks = 6;

Popular methods of FsImageProto$INodeSection$INodeFile

  • getPermission
    optional fixed64 permission = 5;
  • getReplication
    optional uint32 replication = 1;
  • newBuilder
  • <init>
  • getAccessTime
    optional uint64 accessTime = 3;
  • getAcl
    optional .hadoop.hdfs.fsimage.INodeSection.AclFeatureProto acl = 8;
  • getBlocks
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getBlocksList
    repeated .hadoop.hdfs.BlockProto blocks = 6;
  • getDefaultInstance
  • getFileUC
    optional .hadoop.hdfs.fsimage.INodeSection.FileUnderConstructionFeature fileUC = 7;
  • getModificationTime
    optional uint64 modificationTime = 2;
  • getPreferredBlockSize
    optional uint64 preferredBlockSize = 4;
  • getModificationTime,
  • getPreferredBlockSize,
  • getSerializedSize,
  • getStoragePolicyID,
  • getUnknownFields,
  • getXAttrs,
  • hasAccessTime,
  • hasAcl,
  • hasFileUC

Popular in Java

  • Updating database using SQL prepared statement
  • runOnUiThread (Activity)
  • onCreateOptionsMenu (Activity)
  • setContentView (Activity)
  • PrintWriter (java.io)
    Wraps either an existing OutputStream or an existing Writerand provides convenience methods for prin
  • URLConnection (java.net)
    A connection to a URL for reading or writing. For HTTP connections, see HttpURLConnection for docume
  • Collection (java.util)
    Collection is the root of the collection hierarchy. It defines operations on data collections and t
  • TimeUnit (java.util.concurrent)
    A TimeUnit represents time durations at a given unit of granularity and provides utility methods to
  • ReentrantLock (java.util.concurrent.locks)
    A reentrant mutual exclusion Lock with the same basic behavior and semantics as the implicit monitor
  • Base64 (org.apache.commons.codec.binary)
    Provides Base64 encoding and decoding as defined by RFC 2045.This class implements section 6.8. Base
  • Top plugins for WebStorm
Tabnine Logo
  • Products

    Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now