// Tail of an anonymous function-object whose declaration starts above this chunk
// (presumably a Guava Function<MetricContext, Timer.Context> — confirm against the full file):
// starts and returns a running timer named `name` on the supplied MetricContext.
// The trailing `});` closes the anonymous class and the enclosing call.
@Override public Timer.Context apply(@Nonnull MetricContext input) { return input.timer(name).time(); } });
// Tail of an anonymous Callable<Collection<HiveSpec>> whose declaration starts above this chunk.
// Computes the HiveSpecs for `path` via the registration policy, timing the computation with
// HIVE_SPEC_COMPUTATION_TIMER (the timer context auto-closes via try-with-resources).
// The trailing `});` closes the anonymous class and the enclosing call.
@Override public Collection<HiveSpec> call() throws Exception { try (Timer.Context context = metricContext.timer(HIVE_SPEC_COMPUTATION_TIMER).time()) { return policy.getHiveSpecs(new Path(path)); } } });
/**
 * Adds timer metrics to {@link DistributedFileSystem#delete(Path, boolean)}.
 *
 * @param f path to delete
 * @param recursive whether to delete directory contents recursively
 * @return the result of the delegate's delete call
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
  // TimerContextWithLog stops the timer (and logs the call with its arguments) on close.
  try (Closeable context = new TimerContextWithLog(this.deleteTimer.time(), "delete", f, recursive)) {
    return super.delete(f, recursive);
  }
}
/**
 * Adds timer metrics to {@link FileSystem#listFiles(Path, boolean)}.
 *
 * @param f path to list
 * @param recursive whether to list files recursively
 * @return the delegate's iterator over located file statuses
 * @throws FileNotFoundException if the path does not exist
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public RemoteIterator<LocatedFileStatus> listFiles(Path f, boolean recursive)
    throws FileNotFoundException, IOException {
  // Note: only the call to obtain the iterator is timed, not the iteration itself.
  try (Closeable context = new TimerContextWithLog(this.listFilesTimer.time(), "listFiles", f, recursive)) {
    return super.listFiles(f, recursive);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#open(Path, int)}.
 *
 * @param f path to open
 * @param bufferSize size of the read buffer to use
 * @return the delegate's input stream for the path
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  // Times only the open call; reads on the returned stream are not instrumented here.
  try (Closeable context = new TimerContextWithLog(this.openTimer.time(), "open", f, bufferSize)) {
    return super.open(f, bufferSize);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#getFileStatus(Path)}.
 *
 * @param f path to stat
 * @return the delegate's file status for the path
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FileStatus getFileStatus(Path f) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.getFileStatusTimer.time(), "getFileStatus", f)) {
    return super.getFileStatus(f);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#setPermission(Path, FsPermission)}.
 *
 * @param f path whose permission is changed
 * @param permission new permission to apply
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public void setPermission(Path f, FsPermission permission) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.setPermissionTimer.time(), "setPermission", f, permission)) {
    super.setPermission(f, permission);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#append(Path, int, Progressable)}.
 *
 * @param p path to append to
 * @param bufferSize size of the write buffer to use
 * @param progress progress reporting callback
 * @return the delegate's output stream positioned for appending
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FSDataOutputStream append(Path p, int bufferSize, Progressable progress) throws IOException {
  // Only the path is logged here (unlike e.g. open/create, which log all args) — kept as-is.
  try (Closeable context = new TimerContextWithLog(this.appendTimer.time(), "append", p)) {
    return super.append(p, bufferSize, progress);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#concat(Path, Path[])}.
 *
 * @param trg target path to concatenate into
 * @param psrcs source paths whose blocks are moved into the target
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public void concat(Path trg, Path[] psrcs) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.concatTimer.time(), "concat", trg, psrcs)) {
    super.concat(trg, psrcs);
  }
}
// Closing brace of the enclosing class (preserved from the original line).
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#rename(Path, Path)}.
 *
 * @param src path to rename
 * @param dst destination path
 * @return the result of the delegate's rename call
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public boolean rename(Path src, Path dst) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.renameTimer.time(), "rename", src, dst)) {
    return super.rename(src, dst);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#setOwner(Path, String, String)}.
 *
 * @param f path whose ownership is changed
 * @param user new owner user name
 * @param group new owner group name
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public void setOwner(Path f, String user, String group) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.setOwnerTimer.time(), "setOwner", f, user, group)) {
    super.setOwner(f, user, group);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#mkdirs(Path, FsPermission)}.
 *
 * @param f directory path to create
 * @param permission permission to apply to the created directory
 * @return the result of the delegate's mkdirs call
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.mkdirTimer.time(), "mkdirs", f, permission)) {
    return super.mkdirs(f, permission);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#setTimes(Path, long, long)}.
 *
 * @param f path whose timestamps are changed
 * @param t new modification time
 * @param a new access time
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public void setTimes(Path f, long t, long a) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.setTimesTimer.time(), "setTimes", f, t, a)) {
    super.setTimes(f, t, a);
  }
}
/**
 * Attaches the Avro schema to the given {@link HiveRegistrationUnit}: either by pointing the
 * SerDe at an existing schema file (when {@code useSchemaFile} is set) or by writing the schema
 * out as an Avro file, timing the write with {@code HIVE_SPEC_SCHEMA_WRITING_TIMER}.
 *
 * @param path base directory under which the schema file lives (or is written)
 * @param hiveUnit registration unit that receives the schema property
 * @param schema Avro schema to register
 * @throws IOException if writing the schema file fails
 */
private void addSchemaProperties(Path path, HiveRegistrationUnit hiveUnit, Schema schema) throws IOException {
  Path schemaLocation = new Path(path, this.schemaFileName);
  if (this.useSchemaFile) {
    // Reuse the pre-existing schema file instead of writing one.
    hiveUnit.setSerDeProp(SCHEMA_URL, schemaLocation.toString());
    return;
  }
  try (Timer.Context timerContext = metricContext.timer(HIVE_SPEC_SCHEMA_WRITING_TIMER).time()) {
    addSchemaFromAvroFile(schema, schemaLocation, hiveUnit);
  }
}
/**
 * Adds timer metrics to
 * {@link DistributedFileSystem#create(Path, FsPermission, boolean, int, short, long, Progressable)}.
 *
 * @param f path to create
 * @param permission permission to apply to the created file
 * @param overwrite whether an existing file may be overwritten
 * @param bufferSize size of the write buffer to use
 * @param replication replication factor for the file
 * @param blockSize block size for the file
 * @param progress progress reporting callback
 * @return the delegate's output stream for the new file
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress) throws IOException {
  try (Closeable context = new TimerContextWithLog(this.createTimer.time(), "create", f, permission,
      overwrite, bufferSize, replication, blockSize, progress)) {
    return super.create(f, permission, overwrite, bufferSize, replication, blockSize, progress);
  }
}
/**
 * Checks whether the given Hive table exists.
 *
 * <p>When optimized checks are enabled, a hit in the existence cache short-circuits the metastore
 * round trip. Note the fast path only returns {@code true} on a hit; a miss always falls through
 * to the metastore query.
 *
 * @param dbName database containing the table
 * @param tableName table to check
 * @return true if the table exists
 * @throws IOException if the metastore call fails
 */
@Override
public boolean existsTable(String dbName, String tableName) throws IOException {
  String cacheKey = dbName + ":" + tableName;
  if (this.optimizedChecks && this.tableAndDbExistenceCache.getIfPresent(cacheKey) != null) {
    return true;
  }
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    // Time only the metastore query, not the client checkout.
    try (Timer.Context timerContext = this.metricContext.timer(TABLE_EXISTS).time()) {
      return client.get().tableExists(dbName, tableName);
    }
  } catch (TException e) {
    throw new IOException(String.format("Unable to check existence of table %s in db %s", tableName, dbName), e);
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#globStatus(Path)}.
 *
 * @param pathPattern glob pattern to match
 * @return the delegate's matching file statuses (recorded on the timer context for logging)
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FileStatus[] globStatus(Path pathPattern) throws IOException {
  try (TimerContextWithLog context = new TimerContextWithLog(this.globStatusTimer.time(), "globStatus", pathPattern)) {
    FileStatus[] statuses = super.globStatus(pathPattern);
    // Attach the result so the close-time log line can include it.
    context.setResult(statuses);
    return statuses;
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#listStatus(Path)}.
 *
 * @param path directory path to list
 * @return the delegate's file statuses (recorded on the timer context for logging)
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FileStatus[] listStatus(Path path) throws IOException {
  try (TimerContextWithLog context = new TimerContextWithLog(this.listStatusTimer.time(), "listStatus", path)) {
    FileStatus[] statuses = super.listStatus(path);
    // Attach the result so the close-time log line can include it.
    context.setResult(statuses);
    return statuses;
  }
}
/**
 * Adds timer metrics to {@link DistributedFileSystem#globStatus(Path, PathFilter)}.
 *
 * @param pathPattern glob pattern to match
 * @param filter additional filter applied to matched paths
 * @return the delegate's matching file statuses (recorded on the timer context for logging)
 * @throws IOException if the delegate filesystem call fails
 */
@Override
public FileStatus[] globStatus(Path pathPattern, PathFilter filter) throws IOException {
  try (TimerContextWithLog context =
      new TimerContextWithLog(this.globStatusTimer.time(), "globStatus", pathPattern, filter)) {
    FileStatus[] statuses = super.globStatus(pathPattern, filter);
    // Attach the result so the close-time log line can include it.
    context.setResult(statuses);
    return statuses;
  }
}
/**
 * Alters an existing partition of the given Hive table via the metastore, timing the call and
 * emitting a success event on completion or a failure event before rethrowing on error.
 *
 * @param table table containing the partition
 * @param partition partition to alter (its create time is refreshed before the call)
 * @throws IOException if the metastore call fails (wraps the underlying {@link TException})
 */
@Override
public void alterPartition(HiveTable table, HivePartition partition) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> pooledClient = this.clientPool.getClient()) {
    // Time only the metastore alter call, not the event submission.
    try (Timer.Context timer = this.metricContext.timer(ALTER_PARTITION).time()) {
      pooledClient.get().alter_partition(
          table.getDbName(),
          table.getTableName(),
          getPartitionWithCreateTimeNow(HiveMetaStoreUtils.getPartition(partition)));
    }
    HiveMetaStoreEventHelper.submitSuccessfulPartitionAlter(eventSubmitter, table, partition);
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedPartitionAlter(eventSubmitter, table, partition, e);
    throw new IOException(
        String.format("Unable to alter partition %s in table %s in db %s",
            partition.getValues(), table.getTableName(), table.getDbName()), e);
  }
}