/**
 * Returns a {@link Timer} registered under a name composed of the given
 * prefix and suffixes.
 *
 * @param prefix the metric name prefix
 * @param suffixes additional metric name components appended to the prefix
 * @return a {@link Timer} with the composed name
 */
public Timer getTimer(String prefix, String... suffixes) {
  String timerName = MetricRegistry.name(prefix, suffixes);
  return this.metricContext.timer(timerName);
}
/**
 * Wraps the given {@link FileSystem} and creates one timer per instrumented
 * file-system operation under a {@link MetricContext} named after the
 * underlying file system's URI.
 *
 * @param underlying the file system to instrument
 */
public MetricsFileSystemInstrumentation(FileSystem underlying) {
  super(underlying);
  this.closer = Closer.create();
  // Context name derives from the FS URI so metrics from different file
  // systems remain distinguishable.
  this.metricContext = new MetricContext.Builder(underlying.getUri() + "_metrics").build();
  // Register the context with the closer so it is released when this
  // instrumentation is closed.
  this.metricContext = this.closer.register(metricContext);
  // One timer per instrumented FileSystem call; names mirror the method names.
  this.listStatusTimer = this.metricContext.timer("listStatus");
  this.listFilesTimer = this.metricContext.timer("listFiles");
  this.globStatusTimer = this.metricContext.timer("globStatus");
  this.mkdirTimer = this.metricContext.timer("mkdirs");
  this.renameTimer = this.metricContext.timer("rename");
  this.deleteTimer = this.metricContext.timer("delete");
  this.createTimer = this.metricContext.timer("create");
  this.openTimer = this.metricContext.timer("open");
  this.setOwnerTimer = this.metricContext.timer("setOwner");
  this.getFileStatusTimer = this.metricContext.timer("getFileStatus");
  this.setPermissionTimer = this.metricContext.timer("setPermission");
  this.setTimesTimer = this.metricContext.timer("setTimes");
  this.appendTimer = this.metricContext.timer("append");
  this.concatTimer = this.metricContext.timer("concat");
  // Aggregate view over all timers, e.g. for bulk reporting.
  this.allTimers = ImmutableList.<ContextAwareTimer>builder().add(
      this.listStatusTimer, this.listFilesTimer, this.globStatusTimer, this.mkdirTimer, this.renameTimer,
      this.deleteTimer, this.createTimer, this.openTimer, this.setOwnerTimer, this.getFileStatusTimer,
      this.setPermissionTimer, this.setTimesTimer, this.appendTimer, this.concatTimer
  ).build();
}
/**
 * Attaches schema information for the Avro data under {@code path} to the
 * given registration unit: either as a schema-file URL SerDe property, or by
 * reading the directory schema and writing it to a schema file.
 *
 * @param path a directory containing the data to register
 * @param hiveUnit the registration unit to receive the schema properties
 * @throws IOException if {@code path} is not a directory or schema handling fails
 */
private void addSchemaProperties(Path path, HiveRegistrationUnit hiveUnit) throws IOException {
  Preconditions.checkArgument(this.fs.getFileStatus(path).isDirectory(), path + " is not a directory.");
  Path schemaFile = new Path(path, this.schemaFileName);
  if (this.useSchemaFile) {
    // Point the SerDe at the schema file location directly.
    hiveUnit.setSerDeProp(SCHEMA_URL, schemaFile.toString());
    return;
  }
  Schema directorySchema;
  try (Timer.Context readTimer = metricContext.timer(HIVE_SPEC_SCHEMA_READING_TIMER).time()) {
    directorySchema = getDirectorySchema(path);
  }
  try (Timer.Context writeTimer = metricContext.timer(HIVE_SPEC_SCHEMA_WRITING_TIMER).time()) {
    addSchemaFromAvroFile(directorySchema, schemaFile, hiveUnit);
  }
}
/**
 * Checks whether the given table exists in the given database, consulting the
 * existence cache first when optimized checks are enabled.
 *
 * @throws IOException if the metastore call fails
 */
@Override
public boolean existsTable(String dbName, String tableName) throws IOException {
  // Fast path: a cache hit means the table was previously observed to exist.
  // NOTE(review): cache misses fall through to the metastore — confirm the
  // cache is populated elsewhere.
  String cacheKey = dbName + ":" + tableName;
  if (this.optimizedChecks && this.tableAndDbExistenceCache.getIfPresent(cacheKey) != null) {
    return true;
  }
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    try (Timer.Context existsTimer = this.metricContext.timer(TABLE_EXISTS).time()) {
      return client.get().tableExists(dbName, tableName);
    }
  } catch (TException e) {
    throw new IOException(String.format("Unable to check existence of table %s in db %s", tableName, dbName), e);
  }
}
/**
 * Looks up a single partition of the given table.
 *
 * @return the partition wrapped in {@link Optional}, or absent if it does not exist
 * @throws IOException if the metastore call fails for any other reason
 */
@Override
public Optional<HivePartition> getPartition(String dbName, String tableName, List<Column> partitionKeys,
    List<String> partitionValues) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    Partition nativePartition;
    try (Timer.Context getTimer = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
      nativePartition = client.get().getPartition(dbName, tableName, partitionValues);
    }
    return Optional.of(HiveMetaStoreUtils.getHivePartition(nativePartition));
  } catch (NoSuchObjectException e) {
    // A missing partition is an expected outcome, not an error.
    return Optional.<HivePartition> absent();
  } catch (TException e) {
    throw new IOException(
        "Unable to get partition " + partitionValues + " from table " + tableName + " in db " + dbName, e);
  }
}
/**
 * Ends the stage currently in progress, recording the elapsed wall-clock time
 * and optionally reporting it as a timer metric. No-op if no stage is active;
 * in all cases the current stage is cleared.
 */
public void endStage() {
  if (this.currentStage == null) {
    // Nothing in progress; currentStage is already null so there is nothing to clear.
    return;
  }
  long elapsedMillis = System.currentTimeMillis() - this.currentStageStart;
  this.timings.add(new Stage(this.currentStage, elapsedMillis));
  if (reportAsMetrics && submitter.getMetricContext().isPresent()) {
    // Timer name: <namespace>.<name>.<stage>.
    String timerName = submitter.getNamespace() + "." + name + "." + this.currentStage;
    submitter.getMetricContext().get().timer(timerName).update(elapsedMillis, TimeUnit.MILLISECONDS);
  }
  this.currentStage = null;
}
/**
 * Checks whether the given partition exists by probing the metastore:
 * {@code getPartition} throws {@link NoSuchObjectException} when absent.
 *
 * @throws IOException if the metastore call fails for any other reason
 */
@Override
public boolean existsPartition(String dbName, String tableName, List<Column> partitionKeys,
    List<String> partitionValues) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    try (Timer.Context probeTimer = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
      client.get().getPartition(dbName, tableName, partitionValues);
    }
    return true;
  } catch (NoSuchObjectException e) {
    return false;
  } catch (TException e) {
    throw new IOException(String.format("Unable to check existence of partition %s in table %s in db %s",
        partitionValues, tableName, dbName), e);
  }
}
// Meters counting successful vs. failed flow compilations.
this.flowCompilationSuccessFulMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_SUCCESSFUL_METER));
this.flowCompilationFailedMeter = Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_COMPILATION_FAILED_METER));
// Timer measuring flow compilation latency. The explicit <Timer> witness pins
// the Optional's type parameter — presumably the Dropwizard Timer; confirm
// against the field declaration.
this.flowCompilationTimer = Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.FLOW_COMPILATION_TIMER));
/**
 * Builds an Orchestrator: sets up orchestration metrics (when instrumentation
 * is enabled) and instantiates the configured {@code SpecCompiler}.
 *
 * @param config service configuration; may specify the spec compiler class
 * @param topologyCatalog optional catalog of topologies
 * @param log optional logger; a class-local logger is created when absent
 * @param instrumentationEnabled whether to create a metric context and metrics
 */
public Orchestrator(Config config, Optional<TopologyCatalog> topologyCatalog, Optional<Logger> log,
    boolean instrumentationEnabled) {
  _log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
  if (instrumentationEnabled) {
    // NOTE(review): the metric context is created for
    // IdentityFlowToJobSpecCompiler.class even though the compiler class is
    // configurable below — confirm this is intentional.
    this.metricContext = Instrumented.getMetricContext(ConfigUtils.configToState(config),
        IdentityFlowToJobSpecCompiler.class);
    this.flowOrchestrationSuccessFulMeter =
        Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_ORCHESTRATION_SUCCESSFUL_METER));
    this.flowOrchestrationFailedMeter =
        Optional.of(this.metricContext.meter(ServiceMetricNames.FLOW_ORCHESTRATION_FAILED_METER));
    this.flowOrchestrationTimer =
        Optional.<Timer>of(this.metricContext.timer(ServiceMetricNames.FLOW_ORCHESTRATION_TIMER));
  } else {
    // Instrumentation disabled: no context, all metric handles absent.
    this.metricContext = null;
    this.flowOrchestrationSuccessFulMeter = Optional.absent();
    this.flowOrchestrationFailedMeter = Optional.absent();
    this.flowOrchestrationTimer = Optional.absent();
  }
  this.aliasResolver = new ClassAliasResolver<>(SpecCompiler.class);
  this.topologyCatalog = topologyCatalog;
  try {
    // Resolve the spec compiler class: config override, else the default.
    String specCompilerClassName = ServiceConfigKeys.DEFAULT_GOBBLIN_SERVICE_FLOWCOMPILER_CLASS;
    if (config.hasPath(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY)) {
      specCompilerClassName = config.getString(ServiceConfigKeys.GOBBLIN_SERVICE_FLOWCOMPILER_CLASS_KEY);
    }
    _log.info("Using specCompiler class name/alias " + specCompilerClassName);
    // The name may be a class alias; resolve it before reflective construction.
    this.specCompiler = (SpecCompiler) ConstructorUtils.invokeConstructor(Class.forName(this.aliasResolver.resolve(
        specCompilerClassName)), config);
  } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException | InstantiationException
      | ClassNotFoundException e) {
    throw new RuntimeException(e);
  }
}
/**
 * Looks up a table in the given database.
 *
 * @return the table wrapped in {@link Optional}, or absent if it does not exist
 * @throws IOException if the metastore call fails for any other reason
 */
@Override
public Optional<HiveTable> getTable(String dbName, String tableName) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    Table nativeTable;
    try (Timer.Context getTimer = this.metricContext.timer(GET_HIVE_TABLE).time()) {
      nativeTable = client.get().getTable(dbName, tableName);
    }
    return Optional.of(HiveMetaStoreUtils.getHiveTable(nativeTable));
  } catch (NoSuchObjectException e) {
    // A missing table is an expected outcome, not an error.
    return Optional.<HiveTable> absent();
  } catch (TException e) {
    throw new IOException("Unable to get table " + tableName + " in db " + dbName, e);
  }
}
/**
 * Guice module configuration for the throttling rest.li server: binds the
 * rest.li config, shared broker, metric context, request timer, leader finder,
 * and the request filter chain.
 */
@Override
protected void configure() {
  try {
    RestLiConfig restLiConfig = new RestLiConfig();
    restLiConfig.setResourcePackageNames("gobblin.restli.throttling");
    bind(RestLiConfig.class).toInstance(restLiConfig);
    // Broker is injected by name so resources can look up shared state.
    bind(SharedResourcesBroker.class).annotatedWith(Names.named(LimiterServerResource.BROKER_INJECT_NAME)).toInstance(topLevelBroker);
    // Metric context and request timer come from the broker's shared resources.
    MetricContext metricContext = topLevelBroker.getSharedResource(new MetricContextFactory<ThrottlingServerScopes>(), new MetricContextKey());
    Timer timer = metricContext.timer(LimiterServerResource.REQUEST_TIMER_NAME);
    bind(MetricContext.class).annotatedWith(Names.named(LimiterServerResource.METRIC_CONTEXT_INJECT_NAME)).toInstance(metricContext);
    bind(Timer.class).annotatedWith(Names.named(LimiterServerResource.REQUEST_TIMER_INJECT_NAME)).toInstance(timer);
    bind(new TypeLiteral<Optional<LeaderFinder<URIMetadata>>>() {
    }).annotatedWith(Names.named(LimiterServerResource.LEADER_FINDER_INJECT_NAME)).toInstance(leaderFinder);
    // Response compression plus simple request/response logging.
    List<RestFilter> restFilters = new ArrayList<>();
    restFilters.add(new ServerCompressionFilter(EncodingType.SNAPPY.getHttpName()));
    List<StreamFilter> streamFilters = new ArrayList<>();
    streamFilters.add(new SimpleLoggingFilter());
    FilterChain filterChain = FilterChains.create(restFilters, streamFilters);
    bind(FilterChain.class).toInstance(filterChain);
  } catch (NotConfiguredException nce) {
    throw new RuntimeException(nce);
  }
}
}, new ServletModule() {
/** User needs to provide eventhub properties */ public EventhubDataWriter(Properties properties) { PasswordManager manager = PasswordManager.getInstance(properties); namespaceName = properties.getProperty(BatchedEventhubDataWriter.EVH_NAMESPACE); eventHubName = properties.getProperty(BatchedEventhubDataWriter.EVH_HUBNAME); sasKeyName = properties.getProperty(BatchedEventhubDataWriter.EVH_SAS_KEYNAME); String encodedSasKey = properties.getProperty(BatchedEventhubDataWriter.EVH_SAS_KEYVALUE); sasKey = manager.readPassword(encodedSasKey); targetURI = "https://" + namespaceName + ".servicebus.windows.net/" + eventHubName + "/messages"; httpclient = HttpClients.createDefault(); metricContext = Instrumented.getMetricContext(new State(properties),EventhubDataWriter.class); recordsAttempted = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_ATTEMPTED_METER); recordsSuccess = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_SUCCESS_METER); recordsFailed = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.RECORDS_FAILED_METER); bytesWritten = this.metricContext.meter(EventhubMetricNames.EventhubDataWriterMetrics.BYTES_WRITTEN_METER); writeTimer = this.metricContext.timer(EventhubMetricNames.EventhubDataWriterMetrics.WRITE_TIMER); }
/**
 * Drops the named table if it exists; no-op otherwise. Submits a success or
 * failure event accordingly.
 *
 * @throws IOException if the metastore call fails
 */
@Override
public void dropTableIfExists(String dbName, String tableName) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    boolean present;
    try (Timer.Context existsTimer = this.metricContext.timer(TABLE_EXISTS).time()) {
      present = client.get().tableExists(dbName, tableName);
    }
    if (!present) {
      return;
    }
    try (Timer.Context dropTimer = this.metricContext.timer(DROP_TABLE).time()) {
      client.get().dropTable(dbName, tableName);
    }
    String metastoreURI =
        this.clientPool.getHiveConf().get(HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE, "null");
    HiveMetaStoreEventHelper.submitSuccessfulTableDrop(eventSubmitter, dbName, tableName, metastoreURI);
    log.info("Dropped table " + tableName + " in db " + dbName);
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedTableDrop(eventSubmitter, dbName, tableName, e);
    throw new IOException(String.format("Unable to deregister table %s in db %s", tableName, dbName), e);
  }
}
/**
 * Drops the given partition if it exists; a missing partition is silently
 * ignored. Submits a success or failure event accordingly.
 *
 * @throws IOException if the metastore call fails for any other reason
 */
@Override
public void dropPartitionIfExists(String dbName, String tableName, List<Column> partitionKeys,
    List<String> partitionValues) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    // NOTE(review): this partition drop is timed with the DROP_TABLE timer,
    // which skews that metric — confirm whether a dedicated drop-partition
    // timer constant exists and should be used instead.
    try (Timer.Context context = this.metricContext.timer(DROP_TABLE).time()) {
      // deleteData=false: only the metastore entry is removed, not the data.
      client.get().dropPartition(dbName, tableName, partitionValues, false);
    }
    String metastoreURI =
        this.clientPool.getHiveConf().get(HiveMetaStoreClientFactory.HIVE_METASTORE_TOKEN_SIGNATURE, "null");
    HiveMetaStoreEventHelper.submitSuccessfulPartitionDrop(eventSubmitter, dbName, tableName, partitionValues,
        metastoreURI);
    log.info("Dropped partition " + partitionValues + " in table " + tableName + " in db " + dbName);
  } catch (NoSuchObjectException e) {
    // Partition does not exist. Nothing to do
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedPartitionDrop(eventSubmitter, dbName, tableName, partitionValues, e);
    throw new IOException(String.format("Unable to check existence of Hive partition %s in table %s in db %s",
        partitionValues, tableName, dbName), e);
  }
}
/**
 * Creates the given Hive table unless it already exists, holding the
 * per-table lock to serialize concurrent attempts.
 *
 * @return {@code true} if the table was created, {@code false} if it already existed
 * @throws IOException if the metastore call fails
 */
private boolean createTableIfNotExists(IMetaStoreClient client, Table table, HiveTable hiveTable)
    throws IOException {
  String dbName = table.getDbName();
  String tableName = table.getTableName();
  try (AutoCloseableLock lock = this.locks.getTableLock(dbName, tableName)) {
    boolean alreadyPresent;
    try (Timer.Context existsTimer = this.metricContext.timer(TABLE_EXISTS).time()) {
      alreadyPresent = client.tableExists(table.getDbName(), table.getTableName());
    }
    if (alreadyPresent) {
      return false;
    }
    try (Timer.Context createTimer = this.metricContext.timer(CREATE_HIVE_TABLE).time()) {
      client.createTable(getTableWithCreateTimeNow(table));
    }
    log.info(String.format("Created Hive table %s in db %s", tableName, dbName));
    HiveMetaStoreEventHelper.submitSuccessfulTableCreation(this.eventSubmitter, hiveTable);
    return true;
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedTableCreation(eventSubmitter, hiveTable, e);
    throw new IOException(String.format("Error in creating or altering Hive table %s in db %s",
        table.getTableName(), table.getDbName()), e);
  }
}
/**
 * Alters an existing partition of the given table, refreshing its create
 * time. Submits a success or failure event accordingly.
 *
 * @throws IOException if the metastore call fails
 */
@Override
public void alterPartition(HiveTable table, HivePartition partition) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    try (Timer.Context alterTimer = this.metricContext.timer(ALTER_PARTITION).time()) {
      Partition withCreateTime = getPartitionWithCreateTimeNow(HiveMetaStoreUtils.getPartition(partition));
      client.get().alter_partition(table.getDbName(), table.getTableName(), withCreateTime);
    }
    HiveMetaStoreEventHelper.submitSuccessfulPartitionAlter(eventSubmitter, table, partition);
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedPartitionAlter(eventSubmitter, table, partition, e);
    throw new IOException(String.format("Unable to alter partition %s in table %s in db %s", partition.getValues(),
        table.getTableName(), table.getDbName()), e);
  }
}
// Meters tracking how many permits were requested vs. actually granted.
Meter permitsRequestedMeter = resourceContext.meter(PERMITS_REQUESTED_METER_NAME);
Meter permitsGrantedMeter = resourceContext.meter(PERMITS_GRANTED_METER_NAME);
// Timer for the limiter — presumably measures permit-acquisition latency;
// confirm against where this timer's contexts are opened.
Timer limiterTimer = resourceContext.timer(LIMITER_TIMER_NAME);
/**
 * Registers the given spec with the Hive metastore: ensures the database and
 * table exist (creating or altering as needed) and adds or alters the spec's
 * partition when one is present. The whole registration is timed.
 *
 * @throws IOException wrapping any metastore failure
 */
@Override
protected void registerPath(HiveSpec spec) throws IOException {
  try (Timer.Context registerTimer = this.metricContext.timer(PATH_REGISTER_TIMER).time();
      AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    Table nativeTable = HiveMetaStoreUtils.getTable(spec.getTable());
    createDbIfNotExists(client.get(), nativeTable.getDbName());
    createOrAlterTable(client.get(), nativeTable, spec);
    Optional<HivePartition> specPartition = spec.getPartition();
    if (specPartition.isPresent()) {
      addOrAlterPartition(client.get(), nativeTable, HiveMetaStoreUtils.getPartition(specPartition.get()), spec);
    }
    HiveMetaStoreEventHelper.submitSuccessfulPathRegistration(eventSubmitter, spec);
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedPathRegistration(eventSubmitter, spec, e);
    throw new IOException(e);
  }
}
/**
 * Alters an existing table, refreshing its create time. Fails if the table
 * does not exist. Submits a success or failure event accordingly.
 *
 * @throws IOException if the table is missing or the metastore call fails
 */
@Override
public void alterTable(HiveTable table) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient()) {
    boolean present;
    try (Timer.Context existsTimer = this.metricContext.timer(TABLE_EXISTS).time()) {
      present = client.get().tableExists(table.getDbName(), table.getTableName());
    }
    if (!present) {
      // Propagates directly: IOException is not caught by the TException handler.
      throw new IOException("Table " + table.getTableName() + " in db " + table.getDbName() + " does not exist");
    }
    try (Timer.Context alterTimer = this.metricContext.timer(ALTER_TABLE).time()) {
      client.get().alter_table(table.getDbName(), table.getTableName(),
          getTableWithCreateTimeNow(HiveMetaStoreUtils.getTable(table)));
    }
    HiveMetaStoreEventHelper.submitSuccessfulTableAlter(eventSubmitter, table);
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedTableAlter(eventSubmitter, table, e);
    throw new IOException("Unable to alter table " + table.getTableName() + " in db " + table.getDbName(), e);
  }
}
/**
 * Adds the given partition to the given table if it does not already exist,
 * holding the per-table lock to serialize concurrent attempts.
 *
 * @return {@code true} if the partition was added, {@code false} if it already existed
 * @throws IOException if the metastore call fails
 */
@Override
public boolean addPartitionIfNotExists(HiveTable table, HivePartition partition) throws IOException {
  try (AutoReturnableObject<IMetaStoreClient> client = this.clientPool.getClient();
      AutoCloseableLock lock = this.locks.getTableLock(table.getDbName(), table.getTableName())) {
    try {
      // Probe for the partition; getPartition throws NoSuchObjectException when absent.
      try (Timer.Context context = this.metricContext.timer(GET_HIVE_PARTITION).time()) {
        client.get().getPartition(table.getDbName(), table.getTableName(), partition.getValues());
      }
      return false;
    } catch (NoSuchObjectException e) {
      // BUG FIX: the original called alter_partition() here, which fails for a
      // partition that does not exist yet; adding a new partition requires
      // add_partition(). (The ALTER_PARTITION timer constant is retained to
      // avoid referencing an undeclared constant — consider introducing a
      // dedicated add-partition timer.)
      try (Timer.Context context = this.metricContext.timer(ALTER_PARTITION).time()) {
        client.get().add_partition(getPartitionWithCreateTimeNow(HiveMetaStoreUtils.getPartition(partition)));
      }
      HiveMetaStoreEventHelper.submitSuccessfulPartitionAdd(this.eventSubmitter, table, partition);
      return true;
    }
  } catch (TException e) {
    HiveMetaStoreEventHelper.submitFailedPartitionAdd(this.eventSubmitter, table, partition, e);
    throw new IOException(String.format("Unable to add partition %s in table %s in db %s", partition.getValues(),
        table.getTableName(), table.getDbName()), e);
  }
}