/**
 * Check whether metrics collection and reporting are enabled or not.
 *
 * @param properties Configuration properties
 * @return whether metrics collection and reporting are enabled
 */
public static boolean isEnabled(Properties properties) {
  String enabledKey = ConfigurationKeys.METRICS_ENABLED_KEY;
  String enabledDefault = ConfigurationKeys.DEFAULT_METRICS_ENABLED;
  return PropertiesUtils.getPropAsBoolean(properties, enabledKey, enabledDefault);
}
/**
 * Whether job re-triggering is enabled, as configured in {@code jobProps};
 * falls back to the framework default when the property is absent.
 */
private boolean isRetriggeringEnabled() {
  return PropertiesUtils.getPropAsBoolean(
      jobProps,
      ConfigurationKeys.JOB_RETRIGGERING_ENABLED,
      ConfigurationKeys.DEFAULT_JOB_RETRIGGERING_ENABLED);
}
/**
 * Whether distributed job launching is enabled, looking at job-level properties
 * first and falling back to system-level properties.
 */
private boolean isDistributeJobEnabled() {
  // jobProps is applied second, so job-level entries win on key collisions.
  Properties merged = new Properties();
  merged.putAll(sysProps);
  merged.putAll(jobProps);
  String fallback = Boolean.toString(GobblinClusterConfigurationKeys.DEFAULT_DISTRIBUTED_JOB_LAUNCHER_ENABLED);
  return PropertiesUtils.getPropAsBoolean(merged,
      GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED, fallback);
}
/**
 * Starts services; when {@code CLEAN_ALL_DIST_JOBS} is set, first deletes every
 * job mapping currently recorded in {@code jobsMapping}.
 *
 * @throws Exception if reading states or deleting a mapping fails
 */
@Override
protected void startServices() throws Exception {
  boolean cleanAllDistJobs = PropertiesUtils.getPropAsBoolean(
      this.properties,
      GobblinClusterConfigurationKeys.CLEAN_ALL_DIST_JOBS,
      String.valueOf(GobblinClusterConfigurationKeys.DEFAULT_CLEAN_ALL_DIST_JOBS));
  if (!cleanAllDistJobs) {
    return;
  }
  for (org.apache.gobblin.configuration.State state : this.jobsMapping.getAllStates()) {
    String jobName = state.getId();
    LOGGER.info("Delete mapping for job " + jobName);
    this.jobsMapping.deleteMapping(jobName);
  }
}
/**
 * @param props properties should contain property "kafka.schema.registry.url", and optionally
 *              "kafka.schema.registry.max.cache.size" (default = 1000) and
 *              "kafka.schema.registry.cache.expire.after.write.min" (default = 10).
 */
public LiKafkaSchemaRegistry(Properties props) {
  String urlKey = KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL;
  Preconditions.checkArgument(props.containsKey(urlKey), String.format("Property %s not provided.", urlKey));
  this.url = props.getProperty(urlKey);
  this.namespaceOverride = KafkaAvroReporterUtil.extractOverrideNamespace(props);
  this.switchTopicNames = PropertiesUtils.getPropAsBoolean(props,
      KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME,
      KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT);
  // Pool size is taken from the work-unit creation thread-count property.
  int objPoolSize = Integer.parseInt(props.getProperty(
      ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
      "" + ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT));
  LOG.info("Create HttpClient pool with size " + objPoolSize);
  GenericObjectPoolConfig poolConfig = new GenericObjectPoolConfig();
  poolConfig.setMaxTotal(objPoolSize);
  poolConfig.setMaxIdle(objPoolSize);
  this.httpClientPool = new GenericObjectPool<>(new HttpClientFactory(), poolConfig);
}
_log.info("{} Scheduling flow spec: {} ", this.serviceName, addedSpec); scheduleJob(jobConfig, null); if (PropertiesUtils.getPropAsBoolean(jobConfig, ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false")) { _log.info("RunImmediately requested, hence executing FlowSpec: " + addedSpec); this.jobExecutor.execute(new NonScheduledJobRunner(flowSpec.getUri(), false, jobConfig, null));
private void buildInfluxDBMetricReporter(Properties properties) { boolean metricsEnabled = PropertiesUtils .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_INFLUXDB_METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_INFLUXDB_METRICS_ENABLED); if (metricsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED); if (eventsEnabled) {
private void buildGraphiteMetricReporter(Properties properties) { boolean metricsEnabled = PropertiesUtils .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_METRICS_ENABLED); if (metricsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED); if (eventsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY); String eventsPortProp = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_PORT);
@Override public Void call() throws JobException { boolean deleteJobWhenException = PropertiesUtils.getPropAsBoolean(this.jobProps, GobblinClusterConfigurationKeys.JOB_ALWAYS_DELETE, "false"); try { if (this.isDistributeJobEnabled) { runJobExecutionLauncher(); } else { runJobLauncherLoop(); } deleteJobSpec(); } catch (Exception e) { // delete job spec when exception occurred if (deleteJobWhenException) { deleteJobSpec(); } throw e; } return null; }
/** Returns true when this job is configured for re-triggering (default per framework). */
private boolean isRetriggeringEnabled() {
  boolean retriggeringEnabled = PropertiesUtils.getPropAsBoolean(jobProps,
      ConfigurationKeys.JOB_RETRIGGERING_ENABLED, ConfigurationKeys.DEFAULT_JOB_RETRIGGERING_ENABLED);
  return retriggeringEnabled;
}
/**
 * Check whether metrics collection and reporting are enabled or not.
 *
 * @param properties Configuration properties
 * @return whether metrics collection and reporting are enabled
 */
public static boolean isEnabled(Properties properties) {
  boolean metricsEnabled = PropertiesUtils.getPropAsBoolean(
      properties, ConfigurationKeys.METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_ENABLED);
  return metricsEnabled;
}
/**
 * Captures the outcome of a distributed job run.
 *
 * @param properties optional result properties; when present, the early-stop
 *                   flag is read from them (defaults to false otherwise)
 * @param throwable  optional failure cause
 */
public DistributeJobResult(Optional<Properties> properties, Optional<Throwable> throwable) {
  this.properties = properties;
  this.throwable = throwable;
  if (this.properties.isPresent()) {
    Properties resultProps = this.properties.get();
    isEarlyStopped = PropertiesUtils.getPropAsBoolean(resultProps, Partitioner.IS_EARLY_STOPPED, "false");
  }
}
}
/** Whether distributed job launching is enabled; job properties override system properties. */
private boolean isDistributeJobEnabled() {
  Properties effectiveProps = new Properties();
  effectiveProps.putAll(sysProps);
  effectiveProps.putAll(jobProps);
  return PropertiesUtils.getPropAsBoolean(effectiveProps,
      GobblinClusterConfigurationKeys.DISTRIBUTED_JOB_LAUNCHER_ENABLED,
      Boolean.toString(GobblinClusterConfigurationKeys.DEFAULT_DISTRIBUTED_JOB_LAUNCHER_ENABLED));
}
/**
 * @param props properties should contain property "kafka.schema.registry.url", and optionally
 *              "kafka.schema.registry.max.cache.size" (default = 1000) and
 *              "kafka.schema.registry.cache.expire.after.write.min" (default = 10).
 */
public LiKafkaSchemaRegistry(Properties props) {
  Preconditions.checkArgument(
      props.containsKey(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL),
      String.format("Property %s not provided.", KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL));
  this.url = props.getProperty(KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_URL);
  this.namespaceOverride = KafkaAvroReporterUtil.extractOverrideNamespace(props);
  this.switchTopicNames = PropertiesUtils.getPropAsBoolean(props,
      KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME,
      KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_SWITCH_NAME_DEFAULT);
  // HttpClient pool is sized from the work-unit creation thread-count setting.
  String threadCountProp = props.getProperty(
      ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_THREADS,
      "" + ConfigurationKeys.KAFKA_SOURCE_WORK_UNITS_CREATION_DEFAULT_THREAD_COUNT);
  int httpClientPoolSize = Integer.parseInt(threadCountProp);
  LOG.info("Create HttpClient pool with size " + httpClientPoolSize);
  GenericObjectPoolConfig poolCfg = new GenericObjectPoolConfig();
  poolCfg.setMaxTotal(httpClientPoolSize);
  poolCfg.setMaxIdle(httpClientPoolSize);
  this.httpClientPool = new GenericObjectPool<>(new HttpClientFactory(), poolCfg);
}
/**
 * Runs the job synchronously, then removes the job from the catalog so a
 * non-scheduled (run-once) job is not re-executed. If the run fails, cleanup
 * is delegated to {@code deleteJobSpec(alwaysDelete, isDeleted)}; a
 * {@code JobException} is logged and swallowed, while any other exception is
 * rethrown after cleanup.
 */
@Override public void run() {
  // Whether the job spec should be deleted even when the run fails.
  boolean alwaysDelete = PropertiesUtils.getPropAsBoolean(this.jobProps,
      GobblinClusterConfigurationKeys.JOB_ALWAYS_DELETE, "false");
  // Tracks whether the catalog entry was already removed, so failure-path
  // cleanup knows not to remove it again.
  boolean isDeleted = false;
  try {
    GobblinHelixJobScheduler.this.jobSchedulerMetrics.updateTimeBeforeJobLaunching(this.jobProps);
    GobblinHelixJobScheduler.this.jobSchedulerMetrics.updateTimeBetweenJobSchedulingAndJobLaunching(this.creationTimeInMillis, System.currentTimeMillis());
    GobblinHelixJobScheduler.this.runJob(this.jobProps, this.jobListener);
    // remove non-scheduled job catalog once done so it won't be re-executed
    if (GobblinHelixJobScheduler.this.jobCatalog != null) {
      try {
        GobblinHelixJobScheduler.this.jobCatalog.remove(new URI(jobUri));
        isDeleted = true;
      } catch (URISyntaxException e) {
        LOGGER.error("Failed to remove job with bad uri " + jobUri, e);
      }
    }
  } catch (JobException je) {
    deleteJobSpec(alwaysDelete, isDeleted);
    LOGGER.error("Failed to run job " + this.jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY), je);
  } catch (Exception e) {
    deleteJobSpec(alwaysDelete, isDeleted);
    throw e;
  }
}
}
_log.info("{} Scheduling flow spec: {} ", this.serviceName, addedSpec); scheduleJob(jobConfig, null); if (PropertiesUtils.getPropAsBoolean(jobConfig, ConfigurationKeys.FLOW_RUN_IMMEDIATELY, "false")) { _log.info("RunImmediately requested, hence executing FlowSpec: " + addedSpec); this.jobExecutor.execute(new NonScheduledJobRunner(flowSpec.getUri(), false, jobConfig, null));
private void buildGraphiteMetricReporter(Properties properties) { boolean metricsEnabled = PropertiesUtils .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_METRICS_ENABLED); if (metricsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_ENABLED); if (eventsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_GRAPHITE_EVENTS_VALUE_AS_KEY); String eventsPortProp = properties.getProperty(ConfigurationKeys.METRICS_REPORTING_GRAPHITE_EVENTS_PORT);
private void buildInfluxDBMetricReporter(Properties properties) { boolean metricsEnabled = PropertiesUtils .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_INFLUXDB_METRICS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_INFLUXDB_METRICS_ENABLED); if (metricsEnabled) { .getPropAsBoolean(properties, ConfigurationKeys.METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED_KEY, ConfigurationKeys.DEFAULT_METRICS_REPORTING_INFLUXDB_EVENTS_ENABLED); if (eventsEnabled) {