JobQueueDataset(DatasetSpecification spec, @EmbeddedDataset(EMBEDDED_TABLE_NAME) Table table) { super(spec.getName(), table); this.table = table; this.scheduleIds = new ArrayList<>(); // for backwards compat. use the old (pre-5.1) hard-coded value as default; // job queue datasets created with 5.1 or later will have this as a property this.numPartitions = spec.getIntProperty(Constants.Scheduler.JOB_QUEUE_NUM_PARTITIONS, DEFAULT_NUM_PARTITIONS); }
/**
 * Constructor. Wires this job queue to its embedded backing table and resolves the
 * partition count from the dataset specification.
 *
 * @param spec dataset specification; supplies the dataset name and, for datasets
 *     created with 5.1+, the partition-count property
 * @param table embedded table storing the queued jobs
 */
JobQueueDataset(DatasetSpecification spec, @EmbeddedDataset(EMBEDDED_TABLE_NAME) Table table) {
  super(spec.getName(), table);
  this.scheduleIds = new ArrayList<>();
  this.table = table;
  // Backwards compatibility: pre-5.1 datasets have no partition property in the spec,
  // so default to the historical hard-coded value.
  this.numPartitions =
      spec.getIntProperty(Constants.Scheduler.JOB_QUEUE_NUM_PARTITIONS, DEFAULT_NUM_PARTITIONS);
}
private void initializeVars(CConfiguration cConf, DatasetSpecification spec) { this.scanExecutor = null; this.rowKeyDistributor = null; RejectedExecutionHandler callerRunsPolicy = (r, executor) -> { REJECTION_LOG.info( "No more threads in the HBase scan thread pool. Consider increase {}. Performing scan in caller thread {}", Constants.Metrics.METRICS_HBASE_MAX_SCAN_THREADS, Thread.currentThread().getName() ); // Runs it from the caller thread if (!executor.isShutdown()) { r.run(); } }; int maxScanThread = cConf.getInt(Constants.Metrics.METRICS_HBASE_MAX_SCAN_THREADS); // Creates a executor that will shrink to 0 threads if left idle // Uses daemon thread, hence no need to worry about shutdown // When all threads are busy, use the caller thread to execute this.scanExecutor = new ThreadPoolExecutor(0, maxScanThread, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(), Threads.createDaemonThreadFactory("metrics-hbase-scanner-%d"), callerRunsPolicy); this.rowKeyDistributor = new RowKeyDistributorByHashPrefix( new RowKeyDistributorByHashPrefix. OneByteSimpleHash(spec.getIntProperty(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, 16))); }
@Override public MetricsTable getDataset(DatasetContext datasetContext, DatasetSpecification spec, Map<String, String> arguments, ClassLoader classLoader) throws IOException { int datasetSplits = spec.getIntProperty(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, 16); // Detect if there is a cdap-site change on the number of splits, which we don't support. // Log a warning if that's the case. if (cConf.getInt(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS) != datasetSplits) { CONFIG_CHANGE_LOG.warn( "Ignoring configuration {} with value {} from cdap-site.xml. " + "The system table {} already has a splits value {}, which can not be changed.", Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, cConf.getInt(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS), spec.getName(), datasetSplits); } return new HBaseMetricsTable(datasetContext, spec, hConf, hBaseTableUtil, cConf); }
@Override public MetricsTable getDataset(DatasetContext datasetContext, DatasetSpecification spec, Map<String, String> arguments, ClassLoader classLoader) throws IOException { int datasetSplits = spec.getIntProperty(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, 16); // Detect if there is a cdap-site change on the number of splits, which we don't support. // Log a warning if that's the case. if (cConf.getInt(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS) != datasetSplits) { CONFIG_CHANGE_LOG.warn( "Ignoring configuration {} with value {} from cdap-site.xml. " + "The system table {} already has a splits value {}, which can not be changed.", Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, cConf.getInt(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS), spec.getName(), datasetSplits); } return new HBaseMetricsTable(datasetContext, spec, hConf, hBaseTableUtil, cConf); }
OneByteSimpleHash(spec.getIntProperty(Constants.Metrics.METRICS_HBASE_TABLE_SPLITS, 16)));