maybeHandler = null;
job = new HadoopDruidIndexerJob(config, maybeHandler);
job.setHadoopJobIdFile(hadoopJobIdFile);

try {
  if (job.run()) {
    // Success: report the published segments and job stats.
    return HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(
        new HadoopIndexGeneratorInnerProcessingStatus(
            job.getPublishedSegments(),
            job.getStats(),
            null
        )
    );
  } else {
    // The job ran but failed: report the stats and the job's error message.
    return HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(
        new HadoopIndexGeneratorInnerProcessingStatus(
            null,
            job.getStats(),
            job.getErrorMessage()
        )
    );
  }
}
catch (Exception e) {
  // The job threw: report the stats along with the exception message.
  return HadoopDruidIndexerConfig.JSON_MAPPER.writeValueAsString(
      new HadoopIndexGeneratorInnerProcessingStatus(
          null,
          job.getStats(),
          e.getMessage()
      )
  );
}
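For context, a minimal sketch of how the status string produced above could be read back by the parent process, assuming the same Jackson mapper and that HadoopIndexGeneratorInnerProcessingStatus is Jackson-deserializable; the method name parseStatus and the jsonStatus parameter are hypothetical:

```java
// Hypothetical helper, not the actual Druid API: turn the serialized status back into an object.
static HadoopIndexGeneratorInnerProcessingStatus parseStatus(String jsonStatus) throws IOException
{
  // Use the same ObjectMapper that wrote the string so field names line up.
  return HadoopDruidIndexerConfig.JSON_MAPPER.readValue(
      jsonStatus,
      HadoopIndexGeneratorInnerProcessingStatus.class
  );
}
```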
@Override
public void run()
{
  try {
    Injector injector = makeInjector();

    config = getHadoopDruidIndexerConfig();

    MetadataStorageUpdaterJobSpec metadataSpec = config.getSchema().getIOConfig().getMetadataUpdateSpec();
    // override metadata storage type based on HadoopIOConfig
    Preconditions.checkNotNull(metadataSpec.getType(), "type in metadataUpdateSpec must not be null");
    injector.getInstance(Properties.class).setProperty("druid.metadata.storage.type", metadataSpec.getType());

    config = HadoopDruidIndexerConfig.fromSpec(
        HadoopIngestionSpec.updateSegmentListIfDatasourcePathSpecIsUsed(
            config.getSchema(),
            HadoopDruidIndexerConfig.JSON_MAPPER,
            new MetadataStoreBasedUsedSegmentLister(
                injector.getInstance(IndexerMetadataStorageCoordinator.class)
            )
        )
    );

    List<Jobby> jobs = new ArrayList<>();
    jobs.add(new HadoopDruidDetermineConfigurationJob(config));
    jobs.add(new HadoopDruidIndexerJob(config, injector.getInstance(MetadataStorageUpdaterJobHandler.class)));

    JobHelper.runJobs(jobs, config);
  }
  catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
public Map<String, Object> getStats()
{
  if (job == null) {
    return null;
  }

  return job.getStats();
}
}