public void updateRollbackMetrics(long durationInMs, long numFilesDeleted) {
  if (config.isMetricsOn()) {
    logger.info(String.format("Sending rollback metrics (duration=%d, numFilesDeleted=%d)",
        durationInMs, numFilesDeleted));
    registerGauge(getMetricsName("rollback", "duration"), durationInMs);
    registerGauge(getMetricsName("rollback", "numFilesDeleted"), numFilesDeleted);
  }
}
public Timer.Context getCompactionCtx() {
  // Lazily create the compaction timer the first time compaction is timed.
  if (config.isMetricsOn() && compactionTimer == null) {
    compactionTimer = createTimer(compactionTimerName);
  }
  return compactionTimer == null ? null : compactionTimer.time();
}
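// Illustrative sketch (not part of the original code): the Timer.Context returned by these helpers
// follows the standard Dropwizard pattern -- stop() returns the elapsed time in nanoseconds, which
// getDurationInMs(...) is expected to convert to milliseconds. timedCompactionSketch() and
// doCompactionWork() are hypothetical placeholders.
private void timedCompactionSketch() {
  final Timer.Context ctx = metrics.getCompactionCtx(); // null when metrics are disabled
  doCompactionWork(); // hypothetical stand-in for the work being timed
  if (ctx != null) {
    long durationInMs = metrics.getDurationInMs(ctx.stop());
    logger.info("Compaction elapsed time (milliseconds): " + durationInMs);
  }
}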
final Timer.Context finalizeCtx = metrics.getFinalizeCtx();
try {
  table.finalizeWrite(jsc, stats);
  if (finalizeCtx != null) {
    Optional<Long> durationInMs = Optional.of(metrics.getDurationInMs(finalizeCtx.stop()));
    durationInMs.ifPresent(duration -> {
      logger.info("Finalize write elapsed time (milliseconds): " + duration);
      metrics.updateFinalizeWriteMetrics(duration, stats.size());
    });
  }
  long durationInMs = metrics.getDurationInMs(writeContext.stop());
  metrics.updateCommitMetrics(HoodieActiveTimeline.COMMIT_FORMATTER.parse(commitTime).getTime(),
      durationInMs, metadata, actionType);
  writeContext = null;
  // ... (rest of the method, including exception handling, not shown in this fragment)
try { logger.info("Cleaner started"); final Timer.Context context = metrics.getCleanCtx(); durationInMs = Optional.of(metrics.getDurationInMs(context.stop())); logger.info("cleanerElaspsedTime (Minutes): " + durationInMs.get() / (1000 * 60)); logger.info("Cleaned " + metadata.getTotalFilesDeleted() + " files"); metrics .updateCleanMetrics(durationInMs.orElseGet(() -> -1L), metadata.getTotalFilesDeleted());
final Timer.Context context = metrics.getRollbackCtx();
String startRollbackTime = HoodieActiveTimeline.COMMIT_FORMATTER.format(new Date());
// (the rollback work that produces `stats` is not shown in this fragment)
durationInMs = Optional.of(metrics.getDurationInMs(context.stop()));
long numFilesDeleted = stats.stream().mapToLong(stat -> stat.getSuccessDeleteFiles().size()).sum();
metrics.updateRollbackMetrics(durationInMs.get(), numFilesDeleted);
@VisibleForTesting
HoodieWriteClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, boolean rollbackInFlight,
    HoodieIndex index) {
  this.fs = FSUtils.getFs(clientConfig.getBasePath(), jsc.hadoopConfiguration());
  this.jsc = jsc;
  this.config = clientConfig;
  this.index = index;
  this.metrics = new HoodieMetrics(config, config.getTableName());
  this.rollbackInFlight = rollbackInFlight;
}
private HoodieTable getTableAndInitCtx() {
  // Create a Hoodie table which encapsulates the commits and files visible
  HoodieTable table = HoodieTable.getHoodieTable(
      new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true), config, jsc);
  if (table.getMetaClient().getCommitActionType().equals(HoodieTimeline.COMMIT_ACTION)) {
    writeContext = metrics.getCommitCtx();
  } else {
    writeContext = metrics.getDeltaCommitCtx();
  }
  return table;
}
/**
 * Commit compaction and track metrics.
 *
 * @param compactedStatuses Compaction write statuses
 * @param table Hoodie table
 * @param compactionCommitTime Compaction commit time
 * @param autoCommit Whether to auto-commit
 * @param extraMetadata Extra metadata to store
 */
protected void commitCompaction(JavaRDD<WriteStatus> compactedStatuses, HoodieTable<T> table,
    String compactionCommitTime, boolean autoCommit, Optional<Map<String, String>> extraMetadata) {
  if (autoCommit) {
    HoodieCommitMetadata metadata =
        doCompactionCommit(compactedStatuses, table.getMetaClient(), compactionCommitTime, extraMetadata);
    if (compactionTimer != null) {
      long durationInMs = metrics.getDurationInMs(compactionTimer.stop());
      try {
        metrics.updateCommitMetrics(
            HoodieActiveTimeline.COMMIT_FORMATTER.parse(compactionCommitTime).getTime(),
            durationInMs, metadata, HoodieActiveTimeline.COMPACTION_ACTION);
      } catch (ParseException e) {
        throw new HoodieCommitException(
            "Commit time is not of valid format. Failed to commit compaction "
                + config.getBasePath() + " at time " + compactionCommitTime, e);
      }
    }
    logger.info("Compacted successfully on commit " + compactionCommitTime);
  } else {
    logger.info("Compaction did not run for commit " + compactionCommitTime);
  }
}
@Test
public void testRegisterGauge() {
  metrics.registerGauge("metric1", 123L);
  assertTrue(Metrics.getInstance().getRegistry().getGauges().get("metric1").getValue().toString().equals("123"));
}
}
public HoodieMetrics(HoodieWriteConfig config, String tableName) {
  this.config = config;
  this.tableName = tableName;
  if (config.isMetricsOn()) {
    Metrics.init(config);
    this.rollbackTimerName = getMetricsName("timer", HoodieTimeline.ROLLBACK_ACTION);
    this.cleanTimerName = getMetricsName("timer", HoodieTimeline.CLEAN_ACTION);
    this.commitTimerName = getMetricsName("timer", HoodieTimeline.COMMIT_ACTION);
    this.deltaCommitTimerName = getMetricsName("timer", HoodieTimeline.DELTA_COMMIT_ACTION);
    this.finalizeTimerName = getMetricsName("timer", "finalize");
    this.compactionTimerName = getMetricsName("timer", HoodieTimeline.COMPACTION_ACTION);
  }
}
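// Illustrative sketch (an assumption, not the verified implementation): the timer and gauge names
// above are built by getMetricsName(action, metric). A common shape is to prefix the table name,
// yielding names like "<tableName>.timer.commit" or "<tableName>.rollback.duration"; the exact
// format string here is guessed for illustration.
String getMetricsName(String action, String metric) {
  return config == null ? null : String.format("%s.%s.%s", tableName, action, metric);
}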
/**
 * Perform compaction operations as specified in the compaction commit file.
 *
 * @param compactionInstant Compaction instant
 * @param activeTimeline Active timeline
 * @param autoCommit Whether to commit after compaction
 * @return RDD of write statuses
 */
private JavaRDD<WriteStatus> runCompaction(HoodieInstant compactionInstant,
    HoodieActiveTimeline activeTimeline, boolean autoCommit) throws IOException {
  HoodieCompactionPlan compactionPlan = AvroUtils.deserializeCompactionPlan(
      activeTimeline.getInstantAuxiliaryDetails(compactionInstant).get());
  // Mark instant as compaction inflight
  activeTimeline.transitionCompactionRequestedToInflight(compactionInstant);
  compactionTimer = metrics.getCompactionCtx();
  // Create a Hoodie table which encapsulates the commits and files visible
  HoodieTableMetaClient metaClient =
      new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true);
  HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);
  JavaRDD<WriteStatus> statuses = table.compact(jsc, compactionInstant.getTimestamp(), compactionPlan);
  // Force the compaction action to execute
  statuses.persist(config.getWriteStatusStorageLevel());
  // Pass extra metadata so that it gets stored in the commit file automatically
  commitCompaction(statuses, table, compactionInstant.getTimestamp(), autoCommit,
      Optional.ofNullable(compactionPlan.getExtraMetadata()));
  return statuses;
}
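// Illustrative caller sketch (not part of the original code; names and wiring are assumptions):
// shows how a compaction entry point might resolve the requested compaction instant on the active
// timeline before delegating to runCompaction(). The method name compactSketch() and the way the
// HoodieInstant is constructed are hypothetical.
private JavaRDD<WriteStatus> compactSketch(String compactionInstantTime, boolean autoCommit)
    throws IOException {
  HoodieTableMetaClient metaClient =
      new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true);
  HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
  // Compaction is scheduled as a REQUESTED instant; runCompaction() transitions it to inflight.
  HoodieInstant compactionInstant = new HoodieInstant(HoodieInstant.State.REQUESTED,
      HoodieTimeline.COMPACTION_ACTION, compactionInstantTime);
  return runCompaction(compactionInstant, activeTimeline, autoCommit);
}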
try { logger.info("Cleaner started"); final Timer.Context context = metrics.getCleanCtx(); durationInMs = Optional.of(metrics.getDurationInMs(context.stop())); logger.info("cleanerElaspsedTime (Minutes): " + durationInMs.get() / (1000 * 60)); logger.info("Cleaned " + metadata.getTotalFilesDeleted() + " files"); metrics .updateCleanMetrics(durationInMs.orElseGet(() -> -1L), metadata.getTotalFilesDeleted());
final Timer.Context context = metrics.getRollbackCtx(); String startRollbackTime = HoodieActiveTimeline.COMMIT_FORMATTER.format(new Date()); durationInMs = Optional.of(metrics.getDurationInMs(context.stop())); Long numFilesDeleted = stats.stream().mapToLong(stat -> stat.getSuccessDeleteFiles().size()) .sum(); metrics.updateRollbackMetrics(durationInMs.get(), numFilesDeleted);
@VisibleForTesting HoodieWriteClient(JavaSparkContext jsc, HoodieWriteConfig clientConfig, boolean rollbackInFlight, HoodieIndex index) { this.fs = FSUtils.getFs(clientConfig.getBasePath(), jsc.hadoopConfiguration()); this.jsc = jsc; this.config = clientConfig; this.index = index; this.metrics = new HoodieMetrics(config, config.getTableName()); this.rollbackInFlight = rollbackInFlight; }