/**
 * Performs the final load operation on the grouped data.
 */
protected void load(List<RecordChunk<T>> recordChunkList, final TaskWriterContext context) {
    //statistic before
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    long startTime = System.currentTimeMillis();

    //do load
    submitAndWait(recordChunkList, context);

    //statistic after
    writerStatistic.setTimeForLoad(System.currentTimeMillis() - startTime);
}
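Each pipeline stage here follows the same shape: snapshot the clock, do the work, record the elapsed time on the statistic object. A minimal sketch of that pattern factored into a reusable helper; the timed() method is an assumption for illustration, not part of the actual writer classes:

    // Hypothetical helper showing the recurring "statistic before / do work /
    // statistic after" timing pattern; timed() does not exist in the codebase.
    private long timed(Runnable work) {
        long startTime = System.currentTimeMillis();
        work.run();                                     // the wrapped stage, e.g. submitAndWait(...)
        return System.currentTimeMillis() - startTime;  // elapsed wall-clock millis
    }

    // Usage mirroring load():
    // writerStatistic.setTimeForLoad(timed(() -> submitAndWait(recordChunkList, context)));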
public void postPush() {
    BaseWriterStatistic statistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    if (parameter.isPerfStatistic()) {
        logger.info(statistic.toJsonString());
    }
}
private void doGroup(List<RdbEventRecord> records, TaskWriterContext context) {
    //statistic before
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    long key = RecordMeta.mediaMapping(records.get(0)).getTargetMediaSource().getId();
    writerStatistic.getGroupLoadStatistics().get(key).getExtendStatistic()
            .put(StatisticKey.TABLE_GROUP_RECORDS_COUNT, records.size());
    long startTime = System.currentTimeMillis();

    //do group
    records.forEach(this::groupForOneRecord);
    logger.debug("Table size in this batch is {}", tables.size());

    //statistic after
    long timeThrough = System.currentTimeMillis() - startTime;
    writerStatistic.getGroupLoadStatistics().get(key).getExtendStatistic()
            .put(StatisticKey.TABLE_GROUP_TIME_THROUGH, timeThrough);
    writerStatistic.getGroupLoadStatistics().get(key).getExtendStatistic()
            .put(StatisticKey.TABLE_GROUP_TABLE_COUNT, tables.size());
    writerStatistic.getGroupLoadStatistics().get(key).getExtendStatistic()
            .put(StatisticKey.TABLE_GROUP_TIME_PER_RECORD,
                    new BigDecimal(((double) timeThrough) / records.size())
                            .setScale(2, RoundingMode.UP).doubleValue());
}
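TABLE_GROUP_TIME_PER_RECORD divides the elapsed milliseconds by the record count and rounds up at two decimal places. A self-contained sketch of that arithmetic; the class name and sample values are illustrative only:

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    public class TimePerRecordDemo {
        public static void main(String[] args) {
            long timeThrough = 7;  // elapsed millis for the group step
            int recordCount = 3;
            // 7 / 3 = 2.333...; RoundingMode.UP at scale 2 rounds away from zero -> 2.34
            double perRecord = new BigDecimal(((double) timeThrough) / recordCount)
                    .setScale(2, RoundingMode.UP)
                    .doubleValue();
            System.out.println(perRecord); // prints 2.34
        }
    }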
private void buildSql(List<RdbEventRecord> records, TaskWriterContext context) {
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    long key = RecordMeta.mediaMapping(records.get(0)).getTargetMediaSource().getId();
    writerStatistic.getGroupLoadStatistics().get(key).getExtendStatistic()
            .put(StatisticKey.SQL_BUILD_RECORDS_COUNT, records.size());
/**
 * Rebuilds the data according to the MediaMapping configuration, then cleans
 * and transforms it.
 */
protected RecordChunk<T> transform(RecordChunk<T> recordChunk, TaskWriterContext context) {
    //statistic before
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    writerStatistic.setRecordsCountBeforeTransform(recordChunk.getRecords().size());
    long startTime = System.currentTimeMillis();

    //do transform
    Transformer<T> transformer = TransformerFactory.getTransformer(recordChunk.getRecords().get(0).getClass());
    RecordChunk<T> newChunk = transformer.transform(recordChunk, context);

    //statistic after
    writerStatistic.setRecordsCountAfterTransform(newChunk.getRecords().size());
    writerStatistic.setTimeForTransform(System.currentTimeMillis() - startTime);
    return newChunk;
}
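TransformerFactory picks a transformer based on the concrete record class of the first element. A minimal sketch of that dispatch-by-class style in plain Java, independent of the real factory; the Handler interface and registry here are illustrative stand-ins:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class SimpleFactoryDemo {
        interface Handler { void handle(Object record); }

        private static final Map<Class<?>, Handler> REGISTRY = new ConcurrentHashMap<>();

        static void register(Class<?> clazz, Handler handler) {
            REGISTRY.put(clazz, handler);
        }

        // Mirrors the getTransformer(clazz) lookup style used above.
        static Handler handlerFor(Class<?> clazz) {
            Handler handler = REGISTRY.get(clazz);
            if (handler == null) {
                throw new IllegalArgumentException("no handler registered for " + clazz.getName());
            }
            return handler;
        }
    }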
public void prePush() {
    context.beginSession();
    context.taskWriterSession().setData(WriterStatistic.KEY, new WriterStatistic(context.taskId(), parameter));
}
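prePush() and postPush() (shown earlier) bracket one push cycle: prePush() opens the session and seeds it with a fresh WriterStatistic, the intercept/mapping/transform/merge/group/load stages fill that statistic in, and postPush() logs it as JSON when perfStatistic is enabled. A hedged sketch of the implied driver sequence; writer and pushChunk() are stand-in names, not the real API:

    writer.prePush();                 // open session, seed a fresh WriterStatistic
    for (RecordChunk<RdbEventRecord> chunk : chunks) {
        writer.pushChunk(chunk);      // hypothetical: runs the per-chunk pipeline stages
    }
    writer.postPush();                // logs statistic.toJsonString() if perfStatistic is on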
WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
writerStatistic.setRecordsCountBeforeGroup(recordChunk.getRecords().size());
long startTime = System.currentTimeMillis();
/**
 * Optionally merges the data when merging is enabled.
 */
protected RecordChunk<T> merge(RecordChunk<T> recordChunk, TaskWriterContext context) {
    //statistic before
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    writerStatistic.setRecordsCountBeforeMerge(recordChunk.getRecords().size());
    long startTime = System.currentTimeMillis();

    //do merge
    if (context.getWriterParameter().isMerging()) {
        Merger<T> merger = MergerFactory.getMerger(recordChunk.getRecords().get(0).getClass());
        RecordChunk<T> newChunk = merger.merge(recordChunk);

        //statistic after
        writerStatistic.setTimeForMerge(System.currentTimeMillis() - startTime);
        writerStatistic.setRecordsCountAfterMerge(newChunk.getRecords().size());
        return newChunk;
    } else {
        //statistic after
        writerStatistic.setTimeForMerge(System.currentTimeMillis() - startTime);
        writerStatistic.setRecordsCountAfterMerge(recordChunk.getRecords().size());
        return recordChunk;
    }
}
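The before/after record counts only differ here when the merger collapses records. One common merge strategy for change records is last-write-wins per key; a self-contained sketch of that idea in plain Java, independent of the real Merger implementation (the record and key types are illustrative):

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    public class LastWriteWinsDemo {
        // Keeps only the last event per key, preserving first-seen key order.
        static <K, V> List<V> mergeByKey(List<V> records, Function<V, K> keyOf) {
            Map<K, V> latest = new LinkedHashMap<>();
            for (V record : records) {
                latest.put(keyOf.apply(record), record); // later events overwrite earlier ones
            }
            return new ArrayList<>(latest.values());
        }

        public static void main(String[] args) {
            List<String> events = List.of("id=1:v1", "id=2:v1", "id=1:v2");
            // prints [id=1:v2, id=2:v1]
            System.out.println(mergeByKey(events, e -> e.split(":")[0]));
        }
    }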
if (!records.isEmpty()) {
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    long key = RecordMeta.mediaMapping(records.get(0)).getTargetMediaSource().getId();
    long tableGroupStartTime = System.currentTimeMillis();
/**
 * Performs data synchronization for each data-source channel.
 */
private void loadData(RecordChunk<T> recordChunk, TaskWriterContext context,
                      PriorityTaskExecutor priorityTaskExecutor) throws InterruptedException {
    List<T> records = recordChunk.getRecords();
    if (records == null || records.isEmpty()) {
        return;
    }

    //load before
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    MediaSourceInfo targetMediaSource = RecordMeta.mediaMapping(records.get(0)).getTargetMediaSource();
    RecordGroupLoadStatistic loadStatistic = new RecordGroupLoadStatistic();
    writerStatistic.getGroupLoadStatistics().put(targetMediaSource.getId(), loadStatistic);
    loadStatistic.setMediaSourceId(targetMediaSource.getId());
    loadStatistic.setGroupRecordsCount(records.size());
    long startTime = System.currentTimeMillis();

    //do load
    PriorityTask<T> priorityTask = buildPriorityTask(records, context);
    priorityTaskExecutor.execute(priorityTask);

    //load after
    long timeThrough = System.currentTimeMillis() - startTime;
    loadStatistic.setGroupLoadTime(timeThrough);
    if (timeThrough != 0L) {
        loadStatistic.setGroupTps(new BigDecimal(records.size() / (((double) timeThrough) / 1000)).longValue());
    }
}
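Group TPS is the record count divided by the elapsed time in seconds, with a zero-elapsed-time guard so the division is safe. A worked sketch of that arithmetic; the class name and sample values are illustrative only:

    import java.math.BigDecimal;

    public class GroupTpsDemo {
        public static void main(String[] args) {
            long timeThrough = 250;  // elapsed millis for the load
            int recordCount = 500;
            if (timeThrough != 0L) {  // same guard as loadData() above
                // 500 records / 0.25 s = 2000 records per second
                long tps = new BigDecimal(recordCount / (((double) timeThrough) / 1000)).longValue();
                System.out.println(tps); // prints 2000
            }
        }
    }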
@SuppressWarnings("unchecked")
private void doLoad(TaskWriterContext context, List<List<RdbEventRecord>> records) {
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    long startTime = System.currentTimeMillis();
protected RecordChunk<T> mapping(RecordChunk<T> recordChunk, TaskWriterContext context) {
    WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
    writerStatistic.setRecordsCountBeforeMapping(recordChunk.getRecords().size());
    long startTime = System.currentTimeMillis();
WriterStatistic writerStatistic = context.taskWriterSession().getData(WriterStatistic.KEY);
writerStatistic.setRecordsCountBeforeIntercept(recordChunk.getRecords().size());
long startTime = System.currentTimeMillis();