@Override
protected void onExecuteError(Throwable exception, ExecutableContext executableContext) {
    super.onExecuteError(exception, executableContext);
    onStatusChange(executableContext, ExecuteResult.createError(exception), ExecutableState.ERROR);
}
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig config = context.getConfig();
    StringBuffer output = new StringBuffer();
    try {
        output.append(cleanUpIntermediateFlatTable(config));
        // don't drop view to avoid concurrent issue
        //output.append(cleanUpHiveViewIntermediateTable(config));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
} catch (ReflectiveOperationException e) {
    logger.error("error getMapReduceJobClass, class name:" + getParam(KEY_MR_JOB), e);
    return ExecuteResult.createError(e);
} catch (Exception e) {
    logger.error("error execute " + this.toString(), e);
    return ExecuteResult.createError(e);
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        Map<Long, Long> recommendCuboidsWithStats = CuboidStatsReaderUtil
                .readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats,
                newSegments.toArray(new CubeSegment[newSegments.size()]));
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
} catch (Exception e) {
    logger.error("error run spark job:", e);
    return ExecuteResult.createError(e);
    return ExecuteResult.createError(e);
} catch (Exception e) {
    logger.error("error execute " + this.toString(), e);
    return ExecuteResult.createError(e);
} catch (IOException e) {
    logger.error("fail to merge cuboid statistics", e);
    return ExecuteResult.createError(e);
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams())).latestCopyForWrite();
    final CubeSegment optimizeSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment oldSegment = optimizeSegment.getCubeInstance().getOriginalSegmentToOptimize(optimizeSegment);
    Preconditions.checkNotNull(oldSegment,
            "cannot find the original segment to be optimized by " + optimizeSegment);

    // --- Copy dictionary
    optimizeSegment.getDictionaries().putAll(oldSegment.getDictionaries());
    optimizeSegment.getSnapshots().putAll(oldSegment.getSnapshots());
    optimizeSegment.getRowkeyStats().addAll(oldSegment.getRowkeyStats());

    try {
        CubeUpdate cubeBuilder = new CubeUpdate(cube);
        cubeBuilder.setToUpdateSegs(optimizeSegment);
        mgr.updateCube(cubeBuilder);
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
    return new ExecuteResult();
}
} catch (IOException e) {
    logger.error("fail to save cuboid statistics", e);
    return ExecuteResult.createError(e);
} catch (IOException e) {
    logger.error("fail to merge dictionary", e);
    return ExecuteResult.createError(e);
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        logger.info("executing:" + getCmd());
        final PatternedLogger patternedLogger = new PatternedLogger(logger);
        final Pair<Integer, String> result = context.getConfig().getCliCommandExecutor().execute(getCmd(),
                patternedLogger);
        getManager().addJobInfo(getId(), patternedLogger.getInfo());
        return result.getFirst() == 0
                ? new ExecuteResult(ExecuteResult.State.SUCCEED, result.getSecond())
                : ExecuteResult.createFailed(new ShellException(result.getSecond()));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
}
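// The step above maps the two failure modes differently: a command that runs but exits
// non-zero becomes ExecuteResult.createFailed(...), while an exception that prevents the
// command from running at all becomes ExecuteResult.createError(...). A minimal sketch of
// that distinction outside Kylin; StepOutcome and runShell are hypothetical names, not Kylin API.

import java.io.IOException;

final class StepOutcome { // hypothetical result holder, standing in for ExecuteResult
    final boolean succeed;
    final String detail;

    StepOutcome(boolean succeed, String detail) {
        this.succeed = succeed;
        this.detail = detail;
    }
}

final class ShellStepSketch {
    // Run a shell command and separate "ran but failed" from "could not run", as doWork above does.
    static StepOutcome runShell(String cmd) {
        try {
            Process p = new ProcessBuilder("sh", "-c", cmd).redirectErrorStream(true).start();
            String out = new String(p.getInputStream().readAllBytes());
            int exit = p.waitFor();
            // non-zero exit code: the command executed and reported failure -> "failed"
            return new StepOutcome(exit == 0, out);
        } catch (IOException | InterruptedException e) {
            // the command could not be executed at all -> "error"
            return new StepOutcome(false, e.toString());
        }
    }
}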
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager mgr = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = mgr.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment newSegment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));
    final List<CubeSegment> mergingSegments = getMergingSegments(cube);
    KylinConfig conf = cube.getConfig();

    Collections.sort(mergingSegments);

    try {
        checkLookupSnapshotsMustIncremental(mergingSegments);

        // work on copy instead of cached objects
        CubeInstance cubeCopy = cube.latestCopyForWrite();
        CubeSegment newSegCopy = cubeCopy.getSegmentById(newSegment.getUuid());

        makeDictForNewSegment(conf, cubeCopy, newSegCopy, mergingSegments);
        makeSnapshotForNewSegment(cubeCopy, newSegCopy, mergingSegments);

        CubeUpdate update = new CubeUpdate(cubeCopy);
        update.setToUpdateSegs(newSegCopy);
        mgr.updateCube(update);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to merge dictionary or lookup snapshots", e);
        return ExecuteResult.createError(e);
    }
}
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    KylinConfig kylinConfig = context.getConfig();
    CubeManager cubeManager = CubeManager.getInstance(kylinConfig);
    TableMetadataManager metaMgr = TableMetadataManager.getInstance(kylinConfig);
    SnapshotManager snapshotMgr = SnapshotManager.getInstance(kylinConfig);
    CubeInstance cube = cubeManager.getCube(LookupExecutableUtil.getCubeName(this.getParams()));
    List<String> segmentIDs = LookupExecutableUtil.getSegments(this.getParams());
    String lookupTableName = LookupExecutableUtil.getLookupTableName(this.getParams());
    CubeDesc cubeDesc = cube.getDescriptor();
    try {
        TableDesc tableDesc = metaMgr.getTableDesc(lookupTableName, cube.getProject());
        IReadableTable hiveTable = SourceManager.createReadableTable(tableDesc, null);
        logger.info("take snapshot for table:" + lookupTableName);
        SnapshotTable snapshot = snapshotMgr.buildSnapshot(hiveTable, tableDesc, cube.getConfig());

        logger.info("update snapshot path to cube metadata");
        if (cubeDesc.isGlobalSnapshotTable(lookupTableName)) {
            LookupExecutableUtil.updateSnapshotPathToCube(cubeManager, cube, lookupTableName,
                    snapshot.getResourcePath());
        } else {
            LookupExecutableUtil.updateSnapshotPathToSegments(cubeManager, cube, segmentIDs, lookupTableName,
                    snapshot.getResourcePath());
        }
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to build snapshot for:" + lookupTableName, e);
        return ExecuteResult.createError(e);
    }
}
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cubeCopy = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()))
            .latestCopyForWrite();
    final String segmentId = CubingExecutableUtil.getSegmentId(this.getParams());
    final CubeSegment segCopy = cubeCopy.getSegmentById(segmentId);

    Preconditions.checkNotNull(segCopy, "Cube segment '" + segmentId + "' not found.");
    Segments<CubeSegment> mergingSegs = cubeCopy.getMergingSegments(segCopy);
    Preconditions.checkArgument(mergingSegs.size() > 0, "Merging segment not exist.");

    Collections.sort(mergingSegs);
    final CubeSegment first = mergingSegs.get(0);
    final CubeSegment last = mergingSegs.get(mergingSegs.size() - 1);

    segCopy.setSegRange(new SegmentRange(first.getSegRange().start, last.getSegRange().end));
    segCopy.setSourcePartitionOffsetStart(first.getSourcePartitionOffsetStart());
    segCopy.setSourcePartitionOffsetEnd(last.getSourcePartitionOffsetEnd());
    segCopy.setTSRange(new TSRange(mergingSegs.getTSStart(), mergingSegs.getTSEnd()));

    CubeUpdate update = new CubeUpdate(cubeCopy);
    update.setToUpdateSegs(segCopy);
    try {
        cubeManager.updateCube(update);
        return ExecuteResult.createSucceed();
    } catch (IOException e) {
        logger.error("fail to update cube segment offset", e);
        return ExecuteResult.createError(e);
    }
}
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()))
            .latestCopyForWrite();
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));
    long sourceCount = cubingJob.findSourceRecordCount();
    long sourceSizeBytes = cubingJob.findSourceSizeBytes();
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);

    try {
        saveExtSnapshotIfNeeded(cubeManager, cube, segment);
        updateSegment(segment);
        cubeManager.promoteNewlyBuiltSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final CubeSegment segment = cube.getSegmentById(CubingExecutableUtil.getSegmentId(this.getParams()));

    CubeSegment originalSegment = cube.getOriginalSegmentToOptimize(segment);
    long sourceCount = originalSegment.getInputRecords();
    long sourceSizeBytes = originalSegment.getInputRecordsSize();

    CubingJob cubingJob = (CubingJob) getManager().getJob(CubingExecutableUtil.getCubingJobId(this.getParams()));
    long cubeSizeBytes = cubingJob.findCubeSizeBytes();

    segment.setLastBuildJobID(CubingExecutableUtil.getCubingJobId(this.getParams()));
    segment.setLastBuildTime(System.currentTimeMillis());
    segment.setSizeKB(cubeSizeBytes / 1024);
    segment.setInputRecords(sourceCount);
    segment.setInputRecordsSize(sourceSizeBytes);
    segment.setDimensionRangeInfoMap(originalSegment.getDimensionRangeInfoMap());

    try {
        cubeManager.promoteNewlyOptimizeSegments(cube, segment);
        return new ExecuteResult();
    } catch (IOException e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
} catch (IOException e) {
    logger.error("fail to update cube after merge", e);
    return ExecuteResult.createError(e);
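// Every doWork above follows the same contract: perform the step, and on a checked failure
// log the cause and return ExecuteResult.createError(e) instead of letting the exception
// escape, so the job engine can persist the ERROR state along with the diagnostics.
// A minimal skeleton of that pattern; MyCleanupStep and doCleanup are hypothetical names,
// the remaining types are the Kylin classes already used throughout this section.

public class MyCleanupStep extends AbstractExecutable {

    @Override
    protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
        try {
            String report = doCleanup(context.getConfig()); // hypothetical helper doing the real work
            return new ExecuteResult(ExecuteResult.State.SUCCEED, report);
        } catch (IOException e) {
            // convert the exception into an error result so the scheduler records it
            logger.error("job:" + getId() + " execute finished with exception", e);
            return ExecuteResult.createError(e);
        }
    }

    private String doCleanup(KylinConfig config) throws IOException {
        // ... the actual cleanup would go here ...
        return "cleaned";
    }
}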