/**
 * Factory for a successful result carrying the default "succeed" message.
 *
 * @return an {@code ExecuteResult} in state {@code SUCCEED}
 */
public static ExecuteResult createSucceed() {
    final ExecuteResult result = new ExecuteResult(State.SUCCEED, "succeed");
    return result;
}
@Override protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException { KylinConfig config = context.getConfig(); StringBuffer output = new StringBuffer(); try { output.append(cleanUpIntermediateFlatTable(config)); // don't drop view to avoid concurrent issue //output.append(cleanUpHiveViewIntermediateTable(config)); } catch (IOException e) { logger.error("job:" + getId() + " execute finished with exception", e); return ExecuteResult.createError(e); } return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString()); }
return ExecuteResult.createSucceed(); } catch (IOException e) { logger.error("fail to merge cuboid statistics", e); return ExecuteResult.createError(e);
return result == 0 ? new ExecuteResult(ExecuteResult.State.SUCCEED, log.toString()) : ExecuteResult.createFailed(new HadoopShellException(log.toString())); } catch (ReflectiveOperationException e) { logger.error("error getMapReduceJobClass, class name:" + getParam(KEY_MR_JOB), e); return ExecuteResult.createError(e); } catch (Exception e) { logger.error("error execute " + this.toString(), e); return ExecuteResult.createError(e);
/**
 * Records the end time and persists the terminal state (SUCCEED or ERROR)
 * unless the job was discarded or is still runnable.
 */
protected void onExecuteFinished(ExecuteResult result, ExecutableContext executableContext) {
    setEndTime(System.currentTimeMillis());
    if (isDiscarded() || isRunnable()) {
        // Discarded or still-runnable jobs keep their current recorded state.
        return;
    }
    final ExecutableState finalState = result.succeed() ? ExecutableState.SUCCEED : ExecutableState.ERROR;
    getManager().updateJobOutput(getId(), finalState, null, result.output());
}
/**
 * Delegates error handling to the base class, then broadcasts an ERROR
 * status change carrying the failure cause.
 */
@Override
protected void onExecuteError(Throwable exception, ExecutableContext executableContext) {
    super.onExecuteError(exception, executableContext);
    final ExecuteResult errorResult = ExecuteResult.createError(exception);
    onStatusChange(executableContext, errorResult, ExecutableState.ERROR);
}
@Override protected void onExecuteFinished(ExecuteResult result, ExecutableContext executableContext) { super.onExecuteFinished(result, executableContext); if (!isDiscarded() && result.succeed()) { List<? extends Executable> jobs = getTasks(); boolean allSucceed = true; for (Executable task : jobs) { final ExecutableState status = task.getStatus(); if (status != ExecutableState.SUCCEED) { allSucceed = false; } } if (allSucceed) { // Add last optimization time CubeManager cubeManager = CubeManager.getInstance(executableContext.getConfig()); CubeInstance cube = cubeManager.getCube(getCubeName()); CubeInstance copyForWrite = cube.latestCopyForWrite(); try { copyForWrite.setCuboidLastOptimized(getEndTime()); CubeUpdate cubeUpdate = new CubeUpdate(copyForWrite); cubeManager.updateCube(cubeUpdate); } catch (IOException e) { logger.error("Failed to update last optimized for " + getCubeName(), e); } } } }
/**
 * Sleeps for one second, then reports success.
 *
 * @param context execution context (unused)
 * @return a SUCCEED result
 * @throws ExecuteException declared by the contract; never thrown here
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(1000);
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of silently swallowing the
        // exception, so callers can still observe the interruption
        // (consistent with the sibling step that sleeps 5000 ms).
        Thread.currentThread().interrupt();
    }
    return ExecuteResult.createSucceed();
}
}
/**
 * Runs the configured shell command, records its parsed job info, and maps the
 * exit code to a SUCCEED or FAILED result.
 *
 * @param context execution context supplying the CLI command executor
 * @return SUCCEED with the command output on exit code 0, FAILED otherwise,
 *         or an error result if the command could not be executed
 * @throws ExecuteException declared by the contract; I/O failures are caught here
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        logger.info("executing:" + getCmd());
        final PatternedLogger patternedLogger = new PatternedLogger(logger);
        final Pair<Integer, String> cmdResult =
                context.getConfig().getCliCommandExecutor().execute(getCmd(), patternedLogger);
        getManager().addJobInfo(getId(), patternedLogger.getInfo());
        final int exitCode = cmdResult.getFirst();
        final String cmdOutput = cmdResult.getSecond();
        if (exitCode == 0) {
            return new ExecuteResult(ExecuteResult.State.SUCCEED, cmdOutput);
        }
        return ExecuteResult.createFailed(new ShellException(cmdOutput));
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        return ExecuteResult.createError(e);
    }
}
/**
 * Executes the snapshot-cache update step against a fresh context and
 * asserts that it reports success.
 */
@Test
public void testExecute() throws ExecuteException {
    final UpdateSnapshotCacheForQueryServersStep step = new UpdateSnapshotCacheForQueryServersStep();
    final DefaultContext context =
            new DefaultContext(Maps.<String, Executable>newConcurrentMap(), kylinConfig);
    final ExecuteResult result = step.doWork(context);
    System.out.println(result.output());
    assertTrue(result.succeed());
}
}
/**
 * Lets the parent record the failure first, then notifies listeners of the
 * ERROR state with the failure wrapped as an error result.
 */
@Override
protected void onExecuteError(Throwable exception, ExecutableContext executableContext) {
    super.onExecuteError(exception, executableContext);
    onStatusChange(executableContext, ExecuteResult.createError(exception), ExecutableState.ERROR);
}
setEndTime(System.currentTimeMillis()); notifyUserStatusChange(ExecutableState.DISCARDED); } else if (result.succeed()) { List<? extends Executable> jobs = getTasks(); boolean allSucceed = true;
/**
 * Blocks for five seconds, then reports success.
 *
 * @param context execution context (unused)
 * @return a SUCCEED result
 * @throws ExecuteException declared by the contract; never thrown here
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        Thread.sleep(5000);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the caller-visible interruption status
    }
    return ExecuteResult.createSucceed();
}
}
/**
 * Builds a FAILED result from the given cause.
 *
 * @param throwable cause of the failure; must not be null
 * @return a FAILED {@code ExecuteResult} carrying the localized message and the cause
 */
public static ExecuteResult createFailed(Throwable throwable) {
    Preconditions.checkArgument(throwable != null, "throwable cannot be null");
    final String message = throwable.getLocalizedMessage();
    return new ExecuteResult(State.FAILED, message, throwable);
}
/**
 * Promotes READY_PENDING segments after cuboid optimization, attaching the
 * statistics gathered for the recommended cuboids.
 *
 * @param context execution context supplying the Kylin configuration
 * @return a default success result, or an error result if statistics are
 *         missing or promotion fails
 * @throws ExecuteException declared by the contract; failures are caught here
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final CubeManager cubeManager = CubeManager.getInstance(context.getConfig());
    final CubeInstance cube = cubeManager.getCube(CubingExecutableUtil.getCubeName(this.getParams()));
    final Set<Long> recommendCuboids = cube.getCuboidsRecommend();
    try {
        final List<CubeSegment> newSegments = cube.getSegments(SegmentStatusEnum.READY_PENDING);
        final Map<Long, Long> recommendCuboidsWithStats =
                CuboidStatsReaderUtil.readCuboidStatsFromSegments(recommendCuboids, newSegments);
        if (recommendCuboidsWithStats == null) {
            throw new RuntimeException("Fail to get statistics info for recommended cuboids after optimization!!!");
        }
        final CubeSegment[] segmentArray = newSegments.toArray(new CubeSegment[newSegments.size()]);
        cubeManager.promoteCheckpointOptimizeSegments(cube, recommendCuboidsWithStats, segmentArray);
        return new ExecuteResult();
    } catch (Exception e) {
        logger.error("fail to update cube after build", e);
        return ExecuteResult.createError(e);
    }
}
return new ExecuteResult(ExecuteResult.State.SUCCEED, "skipped"); log.append(stringWriter.toString()).append("\n"); log.append("result code:").append(2); return new ExecuteResult(ExecuteResult.State.ERROR, log.toString(), ex); if (status == JobStepStatusEnum.KILLED) { mgr.updateJobOutput(getId(), ExecutableState.ERROR, hadoopCmdOutput.getInfo(), "killed by admin"); return new ExecuteResult(ExecuteResult.State.FAILED, "killed by admin"); return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString()); } else { return ExecuteResult.createFailed(new MapReduceException(output.toString())); return new ExecuteResult(ExecuteResult.State.DISCARDED, output.toString()); } else { return new ExecuteResult(ExecuteResult.State.STOPPED, output.toString()); return ExecuteResult.createError(e); } catch (Exception e) { logger.error("error execute " + this.toString(), e); return ExecuteResult.createError(e);
return ExecuteResult.createSucceed(); } catch (IOException e) { logger.error("fail to save cuboid statistics", e); return ExecuteResult.createError(e);
setEndTime(System.currentTimeMillis()); onStatusChange(executableContext, result, ExecutableState.STOPPED); } else if (result.succeed()) { List<? extends Executable> jobs = getTasks(); boolean allSucceed = true; mgr.updateJobOutput(getId(), ExecutableState.ERROR, null, result.output()); onStatusChange(executableContext, result, ExecutableState.ERROR);
setEndTime(System.currentTimeMillis()); notifyUserStatusChange(ExecutableState.DISCARDED); } else if (result.succeed()) { List<? extends Executable> jobs = getTasks(); boolean allSucceed = true;
/**
 * Builds an ERROR result from the given cause.
 *
 * @param throwable cause of the error; must not be null
 * @return an ERROR {@code ExecuteResult} carrying the localized message and the cause
 */
public static ExecuteResult createError(Throwable throwable) {
    // A null cause is a programming error; fail fast with an explicit message.
    Preconditions.checkArgument(throwable != null, "throwable cannot be null");
    return new ExecuteResult(State.ERROR, throwable.getLocalizedMessage(), throwable);
}