public static void main(String[] args) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2).output(new DiscardingOutputFormat<Integer>());
    env.execute().getNetRuntime();
}
public static void main(String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);
    final int parallelism = params.getInt("parallelism", 1);

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setGlobalJobParameters(params);
    env.setParallelism(parallelism);

    // ... job definition and env.execute() elided in this snippet ...

    Map<String, Object> accumulators = env.getLastJobExecutionResult().getAllAccumulatorResults();
    LOGGER.info("== COUNTERS ==");
    for (Map.Entry<String, Object> e : accumulators.entrySet()) {
        // log each accumulator (loop body not shown in the original snippet)
        LOGGER.info("{} : {}", e.getKey(), e.getValue());
    }
}
public static void main(final String[] args) throws Exception {
    final ParameterTool params = ParameterTool.fromArgs(args);
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // make parameters available in the web interface
    env.getConfig().setGlobalJobParameters(params);

    // get the data set
    final DataSet<StringTriple> file = getDataSet(env, params);

    // filter lines with empty fields
    final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter());

    // Here, we could do further processing with the filtered lines...
    JobExecutionResult result;

    // output the filtered lines
    if (params.has("output")) {
        filteredLines.writeAsCsv(params.get("output"));
        // execute program
        result = env.execute("Accumulator example");
    } else {
        System.out.println("Printing result to stdout. Use --output to specify output path.");
        filteredLines.print();
        result = env.getLastJobExecutionResult();
    }

    // get the accumulator result via its registration key
    final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR);
    System.out.format("Number of detected empty fields per column: %s\n", emptyFields);
}
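The snippet above reads per-column counts under EMPTY_FIELD_ACCUMULATOR, but EmptyFieldFilter and the accumulator behind it are not shown. A minimal sketch of what they might look like, assuming StringTriple is a Tuple3<String, String, String> and a custom Accumulator<Integer, ArrayList<Integer>> (names and structure are assumptions, not confirmed by the snippet):

import java.util.ArrayList;
import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.functions.RichFilterFunction;
import org.apache.flink.configuration.Configuration;

// Assumed filter: drops records with empty fields and counts them per column.
public static class EmptyFieldFilter extends RichFilterFunction<StringTriple> {
    private final VectorAccumulator emptyFieldCounter = new VectorAccumulator();

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);
        // register under the same key the driver uses to read the result
        getRuntimeContext().addAccumulator(EMPTY_FIELD_ACCUMULATOR, emptyFieldCounter);
    }

    @Override
    public boolean filter(StringTriple t) {
        boolean containsEmptyFields = false;
        for (int pos = 0; pos < t.getArity(); pos++) {
            String field = t.getField(pos);
            if (field == null || field.trim().isEmpty()) {
                containsEmptyFields = true;
                emptyFieldCounter.add(pos); // count the empty column
            }
        }
        return !containsEmptyFields; // keep only fully populated rows
    }
}

// Assumed accumulator: add(pos) increments the count for column pos;
// its final value is the List<Integer> the driver prints.
public static class VectorAccumulator implements Accumulator<Integer, ArrayList<Integer>> {
    private ArrayList<Integer> counts = new ArrayList<>();

    @Override
    public void add(Integer position) {
        while (counts.size() <= position) {
            counts.add(0);
        }
        counts.set(position, counts.get(position) + 1);
    }

    @Override
    public ArrayList<Integer> getLocalValue() {
        return counts;
    }

    @Override
    public void resetLocal() {
        counts.clear();
    }

    @Override
    public void merge(Accumulator<Integer, ArrayList<Integer>> other) {
        // element-wise sum of the per-column counts
        ArrayList<Integer> otherCounts = other.getLocalValue();
        for (int i = 0; i < otherCounts.size(); i++) {
            while (counts.size() <= i) {
                counts.add(0);
            }
            counts.set(i, counts.get(i) + otherCounts.get(i));
        }
    }

    @Override
    public Accumulator<Integer, ArrayList<Integer>> clone() {
        VectorAccumulator copy = new VectorAccumulator();
        copy.counts = new ArrayList<>(counts);
        return copy;
    }
}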
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism)
        throws ProgramMissingJobException, ProgramInvocationException {
    logAndSysout("Starting execution of program");

    final JobSubmissionResult result = client.run(program, parallelism);

    if (null == result) {
        throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called "
                + "ExecutionEnvironment.execute()");
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        if (accumulatorsResult.size() > 0) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }
}
private void executeAndRunAssertions(ExecutionEnvironment env) throws Exception {
    try {
        JobExecutionResult result = env.execute();
        assertTrue(result.getNetRuntime() >= 0);
        assertNotNull(result.getAllAccumulatorResults());
        assertTrue(result.getAllAccumulatorResults().isEmpty());
    } catch (JobExecutionException e) {
        fail("The program should have succeeded on the second run");
    }
}
@Override
protected void postSubmit() throws Exception {
    compareResultsByLinesInMemory(EXPECTED, resultPath);

    // Test accumulator results
    System.out.println("Accumulator results:");
    JobExecutionResult res = this.result;
    System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

    Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
    Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));
    Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

    // Test histogram (words per line distribution)
    Map<Integer, Integer> dist = new HashMap<>();
    dist.put(1, 1);
    dist.put(2, 1);
    dist.put(3, 1);
    Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

    // Test distinct words (custom accumulator)
    Set<StringValue> distinctWords = new HashSet<>();
    distinctWords.add(new StringValue("one"));
    distinctWords.add(new StringValue("two"));
    distinctWords.add(new StringValue("three"));
    Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
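The "distinct-words" value above comes from a custom accumulator that the snippet does not show. A minimal set-union accumulator along these lines would satisfy the assertion (the class name is an assumption):

import java.util.HashSet;
import org.apache.flink.api.common.accumulators.Accumulator;

// Assumed custom accumulator: collects distinct values into a set;
// parallel instances are merged by set union.
public static class SetAccumulator<T> implements Accumulator<T, HashSet<T>> {
    private HashSet<T> set = new HashSet<>();

    @Override
    public void add(T value) {
        set.add(value);
    }

    @Override
    public HashSet<T> getLocalValue() {
        return set;
    }

    @Override
    public void resetLocal() {
        set.clear();
    }

    @Override
    public void merge(Accumulator<T, HashSet<T>> other) {
        set.addAll(other.getLocalValue());
    }

    @Override
    public Accumulator<T, HashSet<T>> clone() {
        SetAccumulator<T> copy = new SetAccumulator<>();
        copy.set = new HashSet<>(set);
        return copy;
    }
}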
public static void main(String[] args) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2).output(new DiscardingOutputFormat<Integer>());
    env.execute().getAccumulatorResult(ACCUMULATOR_NAME);
}
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(6);
env.getConfig().disableSysoutLogging();

env.generateSequence(1, 1000000)
        // the rest of the transformation chain is missing in the original
        // snippet; a counting mapper plus a discarding sink are assumed here
        .map(new CountingMapper())
        .output(new DiscardingOutputFormat<Long>());

JobExecutionResult result = env.execute();
assertEquals(1000000L, result.getAllAccumulatorResults().get(accName));
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.setParallelism(4);
env.setRestartStrategy(RestartStrategies.noRestart());
env.getConfig().disableSysoutLogging();

// ... definition of a job that is expected to fail is elided in this snippet ...

JobExecutionResult res = env.execute();
String msg = res == null ? "null result" : "result in " + res.getNetRuntime() + " ms";
fail("The program should have failed, but returned " + msg);
public static void main(String[] args) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2).output(new DiscardingOutputFormat<Integer>());
    env.execute().getAllAccumulatorResults();
}
@Override
protected void testProgram() throws Exception {
    // test verifying the number of records read and written vs the accumulator counts
    readCalls = new ConcurrentLinkedQueue<Integer>();
    writeCalls = new ConcurrentLinkedQueue<Integer>();

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.createInput(new TestInputFormat(new Path(inputPath))).output(new TestOutputFormat());

    JobExecutionResult result = env.execute();

    Object a = result.getAllAccumulatorResults().get("DATA_SOURCE_ACCUMULATOR");
    Object b = result.getAllAccumulatorResults().get("DATA_SINK_ACCUMULATOR");
    long recordsRead = (Long) a;
    long recordsWritten = (Long) b;

    assertEquals(recordsRead, readCalls.size());
    assertEquals(recordsWritten, writeCalls.size());
}
public static void main(String[] args) throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.fromElements(1, 2).output(new DiscardingOutputFormat<Integer>());
    env.execute().getJobID();
}
/**
 * Convenience method to get the elements of a DataSet as a List.
 * As DataSet can contain a lot of data, this method should be used with caution.
 *
 * @return A List containing the elements of the DataSet
 */
public List<T> collect() throws Exception {
    final String id = new AbstractID().toString();
    final TypeSerializer<T> serializer = getType().createSerializer(getExecutionEnvironment().getConfig());

    this.output(new Utils.CollectHelper<>(id, serializer)).name("collect()");
    JobExecutionResult res = getExecutionEnvironment().execute();

    ArrayList<byte[]> accResult = res.getAccumulatorResult(id);
    if (accResult != null) {
        try {
            return SerializedListAccumulator.deserializeList(accResult, serializer);
        } catch (ClassNotFoundException e) {
            throw new RuntimeException("Cannot find type class of collected data type.", e);
        } catch (IOException e) {
            throw new RuntimeException("Serialization error while deserializing collected data", e);
        }
    } else {
        throw new RuntimeException("The call to collect() could not retrieve the DataSet.");
    }
}
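A typical call site, sketched: collect() builds the job, executes it, and returns the materialized elements, so no separate env.execute() call is needed.

import java.util.List;
import org.apache.flink.api.java.ExecutionEnvironment;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

// collect() triggers execution internally and retrieves the data via an accumulator
List<Integer> values = env.fromElements(1, 2, 3).collect();
System.out.println(values); // prints [1, 2, 3]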
/**
 * Convenience method to get the count (number of elements) of a DataSet
 * as well as the checksum (sum over element hashes).
 *
 * @return A ChecksumHashCode that represents the count and checksum of elements in the data set.
 * @deprecated replaced with {@code org.apache.flink.graph.asm.dataset.ChecksumHashCode} in Gelly
 */
@Deprecated
public static <T> Utils.ChecksumHashCode checksumHashCode(DataSet<T> input) throws Exception {
    final String id = new AbstractID().toString();
    input.output(new Utils.ChecksumHashCodeHelper<T>(id)).name("ChecksumHashCode");

    JobExecutionResult res = input.getExecutionEnvironment().execute();
    return res.<Utils.ChecksumHashCode>getAccumulatorResult(id);
}
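Usage is analogous to collect(): the call triggers execution and returns the aggregated result. A sketch, assuming the static method lives on a utility class such as DataSetUtils (as its signature suggests):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<Long> data = env.generateSequence(1, 100);

// triggers execution of the job; the result carries element count and checksum
Utils.ChecksumHashCode checksum = DataSetUtils.checksumHashCode(data);
System.out.println(checksum.getCount()); // 100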
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.registerCachedFile(tmpPlanFilesPath.toUri().toString(), FLINK_PYTHON_DC_ID, true);

JobExecutionResult jer = env.execute();
long runtime = jer.getNetRuntime();
streamer.sendRecord(runtime);
/**
 * Method to create and add lines to a csv-file
 *
 * @param env given ExecutionEnvironment
 * @throws IOException exception during file writing
 */
private static void writeCSV(ExecutionEnvironment env) throws IOException {
    String head = String.format("%s|%s|%s|%s|%s|%s%n",
            "Parallelism", "dataset", "vertex-label", "edge-label", "verification", "Runtime(s)");
    String tail = String.format("%s|%s|%s|%s|%s|%s%n",
            env.getParallelism(), INPUT_PATH, VERTEX_LABEL, EDGE_LABEL, VERIFICATION,
            env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));

    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        FileUtils.writeStringToFile(f, tail, true);
    } else {
        PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8");
        writer.print(head);
        writer.print(tail);
        writer.close();
    }
}
@Test
public void testAccumulator() {
    try {
        final int numElements = 100;

        ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();
        env.generateSequence(1, numElements)
                .map(new CountingMapper())
                .output(new DiscardingOutputFormat<Long>());

        JobExecutionResult result = env.execute();

        assertTrue(result.getNetRuntime() >= 0);
        assertEquals(numElements, (int) result.getAccumulatorResult(ACCUMULATOR_NAME));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
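CountingMapper is not shown in the snippet; a minimal sketch that would make the assertion pass, assuming it registers an IntCounter under ACCUMULATOR_NAME:

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

// Assumed mapper: passes elements through and counts them in an accumulator.
public static class CountingMapper extends RichMapFunction<Long, Long> {
    private final IntCounter counter = new IntCounter();

    @Override
    public void open(Configuration parameters) {
        // make the counter visible in the JobExecutionResult under ACCUMULATOR_NAME
        getRuntimeContext().addAccumulator(ACCUMULATOR_NAME, counter);
    }

    @Override
    public Long map(Long value) {
        counter.add(1); // one increment per record
        return value;
    }
}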
JobExecutionResult result = env.getLastJobExecutionResult();

json.writeEndObject(); // closes a nested object opened earlier in this (excerpted) method
json.writeStringField("job_id", result.getJobID().toString());
json.writeNumberField("runtime_ms", result.getNetRuntime());

for (Map.Entry<String, String> entry : env.getConfig().getGlobalJobParameters().toMap().entrySet()) {
    json.writeStringField(entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
    json.writeStringField(entry.getKey(), entry.getValue().toString());
}
/**
 * Gets the accumulator with the given name. Returns {@code null}, if no accumulator with
 * that name was produced.
 *
 * @param env The execution environment whose last executed job produced the accumulator
 * @param accumulatorName The name of the accumulator
 * @param <A> The generic type of the accumulator value
 * @return The value of the accumulator with the given name
 */
public <A> A getAccumulator(ExecutionEnvironment env, String accumulatorName) {
    JobExecutionResult result = env.getLastJobExecutionResult();
    Preconditions.checkNotNull(result, "No result found for job, was execute() called before getting the result?");
    return result.getAccumulatorResult(id + SEPARATOR + accumulatorName);
}
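A possible call site for this helper; the accumulator name here is hypothetical and must match what the job's rich functions registered under id + SEPARATOR + name:

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// ... build a job whose rich functions call
//     getRuntimeContext().addAccumulator(id + SEPARATOR + "rows-read", new LongCounter()); ...
env.execute();

Long rowsRead = getAccumulator(env, "rows-read"); // "rows-read" is a hypothetical name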
@Override
public FlinkRunnerResult run(Pipeline pipeline) {
    LOG.info("Executing pipeline using FlinkPipelineRunner.");
    LOG.info("Translating pipeline to Flink program.");
    this.flinkJobEnv.translate(pipeline);

    LOG.info("Starting execution of Flink program.");
    JobExecutionResult result;
    try {
        result = this.flinkJobEnv.executePipeline();
    } catch (Exception e) {
        LOG.error("Pipeline execution failed", e);
        throw new RuntimeException("Pipeline execution failed", e);
    }

    LOG.info("Execution finished in {} msecs", result.getNetRuntime());

    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
        LOG.info("Final aggregator values:");
        for (Map.Entry<String, Object> entry : accumulators.entrySet()) {
            LOG.info("{} : {}", entry.getKey(), entry.getValue());
        }
    }

    return new FlinkRunnerResult(accumulators, result.getNetRuntime());
}