/**
 * Fetches the value of the accumulator registered under the given name, or {@code null}
 * if the job produced no accumulator with that name.
 *
 * @param env the execution environment whose last job execution result is queried
 * @param accumulatorName The name of the accumulator
 * @param <A> The generic type of the accumulator value
 * @return The value of the accumulator with the given name
 */
public <A> A getAccumulator(ExecutionEnvironment env, String accumulatorName) {
    final JobExecutionResult lastResult = env.getLastJobExecutionResult();
    Preconditions.checkNotNull(
        lastResult, "No result found for job, was execute() called before getting the result?");
    // Accumulator names are namespaced with this object's id to avoid collisions.
    return lastResult.getAccumulatorResult(id + SEPARATOR + accumulatorName);
}
}
// Cache the result of the most recently executed job for later accumulator/runtime lookups.
// NOTE(review): assumes a job has already been executed on `env`; otherwise the cached
// value may be null — confirm at the call site.
this.latestExecutionResult = env.getLastJobExecutionResult();
// Fetch every accumulator result of the last executed job and log each entry below.
// NOTE(review): assumes a job has run on `env` before this point — getLastJobExecutionResult()
// may otherwise be null; confirm against the caller.
Map<String, Object> accumulators = env.getLastJobExecutionResult().getAllAccumulatorResults();
LOGGER.info("== COUNTERS ==");
for (Map.Entry<String, Object> e : accumulators.entrySet()) {
// Result of the most recently executed job on this environment.
// NOTE(review): may be null if execute()/print() has not run yet — verify callers handle this.
JobExecutionResult result = env.getLastJobExecutionResult();
public static void main(final String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // get the data set final DataSet<StringTriple> file = getDataSet(env, params); // filter lines with empty fields final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter()); // Here, we could do further processing with the filtered lines... JobExecutionResult result; // output the filtered lines if (params.has("output")) { filteredLines.writeAsCsv(params.get("output")); // execute program result = env.execute("Accumulator example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); filteredLines.print(); result = env.getLastJobExecutionResult(); } // get the accumulator result via its registration key final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR); System.out.format("Number of detected empty fields per column: %s\n", emptyFields); }
/**
 * Method to create and add lines to a csv-file. Creates the file with a header row when it
 * does not exist yet; otherwise appends one data row.
 *
 * <p>Fixes: the {@code PrintWriter} is now closed via try-with-resources (previously it
 * leaked if printing threw), and appends now specify UTF-8 explicitly so they match the
 * encoding the file was created with (the deprecated append overload used the platform
 * default charset).
 *
 * @param env given ExecutionEnvironment
 * @throws IOException exception during file writing
 */
private static void writeCSV(ExecutionEnvironment env) throws IOException {
    String head = String.format("%s|%s|%s|%s|%s|%s%n",
        "Parallelism", "dataset", "vertex-label", "edge-label", "verification", "Runtime(s)");
    String tail = String.format("%s|%s|%s|%s|%s|%s%n",
        env.getParallelism(), INPUT_PATH, VERTEX_LABEL, EDGE_LABEL, VERIFICATION,
        env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));
    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        // Append using the same UTF-8 encoding the file was originally written with.
        FileUtils.writeStringToFile(f, tail, "UTF-8", true);
    } else {
        // New file: write the header followed by the first data row.
        try (PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8")) {
            writer.print(head);
            writer.print(tail);
        }
    }
}
/** * Method to crate and add lines to a benchmark file. * * @param env given ExecutionEnvironment * @param sampling sampling algorithm under test * @throws IOException exception during file writing */ private static void writeBenchmark(ExecutionEnvironment env, SamplingAlgorithm sampling) throws IOException { String head = String.format("%s|%s|%s|%s|%s%n", "Parallelism", "Dataset", "Algorithm", "Params", "Runtime [s]"); // build log String samplingName = sampling.getClass().getSimpleName(); String tail = String.format("%s|%s|%s|%s|%s%n", env.getParallelism(), INPUT_PATH.substring(INPUT_PATH.lastIndexOf(File.separator) + 1), samplingName, String.join(", ", CONSTRUCTOR_PARAMS), env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS)); File f = new File(OUTPUT_PATH + OUTPUT_PATH_BENCHMARK_SUFFIX); if (f.exists() && !f.isDirectory()) { FileUtils.writeStringToFile(f, tail, true); } else { PrintWriter writer = new PrintWriter(OUTPUT_PATH + OUTPUT_PATH_BENCHMARK_SUFFIX, "UTF-8"); writer.print(head); writer.print(tail); writer.close(); } }
/**
 * Method to create and add lines to a csv-file. Creates the file with a header row when it
 * does not exist yet; otherwise appends one data row.
 *
 * <p>Fixes: the {@code PrintWriter} is now closed via try-with-resources (previously it
 * leaked if printing threw), and appends now specify UTF-8 explicitly so they match the
 * encoding the file was created with (the deprecated append overload used the platform
 * default charset).
 *
 * @param env given ExecutionEnvironment
 * @throws IOException exception during file writing
 */
private static void writeCSV(ExecutionEnvironment env) throws IOException {
    String head = String.format("%s|%s|%s%n", "Parallelism", "dataset", "Runtime(s)");
    String tail = String.format("%s|%s|%s%n",
        env.getParallelism(), INPUT_PATH,
        env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));
    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        // Append using the same UTF-8 encoding the file was originally written with.
        FileUtils.writeStringToFile(f, tail, "UTF-8", true);
    } else {
        // New file: write the header followed by the first data row.
        try (PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8")) {
            writer.print(head);
            writer.print(tail);
        }
    }
}
/**
 * Method to create and add lines to a csv-file. Creates the file with a header row when it
 * does not exist yet; otherwise appends one data row.
 *
 * <p>Fixes: the {@code PrintWriter} is now closed via try-with-resources (previously it
 * leaked if printing threw), and appends now specify UTF-8 explicitly so they match the
 * encoding the file was created with (the deprecated append overload used the platform
 * default charset).
 *
 * @param env given ExecutionEnvironment
 * @throws IOException exception during file writing
 */
private static void writeCSV(ExecutionEnvironment env) throws IOException {
    String head = String.format("%s|%s|%s|%s|%s%n",
        "Parallelism", "dataset", "query", "usedStatistics", "Runtime(s)");
    String tail = String.format("%s|%s|%s|%s|%s%n",
        env.getParallelism(), INPUT_PATH, QUERY, HAS_STATISTICS,
        env.getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));
    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        // Append using the same UTF-8 encoding the file was originally written with.
        FileUtils.writeStringToFile(f, tail, "UTF-8", true);
    } else {
        // New file: write the header followed by the first data row.
        try (PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8")) {
            writer.print(head);
            writer.print(tail);
        }
    }
}
/**
 * Fetches the value of the accumulator registered under the given name, or {@code null}
 * if the job produced no accumulator with that name.
 *
 * @param env the execution environment whose last job execution result is queried
 * @param accumulatorName The name of the accumulator
 * @param <A> The generic type of the accumulator value
 * @return The value of the accumulator with the given name
 */
public <A> A getAccumulator(ExecutionEnvironment env, String accumulatorName) {
    final JobExecutionResult lastResult = env.getLastJobExecutionResult();
    Preconditions.checkNotNull(
        lastResult, "No result found for job, was execute() called before getting the result?");
    // Accumulator names are namespaced with this object's id to avoid collisions.
    return lastResult.getAccumulatorResult(id + SEPARATOR + accumulatorName);
}
}
/**
 * Fetches the value of the accumulator registered under the given name, or {@code null}
 * if the job produced no accumulator with that name.
 *
 * @param env the execution environment whose last job execution result is queried
 * @param accumulatorName The name of the accumulator
 * @param <A> The generic type of the accumulator value
 * @return The value of the accumulator with the given name
 */
public <A> A getAccumulator(ExecutionEnvironment env, String accumulatorName) {
    final JobExecutionResult lastResult = env.getLastJobExecutionResult();
    Preconditions.checkNotNull(
        lastResult, "No result found for job, was execute() called before getting the result?");
    // Accumulator names are namespaced with this object's id to avoid collisions.
    return lastResult.getAccumulatorResult(id + SEPARATOR + accumulatorName);
}
}
/**
 * Fetches the value of the accumulator registered under the given name, or {@code null}
 * if the job produced no accumulator with that name.
 *
 * @param env the execution environment whose last job execution result is queried
 * @param accumulatorName The name of the accumulator
 * @param <A> The generic type of the accumulator value
 * @return The value of the accumulator with the given name
 */
public <A> A getAccumulator(ExecutionEnvironment env, String accumulatorName) {
    final JobExecutionResult lastResult = env.getLastJobExecutionResult();
    Preconditions.checkNotNull(
        lastResult, "No result found for job, was execute() called before getting the result?");
    // Accumulator names are namespaced with this object's id to avoid collisions.
    return lastResult.getAccumulatorResult(id + SEPARATOR + accumulatorName);
}
}
/**
 * Method to create and add lines to a csv-file. Creates the file with a header row when it
 * does not exist yet; otherwise appends one data row.
 *
 * <p>Fixes: the {@code PrintWriter} is now closed via try-with-resources (previously it
 * leaked if printing threw), and appends now specify UTF-8 explicitly so they match the
 * encoding the file was created with (the deprecated append overload used the platform
 * default charset).
 *
 * @throws IOException exception during file writing
 */
private static void writeCSV() throws IOException {
    String head = String.format("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s%n",
        "Parallelism", "dataset", "vertexKeys", "edgeKeys", "USE_VERTEX_LABELS",
        "USE_EDGE_LABELS", "Vertex Aggregators", "Vertex-Aggregator-Keys",
        "EPGMEdge-Aggregators", "EPGMEdge-Aggregator-Keys", "Runtime(s)");
    String tail = String.format("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s%n",
        getExecutionEnvironment().getParallelism(), INPUT_PATH, VERTEX_GROUPING_KEYS,
        EDGE_GROUPING_KEYS, USE_VERTEX_LABELS, USE_EDGE_LABELS, VERTEX_AGGREGATORS,
        VERTEX_AGGREGATOR_KEYS, EDGE_AGGREGATORS, EDGE_AGGREGATOR_KEYS,
        getExecutionEnvironment().getLastJobExecutionResult().getNetRuntime(TimeUnit.SECONDS));
    File f = new File(CSV_PATH);
    if (f.exists() && !f.isDirectory()) {
        // Append using the same UTF-8 encoding the file was originally written with.
        FileUtils.writeStringToFile(f, tail, "UTF-8", true);
    } else {
        // New file: write the header followed by the first data row.
        try (PrintWriter writer = new PrintWriter(CSV_PATH, "UTF-8")) {
            writer.print(head);
            writer.print(tail);
        }
    }
}
/**
 * Returns a string containing information about the benchmark run.
 *
 * @return benchmark result string
 */
private String getResultString() {
    // Gather the individual fields first, then join them into the pipe-separated record.
    final int parallelism = getExecutionEnvironment().getParallelism();
    final long netRuntime = getExecutionEnvironment().getLastJobExecutionResult().getNetRuntime();
    return String.format("%s|%s|%s|%s|%s|%s",
        inputPath, parallelism, traverserStrategy.name(), query, embeddingCount, netRuntime);
}
public static void main(final String[] args) throws Exception { if (!parseParameters(args)) { return; } final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // get the data set final DataSet<Tuple> file = getDataSet(env); // filter lines with empty fields final DataSet<Tuple> filteredLines = file.filter(new EmptyFieldFilter()); // Here, we could do further processing with the filtered lines... JobExecutionResult result; // output the filtered lines if (outputPath == null) { filteredLines.print(); result = env.getLastJobExecutionResult(); } else { filteredLines.writeAsCsv(outputPath); // execute program result = env.execute("Accumulator example"); } // get the accumulator result via its registration key final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR); System.out.format("Number of detected empty fields per column: %s\n", emptyFields); }
public static void main(final String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // get the data set final DataSet<StringTriple> file = getDataSet(env, params); // filter lines with empty fields final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter()); // Here, we could do further processing with the filtered lines... JobExecutionResult result; // output the filtered lines if (params.has("output")) { filteredLines.writeAsCsv(params.get("output")); // execute program result = env.execute("Accumulator example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); filteredLines.print(); result = env.getLastJobExecutionResult(); } // get the accumulator result via its registration key final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR); System.out.format("Number of detected empty fields per column: %s\n", emptyFields); }
public static void main(final String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // get the data set final DataSet<StringTriple> file = getDataSet(env, params); // filter lines with empty fields final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter()); // Here, we could do further processing with the filtered lines... JobExecutionResult result; // output the filtered lines if (params.has("output")) { filteredLines.writeAsCsv(params.get("output")); // execute program result = env.execute("Accumulator example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); filteredLines.print(); result = env.getLastJobExecutionResult(); } // get the accumulator result via its registration key final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR); System.out.format("Number of detected empty fields per column: %s\n", emptyFields); }
public static void main(final String[] args) throws Exception { final ParameterTool params = ParameterTool.fromArgs(args); final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment(); // make parameters available in the web interface env.getConfig().setGlobalJobParameters(params); // get the data set final DataSet<StringTriple> file = getDataSet(env, params); // filter lines with empty fields final DataSet<StringTriple> filteredLines = file.filter(new EmptyFieldFilter()); // Here, we could do further processing with the filtered lines... JobExecutionResult result; // output the filtered lines if (params.has("output")) { filteredLines.writeAsCsv(params.get("output")); // execute program result = env.execute("Accumulator example"); } else { System.out.println("Printing result to stdout. Use --output to specify output path."); filteredLines.print(); result = env.getLastJobExecutionResult(); } // get the accumulator result via its registration key final List<Integer> emptyFields = result.getAccumulatorResult(EMPTY_FIELD_ACCUMULATOR); System.out.format("Number of detected empty fields per column: %s\n", emptyFields); }
/**
 * {@inheritDoc}
 *
 * <p>Maps every instance to its sufficient statistics (the mappers are configured with
 * the serialized EF Bayesian network), reduces them into a single statistics object,
 * adds the prior counts, and reads the instance-counter accumulator of the job that
 * {@code collect()} triggered. Any checked exception is rethrown unchecked.
 */
@Override
public double updateModel(DataFlink<DataInstance> dataUpdate) {
    try {
        // Ship the network name and the serialized EF Bayesian network to the mappers.
        Configuration config = new Configuration();
        config.setString(BN_NAME, this.dag.getName());
        config.setBytes(EFBN_NAME, Serialization.serializeObject(efBayesianNetwork));

        DataSet<DataInstance> dataset = dataUpdate.getDataSet();

        // collect() triggers job execution; the reduce leaves exactly one aggregated element.
        this.sumSS = dataset.map(new SufficientSatisticsMAP())
            .withParameters(config)
            .reduce(new SufficientSatisticsReduce())
            .collect().get(0);

        //Add the prior
        sumSS.sum(efBayesianNetwork.createInitSufficientStatistics());

        // The accumulator is only available after the job above has executed.
        JobExecutionResult result = dataset.getExecutionEnvironment().getLastJobExecutionResult();

        numInstances = result.getAccumulatorResult(ParallelMaximumLikelihood.COUNTER_NAME+"_"+this.dag.getName());
        numInstances++;//Initial counts
    }catch(Exception ex){
        throw new UndeclaredThrowableException(ex);
    }

    return this.getLogMarginalProbability();
}
/**
 * {@inheritDoc}
 *
 * <p>Re-initializes learning state, maps each data partition to its sufficient statistics
 * (the mappers are configured with the serialized EF Bayesian network), reduces them into
 * a single statistics object, adds the prior counts, and reads the instance-counter
 * accumulator of the job that {@code collect()} triggered. Any checked exception is
 * rethrown unchecked.
 */
@Override
public double updateModel(DataFlink<DataInstance> dataUpdate) {
    try {
        this.initLearning();

        // Ship the network name and the serialized EF Bayesian network to the mappers.
        Configuration config = new Configuration();
        config.setString(ParameterLearningAlgorithm.BN_NAME, this.dag.getName());
        config.setBytes(EFBN_NAME, Serialization.serializeObject(efBayesianNetwork));

        DataSet<DataInstance> dataset = dataUpdate.getDataSet();

        // collect() triggers job execution; the reduce leaves exactly one aggregated element.
        this.sumSS = dataset.mapPartition(new SufficientSatisticsMAP())
            .withParameters(config)
            .reduce(new SufficientSatisticsReduce())
            .collect().get(0);

        //Add the prior
        sumSS.sum(efBayesianNetwork.createInitSufficientStatistics());

        // The accumulator is only available after the job above has executed.
        JobExecutionResult result = dataset.getExecutionEnvironment().getLastJobExecutionResult();

        numInstances = result.getAccumulatorResult(ParallelMaximumLikelihood2.COUNTER_NAME+"_"+this.dag.getName());
        numInstances++;//Initial counts
    }catch(Exception ex){
        throw new UndeclaredThrowableException(ex);
    }

    return this.getLogMarginalProbability();
}