// Fragment (enclosing method not visible). Builds the dictionary-path RDD, then
// coalesce(1, shuffle=false) narrows it to a single partition WITHOUT a shuffle so the
// result lands in exactly one SequenceFile at dictOutputPath (Text key/value pairs).
// NOTE(review): no-shuffle coalesce(1) also collapses the upstream stage onto one task,
// serializing any computation above it — presumably acceptable here because the RDD is
// tiny (one entry per column); confirm against the caller.
metaUrl, segmentId, StringUtil.splitByComma(segmentIds), statOutputPath, tblColRefs, sConf)); colToDictPathRDD.coalesce(1, false).saveAsNewAPIHadoopFile(dictOutputPath, Text.class, Text.class, SequenceFileOutputFormat.class);
// Fragment (enclosing method not visible; the two lines below are duplicate snippets).
// Removes any stale debug output, then writes debugRDD as a single text file.
// Fix: coalesce(1, true) is, per the Spark RDD API, exactly repartition(1) — the
// shuffle=true flag keeps upstream stages at full parallelism and only collapses to one
// partition for the final write. repartition(1) states that intent directly and is the
// idiomatic spelling.
// NOTE(review): a single output partition still serializes the final write — confirm the
// debug dataset is small enough for that to be acceptable.
String debugPath = "/biomarker/output/rnae/debug"; deleteDirectoryAndIgnoreException(debugPath); debugRDD.repartition(1).saveAsTextFile(debugPath);
String debugPath = "/biomarker/output/rnae/debug"; deleteDirectoryAndIgnoreException(debugPath); debugRDD.repartition(1).saveAsTextFile(debugPath);
// Fragment (enclosing method not visible; the four lines below are duplicate snippets).
// Writes rankedProducts as a single text file at outputPath.
// Fix: coalesce(1, true) is defined by the Spark RDD API as exactly repartition(1); the
// shuffle keeps upstream computation parallel and only the final write is collapsed to
// one partition. repartition(1) is the idiomatic, intent-revealing form.
rankedProducts.repartition(1).saveAsTextFile(outputPath);
rankedProducts.repartition(1).saveAsTextFile(outputPath);
rankedProducts.repartition(1).saveAsTextFile(outputPath);
rankedProducts.repartition(1).saveAsTextFile(outputPath);
// Fragment: the opening `if (...)` of this chain is outside the visible span, so the
// condition guarding the first branch cannot be stated here — presumably it checks
// partitions().size() > this.workers; confirm against the full source.
// First branch: shrink to `workers` partitions without a shuffle (coalesce can only
// reduce partition count when shuffle is off).
// Second branch: grow to `workers` partitions — this requires repartition(), since a
// no-shuffle coalesce cannot increase the partition count.
loadedGraphRDD = loadedGraphRDD.coalesce(this.workers); else if (loadedGraphRDD.partitions().size() < this.workers) // ensures that the loaded graphRDD does not have less partitions than workers loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
// Fragment: chain continuation (the receiver RDD and the rest of the lambda body are
// outside the visible span; the two lines below are duplicate snippets).
// coalesce(16 * cfg.parallelism) with the default shuffle=false can only REDUCE the
// partition count to at most 16x the configured parallelism — if the source already has
// fewer partitions it is a no-op. NOTE(review): confirm the source RDD is expected to
// have more partitions than this target; otherwise the call has no effect.
.coalesce(16 * cfg.parallelism) .map(entry -> { GenericRecord genericRecord
.coalesce(16 * cfg.parallelism) .map(entry -> { GenericRecord genericRecord
// Fragment (duplicate snippet of the same partition-alignment chain): the opening
// `if (...)` is outside the visible span — presumably it checks
// partitions().size() > this.workers; confirm against the full source.
// coalesce (no shuffle) is used to shrink, repartition (shuffle) to grow, because a
// no-shuffle coalesce cannot increase the partition count.
loadedGraphRDD = loadedGraphRDD.coalesce(this.workers); else if (loadedGraphRDD.partitions().size() < this.workers) // ensures that the loaded graphRDD does not have less partitions than workers loadedGraphRDD = loadedGraphRDD.repartition(this.workers);
// Fragment: the guarding `if` and the following `else` body lie outside the visible
// span. Shrinks the RDD to `workers` partitions via no-shuffle coalesce — cheap, but
// only valid when the current partition count exceeds `workers` (coalesce without a
// shuffle cannot grow the partition count). Presumably the surrounding condition
// guarantees that; confirm against the full source.
loadedGraphRDD = loadedGraphRDD.coalesce(this.workers); } else {
// Fragment (duplicate snippet; enclosing method not visible). coalesce(1, shuffle=false)
// collapses the dictionary-path RDD to one partition without a shuffle so a single
// SequenceFile (Text key/value) is written at dictOutputPath.
// NOTE(review): the no-shuffle collapse also runs the upstream stage as one task —
// presumably fine for a small per-column RDD; verify against the caller.
metaUrl, segmentId, StringUtil.splitByComma(segmentIds), statOutputPath, tblColRefs, sConf)); colToDictPathRDD.coalesce(1, false).saveAsNewAPIHadoopFile(dictOutputPath, Text.class, Text.class, SequenceFileOutputFormat.class);