Class.forName("scala.collection.mutable.WrappedArray$ofRef") }; SparkConf conf = new SparkConf().setAppName("Merge dictionary for cube:" + cubeName + ", segment " + segmentId); conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer"); conf.set("spark.kryo.registrator", "org.apache.kylin.engine.spark.KylinKryoRegistrator"); conf.set("spark.kryo.registrationRequired", "true").registerKryoClasses(kryoClassArray);
public static SparkConf getSparkConfBasedOn(SparkSession.Builder sparkSessionBuilder) {
    try {
        SparkConf sparkConf = new SparkConf();
        // Reach into the builder's private options map via its Scala-mangled field name.
        Field options = sparkSessionBuilder.getClass()
                .getDeclaredField("org$apache$spark$sql$SparkSession$Builder$$options");
        options.setAccessible(true);
        Iterator iterator = ((scala.collection.mutable.HashMap) options.get(sparkSessionBuilder)).iterator();
        while (iterator.hasNext()) {
            Tuple2 x = (Tuple2) iterator.next();
            sparkConf.set((String) x._1, (String) x._2);
        }
        return sparkConf;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
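A minimal usage sketch of the method above, assuming only that Spark is on the classpath (the app name and config key here are illustrative): options set on the builder, including the one set by appName(), are copied into the returned SparkConf.

SparkSession.Builder builder = SparkSession.builder()
        .appName("reflective-conf-demo")                 // illustrative name
        .config("spark.sql.shuffle.partitions", "8");

SparkConf extracted = getSparkConfBasedOn(builder);
System.out.println(extracted.get("spark.app.name"));               // reflective-conf-demo
System.out.println(extracted.get("spark.sql.shuffle.partitions")); // 8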
SparkConf conf = new SparkConf().setMaster(master).setAppName("basicavgwithkyro");
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
conf.set("spark.kryo.registrator", AvgRegistrator.class.getName());
JavaSparkContext sc = new JavaSparkContext(conf);
JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4));
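The snippet above points Kryo at an AvgRegistrator class without showing it. A minimal sketch of such a registrator, assuming a custom AvgCount class is what needs registering (both the class body and AvgCount are assumptions, not code from the snippet's source):

import com.esotericsoftware.kryo.Kryo;
import org.apache.spark.serializer.KryoRegistrator;

public class AvgRegistrator implements KryoRegistrator {
    @Override
    public void registerClasses(Kryo kryo) {
        // Register custom classes so Kryo writes compact IDs instead of full class names.
        kryo.register(AvgCount.class); // AvgCount is assumed; register your own types here
    }
}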
private SparkConf createSparkConf(List<SparkConfiguration.Configuration> configurations, SparkConf old) {
    SparkConf sparkConf = new SparkConf();
    sparkConf.set(SPARK_EXTRA_LISTENERS, old.get(SPARK_EXTRA_LISTENERS));
    sparkConf.set(BEAKERX_ID, old.get(BEAKERX_ID));
    if (old.contains(SPARK_APP_NAME)) {
        sparkConf.set(SPARK_APP_NAME, old.get(SPARK_APP_NAME));
    }
    configurations.forEach(x -> {
        if (x.getName() != null) {
            sparkConf.set(x.getName(), (x.getValue() != null) ? x.getValue() : "");
        }
    });
    return sparkConf;
}
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        throw new Exception("Usage BasicLoadJson [sparkMaster] [cassandraHost]");
    }
    String sparkMaster = args[0];
    String cassandraHost = args[1];
    SparkConf conf = new SparkConf(true)
            .set("spark.cassandra.connection.host", cassandraHost);
    JavaSparkContext sc = new JavaSparkContext(sparkMaster, "basicquerycassandra", conf);

    // Entire table as an RDD.
    // Assumes your table test was created as: CREATE TABLE test.kv(key text PRIMARY KEY, value int);
    JavaRDD<CassandraRow> data = javaFunctions(sc).cassandraTable("test", "kv");

    // Print some basic stats.
    System.out.println(data.mapToDouble(new DoubleFunction<CassandraRow>() {
        public double call(CassandraRow row) {
            return row.getInt("value");
        }
    }).stats());

    // Write some basic data to Cassandra.
    ArrayList<KeyValue> input = new ArrayList<KeyValue>();
    input.add(KeyValue.newInstance("mostmagic", 3));
    JavaRDD<KeyValue> kvRDD = sc.parallelize(input);
    javaFunctions(kvRDD, KeyValue.class).saveToCassandra("test", "kv");
}

public static class KeyValue implements Serializable {
    // ... (class body truncated in this result)
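The KeyValue class is cut off above. From the KeyValue.newInstance("mostmagic", 3) call and the mapping onto test.kv(key, value), a plausible completion is the bean below (field and accessor names are assumptions inferred from that usage, not the original body):

public static class KeyValue implements Serializable {
    private String key;
    private Integer value;

    public static KeyValue newInstance(String key, Integer value) {
        KeyValue kv = new KeyValue();   // assumed shape; the original body is not shown
        kv.setKey(key);
        kv.setValue(value);
        return kv;
    }

    // Bean-style accessors let the connector map fields to the key and value columns.
    public String getKey() { return key; }
    public void setKey(String key) { this.key = key; }
    public Integer getValue() { return value; }
    public void setValue(Integer value) { this.value = value; }
}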
@Test
public void offHeapConfigurationBackwardsCompatibility() {
    // Tests backwards-compatibility with the old `spark.unsafe.offHeap` configuration, which
    // was deprecated in Spark 1.6 and replaced by `spark.memory.offHeap.enabled` (see SPARK-12251).
    final SparkConf conf = new SparkConf()
        .set("spark.unsafe.offHeap", "true")
        .set("spark.memory.offHeap.size", "1000");
    final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
    Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode);
}
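For contrast, the same check written against the replacement key named in the comment, spark.memory.offHeap.enabled; a minimal sketch that mirrors the test body above rather than quoting any existing test:

final SparkConf conf = new SparkConf()
    .set("spark.memory.offHeap.enabled", "true")
    .set("spark.memory.offHeap.size", "1000");
final TaskMemoryManager manager = new TaskMemoryManager(new TestMemoryManager(conf), 0);
// With off-heap explicitly enabled, Tungsten should allocate pages off-heap.
Assert.assertSame(MemoryMode.OFF_HEAP, manager.tungstenMemoryMode);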
memoryManager = new TestMemoryManager(
    new SparkConf()
        .set("spark.memory.offHeap.enabled", "" + useOffHeapMemoryAllocator())
        .set("spark.memory.offHeap.size", "256mb")
        .set("spark.shuffle.spill.compress", "false")
        .set("spark.shuffle.compress", "false"));
taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
@Test(expected = AssertionError.class)
public void callingFreePageOnDirectlyAllocatedPageTriggersAssertionError() {
    final TaskMemoryManager manager = new TaskMemoryManager(
        new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = MemoryAllocator.HEAP.allocate(256);
    manager.freePage(dataPage, c);
}
@Test(expected = AssertionError.class)
public void freeingPageDirectlyInAllocatorTriggersAssertionError() {
    final TaskMemoryManager manager = new TaskMemoryManager(
        new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    MemoryAllocator.HEAP.free(dataPage);
}
@Test
public void freeingPageSetsPageNumberToSpecialConstant() {
    final TaskMemoryManager manager = new TaskMemoryManager(
        new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")), 0);
    final MemoryConsumer c = new TestMemoryConsumer(manager, MemoryMode.ON_HEAP);
    final MemoryBlock dataPage = manager.allocatePage(256, c);
    c.freePage(dataPage);
    Assert.assertEquals(MemoryBlock.FREED_IN_ALLOCATOR_PAGE_NUMBER, dataPage.pageNumber);
}