/**
 * Starts an embedded Spark-on-YARN mini cluster for tests.
 *
 * @param conf base Hadoop configuration; mutated here and handed to the cluster
 * @param numberOfTaskTrackers not referenced in this constructor — TODO confirm with callers
 * @param nameNode default filesystem URI, installed as {@code fs.defaultFS}
 * @param numDir not referenced in this constructor — TODO confirm with callers
 * @throws IOException if the mini cluster fails to start
 */
public MiniSparkShim(Configuration conf, int numberOfTaskTrackers, String nameNode, int numDir) throws IOException {
  mr = new MiniSparkOnYARNCluster("sparkOnYarn");
  conf.set("fs.defaultFS", nameNode);
  // Use the fair scheduler in the mini YARN cluster.
  conf.set("yarn.resourcemanager.scheduler.class",
      "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
  // disable resource monitoring, although it should be off by default
  conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, false);
  // Memory bounds: 2 GB per NodeManager, allocations from 512 MB to 2 GB in 512 MB steps.
  conf.setInt(YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB, 2048);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
  conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB, 512);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 2048);
  configureImpersonation(conf);
  mr.init(conf);
  mr.start();
  // Capture the cluster's effective configuration for later use by this shim.
  this.conf = mr.getConfig();
}
/**
 * Applies this object's HDFS/IPC client settings on top of the given configuration.
 * Order matters: resource files are copied in first, then programmatic overrides.
 *
 * @param config configuration to mutate in place
 */
public void updateConfiguration(Configuration config) {
  copy(resourcesConfiguration, config);
  // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are
  // rack local
  config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class,
      DNSToSwitchMapping.class);
  if (socksProxy != null) {
    // Route Hadoop RPC through the configured SOCKS proxy.
    config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class,
        SocketFactory.class);
    config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
  }
  if (domainSocketPath != null) {
    config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
  }
  // only enable short circuit reads if domain socket path is properly configured
  if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
    config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  }
  // Timeouts are held as Durations; Hadoop expects int milliseconds (toIntExact traps overflow).
  config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
  config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
  config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
  config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);
  if (isHdfsWireEncryptionEnabled) {
    // "privacy" enables authentication, integrity and encryption on Hadoop RPC.
    config.set(HADOOP_RPC_PROTECTION, "privacy");
    config.setBoolean("dfs.encrypt.data.transfer", true);
  }
  config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);
  config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);
  configureCompression(config, compressionCodec);
  // Finally let the S3 updater layer in its own settings.
  s3ConfigurationUpdater.updateConfiguration(config);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Fetch the shared configuration once rather than per setting.
  Configuration conf = TEST_UTIL.getConfiguration();
  // Short intervals/pauses so the test reacts quickly.
  conf.setInt("hbase.regionserver.msginterval", 100);
  conf.setInt("hbase.client.pause", 250);
  conf.setInt("hbase.client.retries.number", 6);
  // Generous handler counts on the region servers.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 30);
  conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 30);
  conf.setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(3);
}
@Test
public void testBoundsGreaterThanDefault() throws Exception {
  // Remember the current value so it can be restored for later tests.
  final int originalGroups = CONF.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS);
  try {
    final int quadrupled = originalGroups * 4;
    CONF.setInt(NUM_REGION_GROUPS, quadrupled);
    final String parallelism = Integer.toString(quadrupled);
    final String[] args = { "-threads", parallelism, "-verify", "-noclosefs",
        "-iterations", "3000", "-regions", parallelism };
    // Run the evaluation on a copy of the configuration; expect a clean exit.
    int errCode = WALPerformanceEvaluation.innerMain(new Configuration(CONF), args);
    assertEquals(0, errCode);
  } finally {
    CONF.setInt(NUM_REGION_GROUPS, originalGroups);
  }
}
@Test public void testZKConfigLoading() throws Exception { Configuration conf = HBaseConfiguration.create(); // Test that we read only from the config instance // (i.e. via hbase-default.xml and hbase-site.xml) conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181); Properties props = ZKConfig.makeZKProps(conf); assertEquals("Property client port should have been default from the HBase config", "2181", props.getProperty("clientPort")); }
/**
 * Verify that, on HDFS, a FileLink stays readable even while the file it
 * currently points at is renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility util = new HBaseTestingUtility();
  Configuration conf = util.getConfiguration();
  // 1 MB blocks with a 2 MB client prefetch (same values as the original test).
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
  util.startMiniDFSCluster(1);
  MiniDFSCluster dfsCluster = util.getDFSCluster();
  FileSystem fs = dfsCluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());
  try {
    testLinkReadDuringRename(fs, util.getDefaultRootDirPath());
  } finally {
    // Always tear the mini cluster down, even on assertion failure.
    util.shutdownMiniCluster();
  }
}
@Test
public void testThrottlingByMaxRitPercent() throws Exception {
  // Set max balancing time to 500 ms and max percent of regions in transition to 0.05
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_BALANCER_MAX_BALANCING, 500);
  TEST_UTIL.getConfiguration().setDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT, 0.05);
  TEST_UTIL.startMiniCluster(2);
  TableName tableName = createTable("testThrottlingByMaxRitPercent");
  final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  // Skew region placement so the balancer has work to do.
  unbalance(master, tableName);
  // Background checker samples regions-in-transition while the balance runs;
  // maxCount records the highest value observed.
  AtomicInteger maxCount = new AtomicInteger(0);
  AtomicBoolean stop = new AtomicBoolean(false);
  Thread checker = startBalancerChecker(master, maxCount, stop);
  master.balance();
  stop.set(true);
  checker.interrupt();
  checker.join();
  // The max number of regions in transition is 100 * 0.05 = 5
  assertTrue("max regions in transition: " + maxCount.get(), maxCount.get() == 5);
  TEST_UTIL.deleteTable(tableName);
}
@Test
public void testNonDefaultConfigurationProperties() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);
  // Choose values guaranteed to differ from the chore defaults.
  final int customPeriod = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT + 1;
  final long customDelay = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT + 1L;
  final String customTimeUnit = TimeUnit.SECONDS.name();
  conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, customPeriod);
  conf.setLong(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_KEY, customDelay);
  conf.set(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY, customTimeUnit);
  // The chore must pick up the overridden values from the configuration.
  final RegionSizeReportingChore chore = new RegionSizeReportingChore(rs);
  assertEquals(customDelay, chore.getInitialDelay());
  assertEquals(customPeriod, chore.getPeriod());
  assertEquals(TimeUnit.valueOf(customTimeUnit), chore.getTimeUnit());
}
/** * Test a table creation including a coprocessor path * which is on the classpath * @result Table will be created with the coprocessor */ @Test public void testCreationClasspathCoprocessor() throws Exception { Configuration conf = UTIL.getConfiguration(); // load coprocessor under test conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, CoprocessorWhitelistMasterObserver.class.getName()); conf.setStrings(CoprocessorWhitelistMasterObserver.CP_COPROCESSOR_WHITELIST_PATHS_KEY, new String[]{}); // set retries low to raise exception quickly conf.setInt("hbase.client.retries.number", 5); UTIL.startMiniCluster(); HTableDescriptor htd = new HTableDescriptor(TEST_TABLE); HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY); htd.addFamily(hcd); htd.addCoprocessor(TestRegionObserver.class.getName()); Connection connection = ConnectionFactory.createConnection(conf); Admin admin = connection.getAdmin(); LOG.info("Creating Table"); admin.createTable(htd); // ensure table was created and coprocessor is added to table LOG.info("Done Creating Table"); Table t = connection.getTable(TEST_TABLE); assertEquals(1, t.getTableDescriptor().getCoprocessors().size()); } }
@Test
public void testSplitOffStripeOffPeak() throws Exception {
  // for HBASE-11439
  Configuration conf = HBaseConfiguration.create();
  // Test depends on this not being set to pass. Default breaks test. TODO: Revisit.
  conf.unset("hbase.hstore.compaction.min.size");
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  // Select the last 2 files.
  StripeCompactionPolicy.StripeInformationProvider si = createStripesWithSizes(0, 0,
      new Long[] { defaultSplitSize - 2, 1L, 1L });
  // Peak-time selection (isOffpeak = false): expect only the two small files.
  assertEquals(2, createPolicy(conf).selectCompaction(si, al(), false).getRequest().getFiles()
      .size());
  // Make sure everything is eligible in offpeak.
  conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 500f);
  // Off-peak selection (isOffpeak = true): all three files should be picked.
  assertEquals(3, createPolicy(conf).selectCompaction(si, al(), true).getRequest().getFiles()
      .size());
}
/**
 * Test recover lease eventually succeeding.
 */
@Test
public void testRecoverLease() throws IOException {
  // Short DFS lease-recovery timeout so the retry loop is exercised quickly.
  HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000);
  CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class);
  Mockito.when(reporter.progress()).thenReturn(true);
  DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class);
  // Fail four times and pass on the fifth.
  Mockito.when(dfs.recoverLease(FILE)).
      thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true);
  assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter));
  // Exactly five recoverLease attempts must have been made.
  Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE);
  // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two
  // invocations will happen pretty fast... then we fall into the longer wait loop).
  assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) >
      (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000)));
}
@Test
public void testNothingToCompactFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // Require four L0 files before an L0 compaction may be selected.
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 4);
  StripeCompactionPolicy policy = createPolicy(conf);
  // Only three L0 files exist — below the minimum, so nothing should compact.
  StripeCompactionPolicy.StripeInformationProvider stripes = createStripesL0Only(3, 10);
  verifyNoCompaction(policy, stripes);
  // Same expectation for striped files.
  stripes = createStripes(3, KEY_A);
  verifyNoCompaction(policy, stripes);
}
@BeforeClass
public static void setupBeforeClass() throws Exception {
  conf = TEST_UTIL.getConfiguration();
  // Enable favored nodes based load balancer
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class);
  // Generous run time, zero move cost so region moves are never penalized,
  // and always execute up to the maximum number of steps.
  conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30000);
  conf.setInt("hbase.master.balancer.stochastic.moveCost", 0);
  conf.setBoolean("hbase.master.balancer.stochastic.execute.maxSteps", true);
  // No user-table regions hosted on the master.
  conf.set(BaseLoadBalancer.TABLES_ON_MASTER, "none");
}
private static Configuration buildSpnegoConfiguration(String serverPrincipal, File serverKeytab) { Configuration conf = new Configuration(); KerberosName.setRules("DEFAULT"); conf.setInt(HttpServer.HTTP_MAX_THREADS, TestHttpServer.MAX_THREADS); // Enable Kerberos (pre-req) conf.set("hbase.security.authentication", "kerberos"); conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos"); conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY, serverPrincipal); conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY, serverKeytab.getAbsolutePath()); return conf; }
@BeforeClass
public static void before() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  // Region, master and regionserver coprocessors under test.
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      MasterSyncObserver.class.getName(), CPMasterObserver.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
  // Quota support must be enabled for these tests.
  conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
      RegionServerObserver.class);
  UTIL.startMiniCluster();
  // Block until quota support is ready before tests run.
  waitForQuotaInitialize(UTIL);
  ADMIN = UTIL.getAdmin();
}
@BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(3); Configuration c = new Configuration(UTIL.getConfiguration()); // Tests to 4 retries every 5 seconds. Make it try every 1 second so more // responsive. 1 second is default as is ten retries. c.setLong("hbase.client.pause", 1000); c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10); connection = ConnectionFactory.createConnection(c); }
@BeforeClass
public static void setup() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(HttpServer.HTTP_MAX_THREADS, MAX_THREADS);
  server = createTestServer(conf);
  // Servlets exercised by the individual test cases.
  server.addServlet("echo", "/echo", EchoServlet.class);
  server.addServlet("echomap", "/echomap", EchoMapServlet.class);
  server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
  server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
  // Jersey (JAX-RS) resources served under /jersey/*.
  server.addJerseyResourcePackage(
      JerseyResource.class.getPackage().getName(), "/jersey/*");
  server.start();
  baseUrl = getServerURL(server);
  LOG.info("HTTP server started: "+ baseUrl);
}
/**
 * Points the given test utility at the shared ZK cluster and applies the
 * replication-related test tunings under the supplied znode parent.
 *
 * @param util test utility whose configuration is mutated
 * @param zkParent ZooKeeper znode parent for this cluster
 */
protected static void initTestingUtility(HBaseTestingUtility util, String zkParent) {
  util.setZkCluster(ZK_UTIL.getZkCluster());
  Configuration conf = util.getConfiguration();
  conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkParent);
  // Replication source tunings: larger batches, fast retries.
  conf.setInt("replication.source.size.capacity", 102400);
  conf.setLong("replication.source.sleepforretries", 100);
  conf.setInt("hbase.regionserver.maxlogs", 10);
  conf.setLong("hbase.master.logcleaner.ttl", 10);
  // Fast ZK session recovery.
  conf.setInt("zookeeper.recovery.retry", 1);
  conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf.setLong("replication.sleep.before.failover", 2000);
  conf.setInt("replication.source.maxretriesmultiplier", 10);
  // Ratio 1.0 — presumably consider all peer region servers; confirm against ReplicationSource.
  conf.setFloat("replication.source.ratio", 1.0f);
  conf.setBoolean("replication.source.eof.autorecovery", true);
}
/**
 * Builds and starts the mini cluster used by the RSGroup tests.
 * All configuration must be applied before {@code startMiniCluster}.
 */
public static void setUpTestBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setFloat(
      "hbase.master.balancer.stochastic.tableSkewCost", 6000);
  TEST_UTIL.getConfiguration().set(
      HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
  TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      RSGroupAdminEndpoint.class.getName() + "," + CPMasterObserver.class.getName());
  // FIX: these two settings were previously applied AFTER startMiniCluster(),
  // too late for the starting master to observe them. Set them before startup.
  TEST_UTIL.getConfiguration().setInt(
      ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
      NUM_SLAVES_BASE - 1);
  TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1);
  initialize();
}
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  // Use the same receiver throughout (the original mixed this.conf and conf).
  conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  // Cache-on-write flags follow the parameterized cache-on-write type.
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cacheConf = new CacheConfig(conf, blockCache);
  fs = HFileSystem.get(conf);
}