/**
 * Restores the umask setting in {@code conf} after a test override.
 * A {@code null} original value means the key was absent, so it is removed.
 */
private static void resetUmaskInConf(Configuration conf, boolean unsetUmask, String origUmask) {
  if (!unsetUmask) {
    return;
  }
  if (origUmask == null) {
    conf.unset(FsPermission.UMASK_LABEL);
  } else {
    conf.set(FsPermission.UMASK_LABEL, origUmask);
  }
}
/**
 * Undoes a umask override: puts back the saved value, or removes the key
 * entirely when there was no original value to restore.
 */
private static void resetUmaskInConf(Configuration conf, boolean unsetUmask, String origUmask) {
  if (unsetUmask) {
    if (origUmask == null) {
      conf.unset(FsPermission.UMASK_LABEL);
    } else {
      conf.set(FsPermission.UMASK_LABEL, origUmask);
    }
  }
}
/**
 * Checks whether nested column paths are configured on {@code conf}.
 * If so, returns a copy of {@code conf} with that property removed;
 * otherwise returns {@code conf} itself unchanged.
 */
private Configuration unsetNestedColumnPaths(Configuration conf) {
  String nestedPaths = conf.get(ColumnProjectionUtils.READ_NESTED_COLUMN_PATH_CONF_STR);
  if (nestedPaths == null) {
    return conf;
  }
  // Work on a copy so the caller's configuration is left untouched.
  Configuration copy = new Configuration(conf);
  copy.unset(ColumnProjectionUtils.READ_NESTED_COLUMN_PATH_CONF_STR);
  return copy;
}
/**
 * Checks if nested column paths are set on {@code conf}.
 * When set, a copy of {@code conf} without this property is returned;
 * otherwise the original configuration is returned as-is.
 */
private Configuration unsetNestedColumnPaths(Configuration conf) {
  if (conf.get(ColumnProjectionUtils.READ_NESTED_COLUMN_PATH_CONF_STR) == null) {
    return conf;
  }
  // Clone first so the incoming conf is never mutated.
  Configuration confCopy = new Configuration(conf);
  confCopy.unset(ColumnProjectionUtils.READ_NESTED_COLUMN_PATH_CONF_STR);
  return confCopy;
}
}
/** Removes the entry for {@code key} (mapped to its internal name) from the backing config. */
@Override
public void remove(String key) {
  config.unset(getInternalKey(key));
}
/** * decorate the Configuration object to make replication more receptive to delays: * lessen the timeout and numTries. */ private void decorateConf() { this.conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, this.conf.getInt("replication.sink.client.retries.number", 4)); this.conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, this.conf.getInt("replication.sink.client.ops.timeout", 10000)); String replicationCodec = this.conf.get(HConstants.REPLICATION_CODEC_CONF_KEY); if (StringUtils.isNotEmpty(replicationCodec)) { this.conf.set(HConstants.RPC_CODEC_CONF_KEY, replicationCodec); } // use server ZK cluster for replication, so we unset the client ZK related properties if any if (this.conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) { this.conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM); } }
/**
 * Restores {@code key} to a previously captured value: a {@code null} value
 * means the key was originally absent, so it is removed from the config.
 */
private void unset(final String key, final String value) {
  if (value != null) {
    c.set(key, value);
  } else {
    c.unset(key);
  }
}
/**
 * Enables a basic on-heap cache for these jobs. Any BlockCache implementation
 * based on direct memory will likely cause the map tasks to OOM when opening
 * the region. This is done here instead of in TableSnapshotRegionRecordReader
 * in case an advanced user wants to override this behavior in their job.
 */
public static void resetCacheConfig(Configuration conf) {
  // Force the on-heap block cache back to its default size.
  conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
    HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  // Disable the bucket cache and remove its IO engine so no off-heap/direct
  // memory cache is instantiated.
  conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f);
  conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
}
/**
 * Sets the acidOperationalProperties in the configuration object argument.
 *
 * @param conf Mutable configuration object to update.
 * @param isTxnTable Whether the table is transactional. When {@code false}, the
 *                   transactional-scan and operational-properties keys are removed.
 * @param properties An acidOperationalProperties object to initialize from. If this is null,
 *                   we assume this is a full transactional table.
 */
public static void setAcidOperationalProperties(
    Configuration conf, boolean isTxnTable, AcidOperationalProperties properties) {
  if (isTxnTable) {
    // isTxnTable is known to be true in this branch; pass the constant for clarity.
    HiveConf.setBoolVar(conf, ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
    if (properties != null) {
      HiveConf.setIntVar(conf, ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES, properties.toInt());
    }
  } else {
    // Non-transactional: clear both keys so stale values are not picked up.
    conf.unset(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname);
    conf.unset(ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES.varname);
  }
}
protected void walToHFiles(List<String> dirPaths, List<String> tableList) throws IOException { Tool player = new WALPlayer(); // Player reads all files in arbitrary directory structure and creates // a Map task for each file. We use ';' as separator // because WAL file names contains ',' String dirs = StringUtils.join(dirPaths, ';'); String jobname = "Incremental_Backup-" + backupId ; Path bulkOutputPath = getBulkOutputDir(); conf.set(WALPlayer.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); conf.set(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ";"); conf.setBoolean(WALPlayer.MULTI_TABLES_SUPPORT, true); conf.set(JOB_NAME_CONF_KEY, jobname); String[] playerArgs = { dirs, StringUtils.join(tableList, ",") }; try { player.setConf(conf); int result = player.run(playerArgs); if(result != 0) { throw new IOException("WAL Player failed"); } conf.unset(WALPlayer.INPUT_FILES_SEPARATOR_KEY); conf.unset(JOB_NAME_CONF_KEY); } catch (IOException e) { throw e; } catch (Exception ee) { throw new IOException("Can not convert from directory " + dirs + " (check Hadoop, HBase and WALPlayer M/R job logs) ", ee); } }
public static void configureCompression(Configuration config, HiveCompressionCodec compressionCodec) { boolean compression = compressionCodec != HiveCompressionCodec.NONE; config.setBoolean(COMPRESSRESULT.varname, compression); config.setBoolean("mapred.output.compress", compression); config.setBoolean(FileOutputFormat.COMPRESS, compression); // For DWRF config.set(HIVE_ORC_DEFAULT_COMPRESS.varname, compressionCodec.getOrcCompressionKind().name()); config.set(HIVE_ORC_COMPRESSION.varname, compressionCodec.getOrcCompressionKind().name()); // For ORC config.set(OrcTableProperties.COMPRESSION.getPropName(), compressionCodec.getOrcCompressionKind().name()); // For RCFile and Text if (compressionCodec.getCodec().isPresent()) { config.set("mapred.output.compression.codec", compressionCodec.getCodec().get().getName()); config.set(FileOutputFormat.COMPRESS_CODEC, compressionCodec.getCodec().get().getName()); } else { config.unset("mapred.output.compression.codec"); config.unset(FileOutputFormat.COMPRESS_CODEC); } // For Parquet config.set(ParquetOutputFormat.COMPRESSION, compressionCodec.getParquetCompressionCodec().name()); // For SequenceFile config.set(FileOutputFormat.COMPRESS_TYPE, BLOCK.toString()); }
/**
 * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to the
 * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
 *
 * @return a cluster connection whose batch pool has been eagerly initialized
 * @throws IOException if the connection cannot be created
 */
private ClusterConnection createClusterConnection() throws IOException {
  Configuration conf = this.conf;
  if (conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
    // Use server ZK cluster for server-issued connections, so we clone
    // the conf and unset the client ZK related properties
    conf = new Configuration(this.conf);
    conf.unset(HConstants.CLIENT_ZOOKEEPER_QUORUM);
  }
  // Create a cluster connection that when appropriate, can short-circuit and go directly to the
  // local server if the request is to the local server bypassing RPC. Can be used for both local
  // and remote invocations.
  ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(conf, null,
    userProvider.getCurrent(), serverName, rpcServices, rpcServices);
  // This is used to initialize the batch thread pool inside the connection implementation.
  // When deploy a fresh cluster, we may first use the cluster connection in InitMetaProcedure,
  // which will be executed inside the PEWorker, and then the batch thread pool will inherit the
  // thread group of PEWorker, which will be destroy when shutting down the ProcedureExecutor. It
  // will cause lots of procedure related UTs to fail, so here let's initialize it first, no harm.
  conn.getTable(TableName.META_TABLE_NAME).close();
  return conn;
}
private void setFs() throws IOException { if(this.dfsCluster == null){ LOG.info("Skipping setting fs because dfsCluster is null"); return; } FileSystem fs = this.dfsCluster.getFileSystem(); FSUtils.setFsDefault(this.conf, new Path(fs.getUri())); // re-enable this check with dfs conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE); }
/**
 * Verifies that a RecoveredReplicationSourceShipper reads its start position
 * from the dead server's queue entry (here mocked to 1001), not the live
 * server's (mocked to -1). The conf override is now cleared in a finally
 * block so an assertion failure cannot leak it into other tests sharing conf.
 */
@Test
public void testRecoveredReplicationSourceShipperGetPosition() throws Exception {
  String walGroupId = "fake-wal-group-id";
  ServerName serverName = ServerName.valueOf("www.example.com", 12006, 1524679704418L);
  ServerName deadServer = ServerName.valueOf("www.deadServer.com", 12006, 1524679704419L);
  PriorityBlockingQueue<Path> queue = new PriorityBlockingQueue<>();
  queue.put(new Path("/www/html/test"));
  RecoveredReplicationSource source = Mockito.mock(RecoveredReplicationSource.class);
  Server server = Mockito.mock(Server.class);
  Mockito.when(server.getServerName()).thenReturn(serverName);
  Mockito.when(source.getServer()).thenReturn(server);
  Mockito.when(source.getServerWALsBelongTo()).thenReturn(deadServer);
  ReplicationQueueStorage storage = Mockito.mock(ReplicationQueueStorage.class);
  Mockito.when(storage.getWALPosition(Mockito.eq(serverName), Mockito.any(), Mockito.any()))
    .thenReturn(1001L);
  Mockito.when(storage.getWALPosition(Mockito.eq(deadServer), Mockito.any(), Mockito.any()))
    .thenReturn(-1L);
  conf.setInt("replication.source.maxretriesmultiplier", -1);
  try {
    RecoveredReplicationSourceShipper shipper =
      new RecoveredReplicationSourceShipper(conf, walGroupId, queue, source, storage);
    Assert.assertEquals(1001L, shipper.getStartPosition());
  } finally {
    // Always restore the shared conf, even if the assertion above fails.
    conf.unset("replication.source.maxretriesmultiplier");
  }
}
}
/**
 * Brings up a 3-node mini DFS cluster configured with the parameterized data
 * transfer protection, encryption algorithm and cipher suite, then creates the
 * plain and encryption-zone test directories.
 */
@Before
public void setUp() throws Exception {
  TEST_UTIL.getConfiguration().set("dfs.data.transfer.protection", protection);
  // Encrypt data transfer whenever either an algorithm or a cipher suite is given.
  boolean encryptDataTransfer =
    !StringUtils.isBlank(encryptionAlgorithm) || !StringUtils.isBlank(cipherSuite);
  TEST_UTIL.getConfiguration().setBoolean(DFS_ENCRYPT_DATA_TRANSFER_KEY, encryptDataTransfer);
  if (StringUtils.isBlank(encryptionAlgorithm)) {
    TEST_UTIL.getConfiguration().unset(DFS_DATA_ENCRYPTION_ALGORITHM_KEY);
  } else {
    TEST_UTIL.getConfiguration().set(DFS_DATA_ENCRYPTION_ALGORITHM_KEY, encryptionAlgorithm);
  }
  if (StringUtils.isBlank(cipherSuite)) {
    TEST_UTIL.getConfiguration().unset(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY);
  } else {
    TEST_UTIL.getConfiguration().set(DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuite);
  }
  TEST_UTIL.startMiniDFSCluster(3);
  FS = TEST_UTIL.getDFSCluster().getFileSystem();
  testDirOnTestFs = new Path("/" + name.getMethodName().replaceAll("[^0-9a-zA-Z]", "_"));
  FS.mkdirs(testDirOnTestFs);
  entryptionTestDirOnTestFs = new Path("/" + testDirOnTestFs.getName() + "_enc");
  FS.mkdirs(entryptionTestDirOnTestFs);
  createEncryptionZone();
}
@Before
public void init() {
  // Fresh metastore conf for each test.
  conf = MetastoreConf.newMetastoreConf();
  // Placeholder credentials for the metastore connection.
  MetastoreConf.setVar(conf, ConfVars.CONNECTION_USER_NAME, "dummyUser");
  MetastoreConf.setVar(conf, ConfVars.PWD, "dummyPass");
  // Remove any configured pooling type so tests start from the default.
  conf.unset(ConfVars.CONNECTION_POOLING_TYPE.getVarname());
}
/**
 * Verifies that a finished snapshot handler is eventually cleaned up by the
 * sentinel-cleanup mechanism after the configured timeout (set here to 5s),
 * and that the cleanup timeout key is removed from the shared conf afterwards.
 */
@Test
public void testCleanFinishedHandler() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  Configuration conf = UTIL.getConfiguration();
  try {
    // Shorten the sentinel cleanup timeout so the test completes quickly.
    conf.setLong(SnapshotManager.HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS, 5 * 1000L);
    SnapshotManager manager = getNewManager(conf, 1);
    TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class);
    assertFalse("Manager is in process when there is no current handler",
      manager.isTakingSnapshot(tableName));
    manager.setSnapshotHandlerForTesting(tableName, handler);
    // While the handler reports unfinished, the manager must report in-progress.
    Mockito.when(handler.isFinished()).thenReturn(false);
    assertTrue(manager.isTakingAnySnapshot());
    assertTrue("Manager isn't in process when handler is running",
      manager.isTakingSnapshot(tableName));
    // Once finished, the per-table check flips immediately, but the sentinel
    // is still tracked until the cleanup timeout elapses.
    Mockito.when(handler.isFinished()).thenReturn(true);
    assertFalse("Manager is process when handler isn't running",
      manager.isTakingSnapshot(tableName));
    assertTrue(manager.isTakingAnySnapshot());
    // Sleep past the 5s cleanup timeout so the sentinel gets purged.
    Thread.sleep(6 * 1000);
    assertFalse(manager.isTakingAnySnapshot());
  } finally {
    // Restore the shared conf regardless of test outcome.
    conf.unset(SnapshotManager.HBASE_SNAPSHOT_SENTINELS_CLEANUP_TIMEOUT_MILLIS);
  }
}
@Test public void testSplitOffStripeOffPeak() throws Exception { // for HBASE-11439 Configuration conf = HBaseConfiguration.create(); // Test depends on this not being set to pass. Default breaks test. TODO: Revisit. conf.unset("hbase.hstore.compaction.min.size"); conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2); // Select the last 2 files. StripeCompactionPolicy.StripeInformationProvider si = createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 1L, 1L }); assertEquals(2, createPolicy(conf).selectCompaction(si, al(), false).getRequest().getFiles() .size()); // Make sure everything is eligible in offpeak. conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 500f); assertEquals(3, createPolicy(conf).selectCompaction(si, al(), true).getRequest().getFiles() .size()); }