/**
 * Fallback to clear text passwords in configuration.
 * @param name name of the password property to look up
 * @return clear text password or null
 */
protected char[] getPasswordFromConfig(String name) {
  char[] pass = null;
  if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK, true)) {
    String passStr = get(name);
    if (passStr != null) {
      pass = passStr.toCharArray();
    }
  }
  return pass;
}
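// A minimal usage sketch of the clear-text fallback above, assuming the Hadoop key
// "hadoop.security.credential.clear-text-fallback" (CredentialProvider.CLEAR_TEXT_FALLBACK)
// and a hypothetical property name "db.password": with the fallback enabled (the default),
// getPassword() returns the clear-text value when no credential provider supplies one.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class ClearTextFallbackExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("db.password", "s3cret"); // clear-text value stored directly in the config
    conf.setBoolean("hadoop.security.credential.clear-text-fallback", true);
    char[] pass = conf.getPassword("db.password"); // providers first, then the clear-text fallback
    System.out.println(pass != null ? new String(pass) : "no password found");
  }
}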
public CompactionWorker(final FileSystem fs, final Configuration conf) {
  this.conf = conf;
  this.deleteCompacted = conf.getBoolean(CONF_DELETE_COMPACTED, false);
  this.tmpDir = new Path(conf.get(CONF_TMP_DIR));
  this.fs = fs;
}
/**
 * @param htable the {@link org.apache.hadoop.hbase.client.Table} to scan.
 */
public void setHTable(Table htable) {
  Configuration conf = htable.getConfiguration();
  logScannerActivity = conf.getBoolean(
      ScannerCallable.LOG_SCANNER_ACTIVITY, false);
  logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
  this.htable = htable;
}
this.storeConfigInfo = storeConfigInfo;

maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE);
offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, maxCompactSize);
minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY,
    storeConfigInfo.getMemStoreFlushSize());
minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY,
    /* old name */ conf.getInt("hbase.hstore.compactionThreshold", 3)));
maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10);
compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F);
offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F);

dateTieredMaxStoreFileAgeMillis = conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE);
dateTieredIncomingWindowMin = conf.getInt(DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 6);
compactionPolicyForDateTieredWindow = conf.get(
    COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY,
    DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName());
dateTieredSingleOutputForMinorCompaction = conf
    .getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true);
this.dateTieredCompactionWindowFactory = conf.get(
    DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY,
    DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName());
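// A minimal sketch of overriding the compaction knobs read above, assuming the constants map to
// the standard hbase-site.xml key names shown here; the values are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 4);        // minFilesToCompact (floored at 2 above)
    conf.setInt("hbase.hstore.compaction.max", 12);       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.4F); // compactionRatio
    conf.setLong("hbase.hstore.compaction.max.size", 10L * 1024 * 1024 * 1024); // maxCompactSize
    System.out.println(conf.getInt("hbase.hstore.compaction.min", 3));
  }
}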
private void allocateInputStreamSplits() {
  LOG.info("Initializing " + container.getId().toString() + " input splits");
  containerId2InputSplit.putIfAbsent(new XLearningContainerId(container.getId()),
      new ArrayList<InputSplit>());
  if (tfEvaluator) {
    splitWorkerNum--;
    LOG.info("Note that the current TensorFlow job has an evaluator; no input is allocated to the last container.");
    if (conf.getBoolean(XLearningConfiguration.XLEARNING_INPUT_STREAM_SHUFFLE,
        XLearningConfiguration.DEFAULT_XLEARNING_INPUT_STREAM_SHUFFLE)) {
      LOG.info("XLEARNING_INPUT_STREAM_SHUFFLE is true");
      for (int i = 0, len = inputFileSplits.length; i < len; i++) {
        Integer index = i % splitWorkerNum;
@VisibleForTesting
static void serializeJobState(FileSystem fs, Path mrJobDir, Configuration conf, JobState jobState,
    Job job) throws IOException {
  Path jobStateFilePath = new Path(mrJobDir, JOB_STATE_FILE_NAME);
  // Write the job state with an empty task set (work units are read by the mapper from a different file)
  try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath))) {
    jobState.write(dataOutputStream, false,
        conf.getBoolean(SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY, DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES));
  }

  job.getConfiguration().set(ConfigurationKeys.JOB_STATE_FILE_PATH_KEY, jobStateFilePath.toString());

  DistributedCache.addCacheFile(jobStateFilePath.toUri(), job.getConfiguration());
  job.getConfiguration().set(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME,
      jobStateFilePath.getName());
}
protected void serviceStart() throws Exception {
  Configuration conf = new XLearningConfiguration();
  YarnRPC rpc = YarnRPC.create(conf);
  initializeWebApp(conf);
  InetSocketAddress address = conf.getSocketAddr(
      XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
      XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
      conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
      conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_PORT,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_PORT));

  server = rpc.getServer(HSClientProtocol.class, protocolHandler, address, conf,
      jhsDTSecretManager,
      conf.getInt(XLearningConfiguration.XLEARNING_HISTORY_CLIENT_THREAD_COUNT,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_CLIENT_THREAD_COUNT));

  // Enable service authorization?
  if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(conf, new ClientHSPolicyProvider());
  }

  server.start();
  this.bindAddress = conf.updateConnectAddr(XLearningConfiguration.XLEARNING_HISTORY_BIND_HOST,
      XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
      conf.get(XLearningConfiguration.XLEARNING_HISTORY_ADDRESS,
          XLearningConfiguration.DEFAULT_XLEARNING_HISTORY_ADDRESS),
      server.getListenerAddress());
  LOG.info("Instantiated HistoryClientService at " + this.bindAddress);

  super.serviceStart();
}
boolean justStarted = conf.getBoolean(SCRIPT_STARTED_PARAM, true);
if (justStarted) {
  conf.setBoolean(SCRIPT_STARTED_PARAM, false);
  int sleepTimeSecs = conf.getInt(WF_BETWEEN_SLEEP_SECS_PARAM, 10);
  try {
    LOG.info("One workflow complete, sleeping for " + sleepTimeSecs
        + " sec(s) before moving to the next one if it exists. Hit ctrl-c to exit.");
    Thread.sleep(sleepTimeSecs * 1000L);
/**
 * Handles common parameter initialization that a subclass might want to leverage.
 * @param context the mapper context that supplies the job configuration
 */
protected void doSetup(Context context) {
  Configuration conf = context.getConfiguration();

  // If a custom separator has been used,
  // decode it back from Base64 encoding.
  separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY);
  if (separator == null) {
    separator = ImportTsv.DEFAULT_SEPARATOR;
  } else {
    separator = new String(Base64.getDecoder().decode(separator));
  }

  // Should never get 0 as we are setting this to a valid value in job
  // configuration.
  ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);

  skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false);
  skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
  badLineCount = context.getCounter("ImportTsv", "Bad Lines");
  logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false);
  hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY);
}
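// A minimal sketch of the submitting side that pairs with the decode above: the separator is
// Base64-encoded before it is stored in the job configuration so that arbitrary characters
// (e.g. "|" or a tab) survive the round trip. The key string "importtsv.separator" is assumed
// to match ImportTsv.SEPARATOR_CONF_KEY.
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import org.apache.hadoop.conf.Configuration;

public class SeparatorConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String separator = "|";
    conf.set("importtsv.separator",
        Base64.getEncoder().encodeToString(separator.getBytes(StandardCharsets.UTF_8)));
    // Later, in the mapper's setup, the value is decoded back:
    String decoded = new String(Base64.getDecoder().decode(conf.get("importtsv.separator")),
        StandardCharsets.UTF_8);
    System.out.println(decoded); // prints "|"
  }
}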
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
  if (configuredPauseForCQTBE < pause) {
    LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: "
        + configuredPauseForCQTBE + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE
        + ", will use " + pause + " instead.");
    this.pauseForCQTBE = pause;
  } else {
    this.pauseForCQTBE = configuredPauseForCQTBE;
  }
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
  rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
}
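// A minimal sketch of the client pause settings consumed above. "hbase.client.pause" is the
// standard retry pause; "hbase.client.pause.cqtbe" is assumed to match
// HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE and, per the logic above, is ignored (with a warning)
// if set below the base pause.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ClientPauseConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.client.pause", 100L);        // base pause between retries, in ms
    conf.setLong("hbase.client.pause.cqtbe", 500L);  // longer pause on CallQueueTooBigException
    conf.setInt("hbase.client.retries.number", 10);  // retry count read by the factory
    System.out.println(conf.getLong("hbase.client.pause", 100L));
  }
}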
TableName tableName = TableName.valueOf(args[0]);
conf.set(TABLE_NAME, tableName.getNameAsString());
Path inputDir = new Path(args[1]);
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
job.setJarByClass(Importer.class);
FileInputFormat.setInputPaths(job, inputDir);
job.setInputFormatClass(SequenceFileInputFormat.class);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);

if (hfileOutPath != null && conf.getBoolean(HAS_LARGE_RESULT, false)) {
  LOG.info("Use Large Result!!");
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(tableName);
      RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
    // Configure the bulk-load output against the live table layout.
    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
    job.setMapperClass(CellSortImporter.class);
    job.setReducerClass(CellReducer.class);
    Path outputDir = new Path(hfileOutPath);
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setMapOutputKeyClass(CellWritableComparable.class);
    job.getConfiguration().setClass("mapreduce.job.output.key.comparator.class",
        CellWritableComparable.CellWritableComparator.class, RawComparator.class);
    Path partitionsPath = new Path(TotalOrderPartitioner.getPartitionFile(job.getConfiguration()));
    FileSystem fs = FileSystem.get(job.getConfiguration());
    fs.deleteOnExit(partitionsPath);
public ThriftConnection(Configuration conf, ExecutorService pool, final User user)
    throws IOException {
  this.conf = conf;
  this.user = user;
  this.host = conf.get(Constants.HBASE_THRIFT_SERVER_NAME);
  this.port = conf.getInt(Constants.HBASE_THRIFT_SERVER_PORT, -1);
  Preconditions.checkArgument(port > 0);
  Preconditions.checkArgument(host != null);
  this.isFramed = conf.getBoolean(Constants.FRAMED_CONF_KEY, Constants.FRAMED_CONF_DEFAULT);
  this.isCompact = conf.getBoolean(Constants.COMPACT_CONF_KEY, Constants.COMPACT_CONF_DEFAULT);
  this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
  this.connectTimeout = conf.getInt(SOCKET_TIMEOUT_CONNECT, DEFAULT_SOCKET_TIMEOUT_CONNECT);

  String className = conf.get(Constants.HBASE_THRIFT_CLIENT_BUIDLER_CLASS,
      DefaultThriftClientBuilder.class.getName());
  try {
    Class<?> clazz = Class.forName(className);
    Constructor<?> constructor = clazz.getDeclaredConstructor(ThriftConnection.class);
    constructor.setAccessible(true);
    clientBuilder = (ThriftClientBuilder) constructor.newInstance(this);
  } catch (Exception e) {
    throw new IOException(e);
  }
}
boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
    WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);

Path logsDirPath = new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_LOGDIR_NAME);

Thread.sleep(conf.getInt("hbase.hlog.split.failure.retry.interval", 30 * 1000));
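// A minimal sketch of the split-retry settings used above; both key strings appear verbatim in
// the snippet, so only the chosen values here are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSplitRetryConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.hlog.split.skip.errors", false);            // keep retrying on errors
    conf.setInt("hbase.hlog.split.failure.retry.interval", 10 * 1000); // retry every 10 seconds
    boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors", false);
    System.out.println(retrySplitting); // true: failed log splits will be retried
  }
}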
/**
 * @param conf the current configuration
 * @param family Name of this store's column family
 * @param minVersions Store's MIN_VERSIONS setting
 * @param maxVersions Store's VERSIONS setting
 * @param ttl Store's TTL (in ms)
 * @param keepDeletedCells Store's keepDeletedCells setting
 * @param blockSize Store's block size
 * @param timeToPurgeDeletes duration in ms after which a delete marker can
 *   be purged during a major compaction.
 * @param comparator The store's comparator
 * @param newVersionBehavior whether the new version behavior is enabled
 */
public ScanInfo(Configuration conf, byte[] family, int minVersions, int maxVersions, long ttl,
    KeepDeletedCells keepDeletedCells, long blockSize, long timeToPurgeDeletes,
    CellComparator comparator, boolean newVersionBehavior) {
  this(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator,
      conf.getLong(HConstants.TABLE_MAX_ROWSIZE_KEY, HConstants.TABLE_MAX_ROWSIZE_DEFAULT),
      conf.getBoolean("hbase.storescanner.use.pread", false), getCellsPerTimeoutCheck(conf),
      conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false),
      conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior);
}
public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink,
    ExecutorService executor, boolean writeSniffing, TableName writeTableName,
    boolean treatFailureAsError, HashMap<String, Long> configuredReadTableTimeouts,
    long configuredWriteTableTimeout, long allowedFailures) {
  super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError,
      allowedFailures);
  Configuration conf = connection.getConfiguration();
  this.writeSniffing = writeSniffing;
  this.writeTableName = writeTableName;
  this.writeDataTTL =
      conf.getInt(HConstants.HBASE_CANARY_WRITE_DATA_TTL_KEY, DEFAULT_WRITE_DATA_TTL);
  this.regionsLowerLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY, 1.0f);
  this.regionsUpperLimit =
      conf.getFloat(HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_UPPERLIMIT_KEY, 1.5f);
  this.checkPeriod = conf.getInt(HConstants.HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY,
      DEFAULT_WRITE_TABLE_CHECK_PERIOD);
  this.rawScanEnabled = conf.getBoolean(HConstants.HBASE_CANARY_READ_RAW_SCAN_KEY, false);
  this.configuredReadTableTimeouts = new HashMap<>(configuredReadTableTimeouts);
  this.configuredWriteTableTimeout = configuredWriteTableTimeout;
}
public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
  this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize),
      DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
      conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
      conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR),
      conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR),
      conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
      conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR),
      conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME, DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
      conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE),
      conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE));
}
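// A minimal sketch of tuning the cache factors read above. The key strings are assumed to match
// the LRU_*_CONFIG_NAME constants; the single/multi/memory percentages are expected to sum to 1.0.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LruCacheConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setFloat("hbase.lru.blockcache.min.factor", 0.90f);        // evict down to 90% of capacity
    conf.setFloat("hbase.lru.blockcache.acceptable.factor", 0.95f); // start evicting above 95%
    conf.setFloat("hbase.lru.blockcache.single.percentage", 0.25f); // single-access priority share
    conf.setFloat("hbase.lru.blockcache.multi.percentage", 0.50f);  // multi-access priority share
    conf.setFloat("hbase.lru.blockcache.memory.percentage", 0.25f); // in-memory priority share
    System.out.println(conf.getFloat("hbase.lru.blockcache.min.factor", 0.95f));
  }
}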
Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);

verifyChecksum = conf.getBoolean(CONF_CHECKSUM_VERIFY, true);

filesGroup = conf.get(CONF_FILES_GROUP);
filesUser = conf.get(CONF_FILES_USER);
filesMode = (short) conf.getInt(CONF_FILES_MODE, 0);
outputRoot = new Path(conf.get(CONF_OUTPUT_ROOT));
inputRoot = new Path(conf.get(CONF_INPUT_ROOT));

inputArchive = new Path(inputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);
outputArchive = new Path(outputRoot, HConstants.HFILE_ARCHIVE_DIRECTORY);

bufferSize = conf.getInt(CONF_BUFFER_SIZE, defaultBlockSize);
LOG.info("Using bufferSize=" + StringUtils.humanReadableInt(bufferSize));

context.getCounter(c).increment(0);

if (context.getConfiguration().getBoolean(Testing.CONF_TEST_FAILURE, false)) {
  testing.failuresCountToInject = conf.getInt(Testing.CONF_TEST_FAILURE_COUNT, 0);
private void prepareTimelineDomain() {
  TimelineClient timelineClient = null;
  if (jstormClientContext.conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
      YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED)) {
    timelineClient = TimelineClient.createTimelineClient();
    timelineClient.init(jstormClientContext.conf);
    timelineClient.start();
  } else {
    LOG.warn("Cannot put the domain " + jstormClientContext.domainId
        + " because the timeline service is not enabled");
    return;
  }
  try {
    TimelineDomain domain = new TimelineDomain();
    domain.setId(jstormClientContext.domainId);
    domain.setReaders(
        jstormClientContext.viewACLs != null && jstormClientContext.viewACLs.length() > 0
            ? jstormClientContext.viewACLs : JOYConstants.BLANK);
    domain.setWriters(
        jstormClientContext.modifyACLs != null && jstormClientContext.modifyACLs.length() > 0
            ? jstormClientContext.modifyACLs : JOYConstants.BLANK);
    timelineClient.putDomain(domain);
    LOG.info("Put the timeline domain: " + TimelineUtils.dumpTimelineRecordtoJSON(domain));
  } catch (Exception e) {
    LOG.error("Error when putting the timeline domain", e);
  } finally {
    timelineClient.stop();
  }
}
private static void split(final Configuration conf, final Path p) throws IOException {
  FileSystem fs = FSUtils.getWALFileSystem(conf);
  if (!fs.exists(p)) {
    throw new FileNotFoundException(p.toString());
  }
  if (!fs.getFileStatus(p).isDirectory()) {
    throw new IOException(p + " is not a directory");
  }

  final Path baseDir = FSUtils.getWALRootDir(conf);
  Path archiveDir = new Path(baseDir, HConstants.HREGION_OLDLOGDIR_NAME);
  if (conf.getBoolean(AbstractFSWALProvider.SEPARATE_OLDLOGDIR,
      AbstractFSWALProvider.DEFAULT_SEPARATE_OLDLOGDIR)) {
    archiveDir = new Path(archiveDir, p.getName());
  }
  WALSplitter.split(baseDir, p, archiveDir, fs, conf, WALFactory.getInstance(conf));
}
static String getSargColumnIDsString(Configuration conf) {
  return conf.getBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, true) ? null
      : conf.get(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR);
}
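// A minimal sketch of the projection settings consumed above. The key strings are assumed to
// match ColumnProjectionUtils.READ_ALL_COLUMNS and READ_COLUMN_IDS_CONF_STR: when "read all
// columns" is false, the comma-separated column-id list is what the helper returns.
import org.apache.hadoop.conf.Configuration;

public class SargColumnIdsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean("hive.io.file.read.all.columns", false); // project only selected columns
    conf.set("hive.io.file.readcolumn.ids", "0,2,5");        // ids of the projected columns
    String ids = conf.getBoolean("hive.io.file.read.all.columns", true)
        ? null : conf.get("hive.io.file.readcolumn.ids");
    System.out.println(ids); // prints "0,2,5"
  }
}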