/**
 * Reads the client retry knobs (pause intervals, retry count, error-log threshold,
 * back pressure, rpc timeout) from the configuration and installs the supplied
 * interceptor on every retrying caller this factory creates.
 *
 * @param conf configuration the retry settings are read from
 * @param interceptor interceptor wired into callers built by this factory
 */
public RpcRetryingCallerFactory(Configuration conf, RetryingCallerInterceptor interceptor) {
  this.conf = conf;
  pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  // The pause applied on CallQueueTooBigException must never undercut the base client
  // pause; a smaller configured value is logged and clamped up to the base pause.
  long cqtbePause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause);
  if (cqtbePause < pause) {
    LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: " + cqtbePause
        + " is smaller than " + HConstants.HBASE_CLIENT_PAUSE + ", will use " + pause
        + " instead.");
  }
  this.pauseForCQTBE = Math.max(cqtbePause, pause);
  retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  startLogErrorsCnt = conf.getInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY,
      AsyncProcess.DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
  this.interceptor = interceptor;
  enableBackPressure = conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
      HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
  rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
}
// Interior of a compaction-configuration constructor; the enclosing definition is not
// visible in this chunk. Reads the min/max compaction sizes and file counts, the
// (off-peak) compaction ratios, the throttle point, major-compaction period/jitter, and
// the date-tiered compaction settings from the configuration.
// NOTE(review): the call `conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER,` below is
// missing its default-value argument and closing parenthesis before the
// `dateTieredMaxStoreFileAgeMillis` assignment begins — this line appears truncated by
// extraction; recover the original jitter default from the upstream source.
maxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, Long.MAX_VALUE); offPeakMaxCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MAX_SIZE_OFFPEAK_KEY, maxCompactSize); minCompactSize = conf.getLong(HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, storeConfigInfo.getMemStoreFlushSize()); minFilesToCompact = Math.max(2, conf.getInt(HBASE_HSTORE_COMPACTION_MIN_KEY, /*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3))); maxFilesToCompact = conf.getInt(HBASE_HSTORE_COMPACTION_MAX_KEY, 10); compactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F); offPeakCompactionRatio = conf.getFloat(HBASE_HSTORE_COMPACTION_RATIO_OFFPEAK_KEY, 5.0F); throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle", 2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize()); majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD); majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER, dateTieredMaxStoreFileAgeMillis = conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE); dateTieredIncomingWindowMin = conf.getInt(DATE_TIERED_INCOMING_WINDOW_MIN_KEY, 6); compactionPolicyForDateTieredWindow = conf.get( COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS_KEY, DEFAULT_COMPACTION_POLICY_CLASS_FOR_DATE_TIERED_WINDOWS.getName()); dateTieredSingleOutputForMinorCompaction = conf .getBoolean(DATE_TIERED_SINGLE_OUTPUT_FOR_MINOR_COMPACTION_KEY, true); this.dateTieredCompactionWindowFactory = conf.get( DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS_KEY, DEFAULT_DATE_TIERED_COMPACTION_WINDOW_FACTORY_CLASS.getName());
private void setupConf() { for (String f : LlapDaemonConfiguration.DAEMON_CONFIGS) { conf.addResource(f); } conf.reloadConfiguration(); // Setup timeouts for various services. // Once we move to a Hadoop-2.8 dependency, the following paramteer can be used. // conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC); conf.set("yarn.timeline-service.entity-group-fs-store.retry-policy-spec", conf.get(CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC, CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC_DEFAULT)); conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, conf.getLong(CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS, CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS_DEFAULT)); conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, conf.getLong(CONFIG_YARN_RM_RETRY_INTERVAL_MS, CONFIG_YARN_RM_RETRY_INTERVAL_MS_DEFAULT)); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, conf.getInt(CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES, CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT)); conf.setLong(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY, conf.getLong(CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS, CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS_DEFAULT)); HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT, ( conf.getLong(CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS, CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS_DEFAULT) + "ms")); llapRegistryConf = new Configuration(conf); }
/** * Handles common parameter initialization that a subclass might want to leverage. * @param context */ protected void doSetup(Context context) { Configuration conf = context.getConfiguration(); // If a custom separator has been used, // decode it back from Base64 encoding. separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY); if (separator == null) { separator = ImportTsv.DEFAULT_SEPARATOR; } else { separator = new String(Base64.getDecoder().decode(separator)); } // Should never get 0 as we are setting this to a valid value in job // configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); skipEmptyColumns = context.getConfiguration().getBoolean( ImportTsv.SKIP_EMPTY_COLUMNS, false); skipBadLines = context.getConfiguration().getBoolean( ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY); }
// Interior of a master-startup constructor; the enclosing definition is not visible in
// this chunk. Initializes tracing, detects maintenance mode, and sets mapreduce task id
// plus compression-check flags from configuration.
// NOTE(review): this fragment is truncated — `conf.getLong("hbase.master.buffer.for.rs.fatals",
// 1 * 1024 * 1024))` has no assignment target / enclosing call and an unbalanced closing
// parenthesis, and the `try {` and `if` blocks are never closed here. Recover the full
// statement list from the upstream source before editing.
TraceUtil.initTracer(conf); try { if (conf.getBoolean(MAINTENANCE_MODE, false)) { LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE); maintenanceMode = true; conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); LOG.info("hbase.rootdir=" + getRootDir() + ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); if (this.conf.get("mapreduce.task.attempt.id") == null) { this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString()); this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);
/**
 * Builds a ScanInfo, pulling the remaining scanner tuning values (max row size, pread
 * usage, cells-per-timeout-check, parallel seek, pread max bytes) from configuration.
 *
 * @param conf configuration the scanner tuning values are read from
 * @param family Name of this store's column family
 * @param minVersions Store's MIN_VERSIONS setting
 * @param maxVersions Store's VERSIONS setting
 * @param ttl Store's TTL (in ms)
 * @param keepDeletedCells Store's keepDeletedCells setting
 * @param blockSize Store's block size; the pread max-bytes default is 4 * blockSize
 * @param timeToPurgeDeletes duration in ms after which a delete marker can
 *   be purged during a major compaction.
 * @param comparator The store's comparator
 * @param newVersionBehavior whether to use the new version-accounting behavior
 */
public ScanInfo(Configuration conf, byte[] family, int minVersions, int maxVersions, long ttl,
    KeepDeletedCells keepDeletedCells, long blockSize, long timeToPurgeDeletes,
    CellComparator comparator, boolean newVersionBehavior) {
  this(family, minVersions, maxVersions, ttl, keepDeletedCells, timeToPurgeDeletes, comparator,
      conf.getLong(HConstants.TABLE_MAX_ROWSIZE_KEY, HConstants.TABLE_MAX_ROWSIZE_DEFAULT),
      conf.getBoolean("hbase.storescanner.use.pread", false), getCellsPerTimeoutCheck(conf),
      conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false),
      conf.getLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 4 * blockSize), newVersionBehavior);
}
// Interior fragment: converts the configured minimum seconds-before-relogin into
// milliseconds.
// NOTE(review): the `throw new IllegalArgumentException(...)` below appears without its
// guarding condition — presumably it lives inside a catch/validation branch for a
// malformed numeric value that was truncated out of this chunk. Confirm against the
// upstream source before editing.
kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong( HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN, HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT); throw new IllegalArgumentException("Invalid attribute value for " + HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN + " of " + conf.get(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN));
/**
 * Creates an LruBlockCache sized at {@code maxSize} bytes, reading the eviction tuning
 * factors (min/acceptable thresholds, single/multi/memory priority percentages, hard
 * capacity limit, in-memory force mode and max block size) from configuration, with the
 * library defaults as fallbacks.
 *
 * @param maxSize maximum cache size in bytes
 * @param blockSize expected average block size, used to presize the backing map
 * @param evictionThread whether to run a dedicated eviction thread
 * @param conf configuration supplying the LRU tuning factors
 */
public LruBlockCache(long maxSize, long blockSize, boolean evictionThread, Configuration conf) {
  // Initial map capacity is over-provisioned by 20% to limit rehashing as the cache fills.
  this(maxSize, blockSize, evictionThread, (int) Math.ceil(1.2 * maxSize / blockSize),
      DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL,
      conf.getFloat(LRU_MIN_FACTOR_CONFIG_NAME, DEFAULT_MIN_FACTOR),
      conf.getFloat(LRU_ACCEPTABLE_FACTOR_CONFIG_NAME, DEFAULT_ACCEPTABLE_FACTOR),
      conf.getFloat(LRU_SINGLE_PERCENTAGE_CONFIG_NAME, DEFAULT_SINGLE_FACTOR),
      conf.getFloat(LRU_MULTI_PERCENTAGE_CONFIG_NAME, DEFAULT_MULTI_FACTOR),
      conf.getFloat(LRU_MEMORY_PERCENTAGE_CONFIG_NAME, DEFAULT_MEMORY_FACTOR),
      conf.getFloat(LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME,
          DEFAULT_HARD_CAPACITY_LIMIT_FACTOR),
      conf.getBoolean(LRU_IN_MEMORY_FORCE_MODE_CONFIG_NAME, DEFAULT_IN_MEMORY_FORCE_MODE),
      conf.getLong(LRU_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE) );
}
/**
 * Builds the MOB file cache. The cache is active only when the configured size is
 * positive; when active, a periodic eviction task is scheduled and the evict-remain
 * ratio is clamped into [0.0, 1.0].
 *
 * @param conf configuration supplying the cache size, eviction period and remain ratio
 */
public MobFileCache(Configuration conf) {
  this.conf = conf;
  this.mobFileMaxCacheSize =
      conf.getInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, MobConstants.DEFAULT_MOB_FILE_CACHE_SIZE);
  isCacheEnabled = (mobFileMaxCacheSize > 0);
  map = new ConcurrentHashMap<>(mobFileMaxCacheSize);
  if (!isCacheEnabled) {
    LOG.info("MobFileCache disabled");
    return;
  }
  long evictPeriodSecs = conf.getLong(MobConstants.MOB_CACHE_EVICT_PERIOD,
      MobConstants.DEFAULT_MOB_CACHE_EVICT_PERIOD); // expressed in seconds
  evictRemainRatio = conf.getFloat(MobConstants.MOB_CACHE_EVICT_REMAIN_RATIO,
      MobConstants.DEFAULT_EVICT_REMAIN_RATIO);
  // Out-of-range ratios are clamped, with a warning naming the offending setting.
  if (evictRemainRatio < 0.0) {
    evictRemainRatio = 0.0f;
    LOG.warn(MobConstants.MOB_CACHE_EVICT_REMAIN_RATIO + " is less than 0.0, 0.0 is used.");
  } else if (evictRemainRatio > 1.0) {
    evictRemainRatio = 1.0f;
    LOG.warn(MobConstants.MOB_CACHE_EVICT_REMAIN_RATIO + " is larger than 1.0, 1.0 is used.");
  }
  this.scheduleThreadPool.scheduleAtFixedRate(new EvictionThread(this), evictPeriodSecs,
      evictPeriodSecs, TimeUnit.SECONDS);
  if (LOG.isDebugEnabled()) {
    LOG.debug("MobFileCache enabled with cacheSize=" + mobFileMaxCacheSize + ", evictPeriods="
        + evictPeriodSecs + "sec, evictRemainRatio=" + evictRemainRatio);
  }
}
/**
 * Creates a splitter over the given WAL directory. A private copy of the configuration
 * is taken so that the RPC-codec override installed below does not leak into the
 * caller's configuration.
 *
 * @param factory WAL factory used to open readers/writers
 * @param conf caller's configuration; copied, not mutated
 * @param walDir directory holding the WAL files to split
 * @param walFS filesystem the WAL directory lives on
 * @param idChecker source of last-flushed sequence ids
 * @param splitLogWorkerCoordination coordination for distributed log splitting
 */
@VisibleForTesting
WALSplitter(final WALFactory factory, Configuration conf, Path walDir, FileSystem walFS,
    LastSequenceId idChecker, SplitLogWorkerCoordination splitLogWorkerCoordination) {
  this.conf = HBaseConfiguration.create(conf);
  // Read the cell codec from the caller's conf, but install it as the RPC codec on our
  // private copy only.
  String cellCodecClass =
      conf.get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
  this.conf.set(HConstants.RPC_CODEC_CONF_KEY, cellCodecClass);
  this.walDir = walDir;
  this.walFS = walFS;
  this.sequenceIdChecker = idChecker;
  this.splitLogWorkerCoordination = splitLogWorkerCoordination;
  this.walFactory = factory;
  PipelineController pipelineController = new PipelineController();
  this.splitWriterCreationBounded = conf.getBoolean(SPLIT_WRITER_CREATION_BOUNDED, false);
  // Buffer up to 128 MB of edits by default before writers must drain.
  entryBuffers = new EntryBuffers(pipelineController,
      this.conf.getLong("hbase.regionserver.hlog.splitlog.buffersize", 128 * 1024 * 1024),
      splitWriterCreationBounded);
  int writerThreads = this.conf.getInt("hbase.regionserver.hlog.splitlog.writer.threads", 3);
  if (splitWriterCreationBounded) {
    outputSink =
        new BoundedLogWriterCreationOutputSink(pipelineController, entryBuffers, writerThreads);
  } else {
    outputSink = new LogRecoveredEditsOutputSink(pipelineController, entryBuffers, writerThreads);
  }
}
/**
 * Chore that periodically runs the configured health check script.
 *
 * @param sleepTime interval between checks, in ms
 * @param stopper stoppable used to halt the chore
 * @param conf configuration holding script location, timeout and failure threshold
 */
public HealthCheckChore(int sleepTime, Stoppable stopper, Configuration conf) {
  super("HealthChecker", stopper, sleepTime);
  LOG.info("Health Check Chore runs every " + StringUtils.formatTime(sleepTime));
  this.config = conf;
  String scriptLocation = this.config.get(HConstants.HEALTH_SCRIPT_LOC);
  long timeout = this.config.getLong(HConstants.HEALTH_SCRIPT_TIMEOUT,
      HConstants.DEFAULT_HEALTH_SCRIPT_TIMEOUT);
  healthChecker = new HealthChecker();
  healthChecker.init(scriptLocation, timeout);
  this.threshold = config.getInt(HConstants.HEALTH_FAILURE_THRESHOLD,
      HConstants.DEFAULT_HEALTH_FAILURE_THRESHOLD);
  // Widen both operands before multiplying so a large threshold * interval cannot
  // overflow int arithmetic.
  this.failureWindow = (long) this.threshold * (long) sleepTime;
}
/**
 * Constructor.
 *
 * @param client REST client used for transport
 * @param conf configuration supplying the retry count and sleep interval
 * @param accessToken access token sent with requests
 */
public RemoteAdmin(Client client, Configuration conf, String accessToken) {
  this.client = client;
  this.conf = conf;
  this.accessToken = accessToken;
  // Default: retry up to 10 times, pausing one second between attempts.
  this.maxRetries = conf.getInt("hbase.rest.client.max.retries", 10);
  this.sleepTime = conf.getLong("hbase.rest.client.sleep", 1000);
}
/** * Handles common parameter initialization that a subclass might want to leverage. * @param context * @param conf */ protected void doSetup(Context context, Configuration conf) { // If a custom separator has been used, // decode it back from Base64 encoding. separator = conf.get(ImportTsv.SEPARATOR_CONF_KEY); if (separator == null) { separator = ImportTsv.DEFAULT_SEPARATOR; } else { separator = Bytes.toString(Base64.getDecoder().decode(separator)); } // Should never get 0 as we are setting this to a valid value in job configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); }
// Interior of a client connection-configuration constructor; the enclosing definition is
// not visible in this chunk. Reads write-buffer sizing/flush settings, operation and
// meta-operation timeouts, scanner caching, and async-prefetch flags.
// NOTE(review): this fragment is truncated — the call
// `conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, ...)` has no
// assignment target, and `HConstants.HBASE_CLIENT_RETRIES_NUMBER, ...)` is the argument
// list of a missing enclosing `conf.getInt(` with its own missing assignment. Recover
// the full statements from the upstream source before editing.
this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); this.writeBufferPeriodicFlushTimeoutMs = conf.getLong( WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); this.writeBufferPeriodicFlushTimerTickMs = conf.getLong( WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.operationTimeout = conf.getInt( HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.scannerCaching = conf.getInt( HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.clientScannerAsyncPrefetch = conf.getBoolean( Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH);
/**
 * Get the variable as a long.
 *
 * @param conf configuration to retrieve it from
 * @param var variable to retrieve
 * @return value, or default value if value not in config file
 * @throws NumberFormatException if the configured value is not a parseable long
 */
public static long getLongVar(Configuration conf, ConfVars var) {
  assert var.defaultVal.getClass() == Long.class;
  // Prefer the primary name; fall back to the Hive-side name with the compiled-in
  // default. parseLong avoids the pointless box/unbox that Long.valueOf incurred here
  // while throwing the identical NumberFormatException on bad input.
  String val = conf.get(var.varname);
  return val == null
      ? conf.getLong(var.hiveName, (Long) var.defaultVal)
      : Long.parseLong(val);
}
/**
 * Constructor.
 *
 * @param master services of the owning master
 */
public ServerManager(final MasterServices master) {
  this.master = master;
  Configuration conf = master.getConfiguration();
  // Region servers whose clocks drift beyond maxSkew ms are rejected; past warningSkew
  // we only log a warning.
  maxSkew = conf.getLong("hbase.master.maxclockskew", 30000);
  warningSkew = conf.getLong("hbase.master.warningclockskew", 10000);
  this.connection = master.getClusterConnection();
  this.rpcControllerFactory =
      this.connection == null ? null : connection.getRpcControllerFactory();
  persistFlushedSequenceId =
      conf.getBoolean(PERSIST_FLUSHEDSEQUENCEID, PERSIST_FLUSHEDSEQUENCEID_DEFAULT);
}
// Interior of a group-mapping cache constructor; the enclosing definition is not visible
// in this chunk. Converts the positive/negative cache timeouts from seconds to ms and
// reads the warn-after delta and background-reload settings.
// NOTE(review): this fragment is truncated at both ends — the first expression
// (`conf.getLong(...GROUPS_CACHE_SECS...) * 1000`) has lost its assignment target
// (presumably a cache-timeout field), and the final `conf.getInt(` call is cut off
// before its default argument. Recover the full statements from the upstream source
// before editing.
conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000; negativeCacheTimeout = conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000; warningDeltaMs = conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS, CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT); reloadGroupsInBackground = conf.getBoolean( CommonConfigurationKeys. HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD, HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_DEFAULT); reloadGroupsThreadCount = conf.getInt( CommonConfigurationKeys. HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_THREADS,