/**
 * Reads the optional {@code maxTimeOut} setting from the supplied context.
 *
 * <p>When the key is absent ({@code getLong} returns {@code null}) the
 * previously configured value of {@code maxTimeOut} is left untouched.
 *
 * @param context configuration source to read from
 */
@Override
public void configure(Context context) {
  Long configuredTimeOut = context.getLong("maxTimeOut");
  if (configuredTimeOut != null) {
    maxTimeOut = configuredTimeOut;
  }
}
/**
 * Gets value mapped to key, returning null if unmapped.
 * <p>
 * Note that this method returns an object as opposed to a
 * primitive. The configuration key requested may not be mapped
 * to a value and by returning the primitive object wrapper we can
 * return null. If the key does not exist and the return value of
 * this method is assigned directly to a primitive, a
 * {@link NullPointerException} will be thrown.
 * </p>
 * @param key to be found
 * @return value associated with key or null if unmapped
 */
public Long getLong(String key) {
  return getLong(key, null);
}
/**
/**
 * Applies the backoff-related settings on top of the parent configuration.
 *
 * <p>Synchronized because the parent's {@code configure} contract is
 * synchronized; reads two longs with library defaults as fallbacks.
 *
 * @param context configuration source to read from
 */
@Override
public synchronized void configure(Context context) {
  super.configure(context);
  long increment = context.getLong(
      PollableSourceConstants.BACKOFF_SLEEP_INCREMENT,
      PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
  long maxSleep = context.getLong(
      PollableSourceConstants.MAX_BACKOFF_SLEEP,
      PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP);
  backoffSleepIncrement = increment;
  maxBackoffSleep = maxSleep;
}
@Override public void configure(Context context) { configuredMinReplicas = context.getInteger("hdfs.minBlockReplicas"); if (configuredMinReplicas != null) { Preconditions.checkArgument(configuredMinReplicas >= 0, "hdfs.minBlockReplicas must be greater than or equal to 0"); } numberOfCloseRetries = context.getInteger("hdfs.closeTries", 1) - 1; if (numberOfCloseRetries > 1) { try { //hdfs.callTimeout is deprecated from 1.9 timeBetweenCloseRetries = context.getLong("hdfs.callTimeout", 30000L); } catch (NumberFormatException e) { logger.warn("hdfs.callTimeout can not be parsed to a long: " + context.getLong("hdfs.callTimeout")); } timeBetweenCloseRetries = Math.max(timeBetweenCloseRetries / numberOfCloseRetries, 1000); } }
/**
 * Read parameters from context.
 * <li>batchSize = type int that defines the size of event batches
 * <li>totalEvents = type long capping the total number of events (default
 *     {@link Long#MAX_VALUE}, i.e. effectively unbounded)
 *
 * @param context configuration source to read from
 * @throws FlumeException on configuration failure (per the parent contract)
 */
@Override
protected void doConfigure(Context context) throws FlumeException {
  totalEvents = context.getLong("totalEvents", Long.MAX_VALUE);
  batchSize = context.getInteger("batchSize", 1);
  Preconditions.checkArgument(batchSize > 0,
      "batchSize was %s but expected positive", batchSize);
  if (sourceCounter == null) {
    // Lazily create the counter so re-configuration keeps existing stats.
    sourceCounter = new SourceCounter(getName());
  }
}
// --- HDFS writer configuration (method interior; enclosing method not in view) ---
// Root path is mandatory: a null default would trip the checkNotNull below.
rootHdfsPath = context.getString(CONFIG_ROOT_HDFS_PATH, DEFAULT_ROOT_HDFS_PATH);
Preconditions.checkNotNull(rootHdfsPath, "rootHdfsPath is required");
// Per-transaction and reporting limits.
txnEventMax = context.getLong(CONFIG_HDFS_TXN_EVENT_MAX, DEFAULT_HDFS_TXN_EVENT_MAX);
statisticsInterval = context.getLong(CONFIG_STATISTICS_INTERVAL, DEFAULT_STATISTICS_INTERVAL);
writerExpirationInterval = context.getInteger(
    CONFIG_HDFS_WRITER_EXPIRATION_INTERVAL,
    DEFAULT_HDFS_WRITER_EXPIRATION_INTERVAL);
callTimeout = context.getLong(CONFIG_HDFS_CALL_TIMEOUT, DEFAULT_HDFS_CALL_TIMEOUT);
// File-roll triggers: any of interval / size / count may close the file.
rollInterval = context.getLong(CONFIG_HDFS_ROLL_INTERVAL, DEFAULT_HDFS_ROLL_INTERVAL);
rollSize = context.getLong(CONFIG_HDFS_ROLL_SIZE, DEFAULT_HDFS_ROLL_SIZE);
rollCount = context.getLong(CONFIG_HDFS_ROLL_COUNT, DEFAULT_HDFS_ROLL_COUNT);
batchSize = context.getLong(CONFIG_HDFS_BATCH_SIZE, DEFAULT_HDFS_BATCH_SIZE);
defaultBlockSize = context.getLong(
    CONFIG_HDFS_DEFAULT_BLOCK_SIZE, DEFAULT_HDFS_DEFAULT_BLOCK_SIZE);
/**
 * Captures the raw context and reads batching / handler settings.
 *
 * <p>Unset batch options keep their current field values, since the fields
 * themselves are passed as the defaults.
 *
 * @param context configuration source to read from
 */
@Override
public void configure(Context context) {
  this.context = context;
  handlerClass = context.getString(HANDLER_CLASS, MorphlineHandlerImpl.class.getName());
  maxBatchSize = context.getInteger(BATCH_SIZE, maxBatchSize);
  maxBatchDurationMillis = context.getLong(BATCH_DURATION_MILLIS, maxBatchDurationMillis);
  if (sinkCounter == null) {
    // Created once; repeated configuration keeps accumulated metrics.
    sinkCounter = new SinkCounter(getName());
  }
}
/**
 * Read parameters from context.
 * <li>-maxTotalEvents = type long that defines the total number of Events to be sent
 * <li>-maxSuccessfulEvents = type long that defines the number of successful Events
 * <li>-size = type int that defines the number of bytes in each Event
 * <li>-batchSize = type int that defines the number of Events being sent in one batch
 * <li>-maxEventsPerSecond = type int; when positive, installs a rate limiter
 *
 * @param context configuration source to read from
 * @throws FlumeException on configuration failure (per the parent contract)
 */
@Override
protected void doConfigure(Context context) throws FlumeException {
  /* Limit on the total number of events; -1 means unlimited. */
  maxTotalEvents = context.getLong("maxTotalEvents", -1L);
  /* Limit on the total number of successful events; -1 means unlimited. */
  maxSuccessfulEvents = context.getLong("maxSuccessfulEvents", -1L);
  /* Set max events in a batch submission. */
  batchSize = context.getInteger("batchSize", 1);
  /* Size of events to be generated. */
  int eventSize = context.getInteger("size", 500);
  int eventsPerSecond = context.getInteger("maxEventsPerSecond", 0);
  // A non-positive rate disables throttling entirely.
  limiter = (eventsPerSecond > 0) ? RateLimiter.create(eventsPerSecond) : null;
  prepEventData(eventSize);
}
// Usable capacity in slots: configured byteCapacity reduced by the buffer
// percentage, then divided into byteCapacitySlotSize-byte slots.
// NOTE(review): the narrowing cast to int assumes the slot count fits in an
// int for any sane configuration — confirm for very large byteCapacity.
byteCapacity = (int) ((context.getLong("byteCapacity", defaultByteCapacity).longValue()
    * (1 - byteCapacityBufferPercentage * .01)) / byteCapacitySlotSize);
if (byteCapacity < 1) {
/**
 * Configures the exec source: the command to run plus restart, logging,
 * batching, charset and shell options.
 *
 * @param context configuration source to read from
 * @throws IllegalStateException if {@code command} is missing
 */
@Override
public void configure(Context context) {
  // The command is the only mandatory setting.
  command = context.getString("command");
  Preconditions.checkState(command != null,
      "The parameter command must be specified");

  restart = context.getBoolean(
      ExecSourceConfigurationConstants.CONFIG_RESTART,
      ExecSourceConfigurationConstants.DEFAULT_RESTART);
  restartThrottle = context.getLong(
      ExecSourceConfigurationConstants.CONFIG_RESTART_THROTTLE,
      ExecSourceConfigurationConstants.DEFAULT_RESTART_THROTTLE);
  logStderr = context.getBoolean(
      ExecSourceConfigurationConstants.CONFIG_LOG_STDERR,
      ExecSourceConfigurationConstants.DEFAULT_LOG_STDERR);
  bufferCount = context.getInteger(
      ExecSourceConfigurationConstants.CONFIG_BATCH_SIZE,
      ExecSourceConfigurationConstants.DEFAULT_BATCH_SIZE);
  batchTimeout = context.getLong(
      ExecSourceConfigurationConstants.CONFIG_BATCH_TIME_OUT,
      ExecSourceConfigurationConstants.DEFAULT_BATCH_TIME_OUT);
  // Charset.forName throws on an unknown name, surfacing bad config early.
  charset = Charset.forName(context.getString(
      ExecSourceConfigurationConstants.CHARSET,
      ExecSourceConfigurationConstants.DEFAULT_CHARSET));
  shell = context.getString(ExecSourceConfigurationConstants.CONFIG_SHELL, null);

  if (sourceCounter == null) {
    sourceCounter = new SourceCounter(getName());
  }
}
// NOTE(review): this snippet appears garbled by extraction — the first line
// is the tail of a preceding call, and the getBoolean(FILENAME_HEADER, ...)
// call below is unterminated; verify against the original source file.
DEFAULT_CACHE_PATTERN_MATCHING);
// Backoff settings with library-provided defaults.
backoffSleepIncrement = context.getLong(PollableSourceConstants.BACKOFF_SLEEP_INCREMENT,
    PollableSourceConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
maxBackOffSleepInterval = context.getLong(PollableSourceConstants.MAX_BACKOFF_SLEEP,
    PollableSourceConstants.DEFAULT_MAX_BACKOFF_SLEEP);
fileHeader = context.getBoolean(FILENAME_HEADER,
fileHeaderKey = context.getString(FILENAME_HEADER_KEY,
    DEFAULT_FILENAME_HEADER_KEY);
// Non-positive batch counts are silently replaced with the default.
maxBatchCount = context.getLong(MAX_BATCH_COUNT, DEFAULT_MAX_BATCH_COUNT);
if (maxBatchCount <= 0) {
  maxBatchCount = DEFAULT_MAX_BATCH_COUNT;
// --- HBase sink configuration (method interior; enclosing method not in view) ---
String cf = context.getString(
    HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
// 100L instead of the deprecated new Long(100): the Long(long) constructor is
// deprecated since Java 9 and the literal avoids an explicit boxed allocation.
batchSize = context.getLong(
    HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
serializerContext = new Context();
// --- HBase2 sink configuration (method interior; enclosing method not in view) ---
String cf = context.getString(
    HBase2SinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
// Batch size defaults to 100 events when unset.
batchSize = context.getLong(
    HBase2SinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
Context serializerContext = new Context();
// --- HBase sink configuration (method interior; enclosing method not in view) ---
String cf = context.getString(
    HBaseSinkConfigurationConstants.CONFIG_COLUMN_FAMILY);
// 100L instead of the deprecated new Long(100): the Long(long) constructor is
// deprecated since Java 9 and the literal avoids an explicit boxed allocation.
batchSize = context.getLong(
    HBaseSinkConfigurationConstants.CONFIG_BATCHSIZE, 100L);
serializerContext = new Context();
sinkCounter = new SinkCounter(this.getName());
timeout = context.getLong(HBaseSinkConfigurationConstants.CONFIG_TIMEOUT,
    HBaseSinkConfigurationConstants.DEFAULT_TIMEOUT);
if (timeout <= 0) {
// --- HDFS sink configuration (method interior; enclosing method not in view) ---
// File-roll triggers: time, size and event count; 0/unset disables per default.
rollInterval = context.getLong("hdfs.rollInterval", defaultRollInterval);
rollSize = context.getLong("hdfs.rollSize", defaultRollSize);
rollCount = context.getLong("hdfs.rollCount", defaultRollCount);
batchSize = context.getLong("hdfs.batchSize", defaultBatchSize);
idleTimeout = context.getInteger("hdfs.idleTimeout", 0);
// Compression codec name; null when not configured.
String codecName = context.getString("hdfs.codeC");
fileType = context.getString("hdfs.fileType", defaultFileType);
maxOpenFiles = context.getInteger("hdfs.maxOpenFiles", defaultMaxOpenFiles);
callTimeout = context.getLong("hdfs.callTimeout", defaultCallTimeout);
threadsPoolSize = context.getInteger("hdfs.threadsPoolSize", defaultThreadPoolSize);
tryCount = defaultTryCount;
// Non-positive retry intervals are warned about below (message continues
// past the end of this snippet).
retryInterval = context.getLong("hdfs.retryInterval", defaultRetryInterval);
if (retryInterval <= 0) {
  LOG.warn("Retry Interval value: " + retryInterval + " is not " +
// NOTE(review): this snippet appears garbled by extraction — the getLong
// calls have orphaned trailing arguments and unbalanced parentheses
// (presumably the surrounding Math.max(...) wrappers were lost); verify
// against the original FileChannel source before relying on it.
context.getLong(FileChannelConfiguration.CHECKPOINT_INTERVAL,
    FileChannelConfiguration.DEFAULT_CHECKPOINT_INTERVAL);
if (checkpointInterval <= 0) {
context.getLong(FileChannelConfiguration.MAX_FILE_SIZE,
    FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE),
    FileChannelConfiguration.DEFAULT_MAX_FILE_SIZE);
context.getLong(FileChannelConfiguration.MINIMUM_REQUIRED_SPACE,
    FileChannelConfiguration.DEFAULT_MINIMUM_REQUIRED_SPACE),
    FileChannelConfiguration.FLOOR_MINIMUM_REQUIRED_SPACE);
// Poll timeout with a library-provided default when the key is unset.
pollTimeout = ctx.getLong(POLL_TIMEOUT, DEFAULT_POLL_TIMEOUT);
// Usable capacity in events: configured byteCapacity reduced by the buffer
// percentage, divided by the estimated average event size.
// NOTE(review): the narrowing cast to int assumes the result fits in an
// int — confirm for very large byteCapacity values.
byteCapacity = (int) ((context.getLong(BYTE_CAPACITY, defaultByteCapacity)
    * (1 - byteCapacityBufferPercentage * .01)) / avgEventSize);
if (byteCapacity < 1) {
/**
 * Configures the sink: name server (mandatory), topic/tag/producer-group
 * routing, and batching limits.
 *
 * @param context configuration source to read from
 * @throws ConfigurationException if the name server is not set
 */
@Override
public void configure(Context context) {
  // Name server is the only mandatory setting — fail fast without it.
  nameServer = context.getString(NAME_SERVER_CONFIG);
  if (nameServer == null) {
    throw new ConfigurationException("NameServer must not be null");
  }

  producerGroup = context.getString(PRODUCER_GROUP_CONFIG, PRODUCER_GROUP_DEFAULT);
  topic = context.getString(TOPIC_CONFIG, TOPIC_DEFAULT);
  tag = context.getString(TAG_CONFIG, TAG_DEFAULT);

  maxProcessTime = context.getLong(MAX_PROCESS_TIME_CONFIG, MAX_PROCESS_TIME_DEFAULT);
  batchSize = context.getInteger(BATCH_SIZE_CONFIG, BATCH_SIZE_DEFAULT);

  if (sinkCounter == null) {
    // Created once; repeated configuration keeps accumulated metrics.
    sinkCounter = new SinkCounter(getName());
  }
}
// --- Kite dataset sink configuration (method interior; enclosing method not
// in view; the roll-interval statement continues past this snippet) ---
this.batchSize = context.getLong(CONFIG_KITE_BATCH_SIZE, DEFAULT_BATCH_SIZE);
this.rollIntervalSeconds = context.getInteger(CONFIG_KITE_ROLL_INTERVAL,