/**
 * Gets the number of threads used to transfer files while snapshotting/restoring.
 */
public int getNumberOfTransferingThreads() {
    // An undefined sentinel means "use the configured option's default".
    if (numberOfTransferingThreads == UNDEFINED_NUMBER_OF_TRANSFERING_THREADS) {
        return CHECKPOINT_TRANSFER_THREAD_NUM.defaultValue();
    }
    return numberOfTransferingThreads;
}
/**
 * Creates a new job graph generator that uses the default values for its resource configuration.
 */
public JobGraphGenerator() {
    // All three assignments are independent; fall back to framework-wide defaults.
    this.useLargeRecordHandler = ConfigConstants.DEFAULT_USE_LARGE_RECORD_HANDLER;
    this.defaultMaxFan = AlgorithmOptions.SPILLING_MAX_FAN.defaultValue();
    this.defaultSortSpillingThreshold = AlgorithmOptions.SORT_SPILLING_THRESHOLD.defaultValue();
}
/**
 * Gets whether incremental checkpoints are enabled for this state backend.
 */
public boolean isIncrementalCheckpointsEnabled() {
    // Unset ternary flag resolves to the option's default value.
    final boolean fallback = CheckpointingOptions.INCREMENTAL_CHECKPOINTS.defaultValue();
    return enableIncrementalCheckpointing.getOrDefault(fallback);
}
/**
 * Looks up the raw value for the given config option, falling back to the option's
 * declared default when no value has been configured.
 *
 * @param configOption the option to resolve
 * @return the configured raw value, or the option's default if none is set
 */
private Object getValueOrDefaultFromOption(ConfigOption<?> configOption) {
    final Object rawValue = getRawValueFromOption(configOption);
    if (rawValue == null) {
        return configOption.defaultValue();
    }
    return rawValue;
}
/**
 * Resolves the user-jar inclusion mode from the given configuration.
 *
 * <p>If the configured value is not a valid {@code YarnConfigOptions.UserJarInclusion}
 * constant, a warning is logged and the option's default value is used instead.
 *
 * @param config the Flink configuration to read the setting from
 * @return the configured inclusion mode, or the default on an invalid value
 */
private static YarnConfigOptions.UserJarInclusion getUserJarInclusionMode(
        org.apache.flink.configuration.Configuration config) {
    final String configuredUserJarInclusion =
            config.getString(YarnConfigOptions.CLASSPATH_INCLUDE_USER_JAR);
    try {
        // Use locale-independent upper-casing: under locale-sensitive rules (e.g. Turkish),
        // "include".toUpperCase() does not yield "INCLUDE" and valueOf() would reject it.
        return YarnConfigOptions.UserJarInclusion.valueOf(
                configuredUserJarInclusion.toUpperCase(java.util.Locale.ROOT));
    } catch (IllegalArgumentException e) {
        LOG.warn(
                "Configuration parameter {} was configured with an invalid value {}. Falling back to default ({}).",
                YarnConfigOptions.CLASSPATH_INCLUDE_USER_JAR.key(),
                configuredUserJarInclusion,
                YarnConfigOptions.CLASSPATH_INCLUDE_USER_JAR.defaultValue());
        // The option's default is stored in canonical (upper-case) enum form.
        return YarnConfigOptions.UserJarInclusion.valueOf(
                YarnConfigOptions.CLASSPATH_INCLUDE_USER_JAR.defaultValue());
    }
}
@Override
public void open(MetricConfig config) {
    // Determine whether label values should be sanitized; defaults to the option's value.
    final boolean filterEnabled =
            config.getBoolean(
                    FILTER_LABEL_VALUE_CHARACTER.key(),
                    FILTER_LABEL_VALUE_CHARACTER.defaultValue());
    if (!filterEnabled) {
        // Filtering disabled: install an identity filter that passes values through.
        labelValueCharactersFilter = input -> input;
    }
}
/** Builds the failure-simulating mapper from the test parameters (or their defaults). */
static MapFunction<Event, Event> createFailureMapper(ParameterTool pt) {
    final long numRecords =
            pt.getLong(
                    TEST_SIMULATE_FAILURE_NUM_RECORDS.key(),
                    TEST_SIMULATE_FAILURE_NUM_RECORDS.defaultValue());
    final long numCheckpoints =
            pt.getLong(
                    TEST_SIMULATE_FAILURE_NUM_CHECKPOINTS.key(),
                    TEST_SIMULATE_FAILURE_NUM_CHECKPOINTS.defaultValue());
    final int maxFailures =
            pt.getInt(
                    TEST_SIMULATE_FAILURE_MAX_FAILURES.key(),
                    TEST_SIMULATE_FAILURE_MAX_FAILURES.defaultValue());
    return new FailureMapper<>(numRecords, numCheckpoints, maxFailures);
}
/**
 * Creates the semantics-check mapper for the configured delivery guarantee.
 *
 * @throws IllegalArgumentException if the configured semantics string is neither
 *     "exactly-once" nor "at-least-once" (case-insensitive)
 */
static FlatMapFunction<Event, String> createSemanticsCheckMapper(ParameterTool pt) {
    final String semantics = pt.get(TEST_SEMANTICS.key(), TEST_SEMANTICS.defaultValue());

    final SemanticsCheckMapper.ValidatorFunction validatorFunction;
    if (semantics.equalsIgnoreCase("exactly-once")) {
        validatorFunction = SemanticsCheckMapper.ValidatorFunction.exactlyOnce();
    } else if (semantics.equalsIgnoreCase("at-least-once")) {
        validatorFunction = SemanticsCheckMapper.ValidatorFunction.atLeastOnce();
    } else {
        throw new IllegalArgumentException("Unknown semantics requested: " + semantics);
    }

    return new SemanticsCheckMapper(validatorFunction);
}
/** Parses the TTL test configuration from the given parameters, using option defaults. */
static TtlTestConfig fromArgs(ParameterTool pt) {
    final int keySpace =
            pt.getInt(
                    UPDATE_GENERATOR_SRC_KEYSPACE.key(),
                    UPDATE_GENERATOR_SRC_KEYSPACE.defaultValue());
    final long sleepAfterElements =
            pt.getLong(
                    UPDATE_GENERATOR_SRC_SLEEP_AFTER_ELEMENTS.key(),
                    UPDATE_GENERATOR_SRC_SLEEP_AFTER_ELEMENTS.defaultValue());
    final long sleepTime =
            pt.getLong(
                    UPDATE_GENERATOR_SRC_SLEEP_TIME.key(),
                    UPDATE_GENERATOR_SRC_SLEEP_TIME.defaultValue());
    final Time ttl =
            Time.milliseconds(
                    pt.getLong(
                            STATE_TTL_VERIFIER_TTL_MILLI.key(),
                            STATE_TTL_VERIFIER_TTL_MILLI.defaultValue()));
    final long reportStatAfterUpdatesNum =
            pt.getLong(
                    REPORT_STAT_AFTER_UPDATES_NUM.key(),
                    REPORT_STAT_AFTER_UPDATES_NUM.defaultValue());
    return new TtlTestConfig(keySpace, sleepAfterElements, sleepTime, ttl, reportStatAfterUpdatesNum);
}
}
/**
 * Returns the value associated with the given config option as an integer.
 *
 * @param configOption The configuration option
 * @return the (default) value associated with the given config option
 */
@PublicEvolving
public int getInteger(ConfigOption<Integer> configOption) {
    // Resolve the configured-or-default raw value, then coerce it to an int.
    return convertToInt(getValueOrDefaultFromOption(configOption), configOption.defaultValue());
}
/**
 * Returns the value associated with the given config option as a long integer.
 *
 * @param configOption The configuration option
 * @return the (default) value associated with the given config option
 */
@PublicEvolving
public long getLong(ConfigOption<Long> configOption) {
    // Resolve the configured-or-default raw value, then coerce it to a long.
    return convertToLong(getValueOrDefaultFromOption(configOption), configOption.defaultValue());
}
/**
 * Returns the value associated with the given config option as a float.
 *
 * @param configOption The configuration option
 * @return the (default) value associated with the given config option
 */
@PublicEvolving
public float getFloat(ConfigOption<Float> configOption) {
    // Resolve the configured-or-default raw value, then coerce it to a float.
    return convertToFloat(getValueOrDefaultFromOption(configOption), configOption.defaultValue());
}
/** Creates the sliding-window check mapper, sized by the configured slide factor. */
static FlatMapFunction<Tuple2<Integer, List<Event>>, String> createSlidingWindowCheckMapper(
        ParameterTool pt) {
    final int slideFactor =
            pt.getInt(TEST_SLIDE_FACTOR.key(), TEST_SLIDE_FACTOR.defaultValue());
    return new SlidingWindowCheckMapper(slideFactor);
}
}
/**
 * Returns the value associated with the given config option as a {@code double}.
 *
 * @param configOption The configuration option
 * @return the (default) value associated with the given config option
 */
@PublicEvolving
public double getDouble(ConfigOption<Double> configOption) {
    // Resolve the configured-or-default raw value, then coerce it to a double.
    return convertToDouble(getValueOrDefaultFromOption(configOption), configOption.defaultValue());
}
/** Applies event-time tumbling windows whose length covers the configured number of events. */
static WindowedStream<Event, Integer, TimeWindow> applyTumblingWindows(
        KeyedStream<Event, Integer> keyedStream, ParameterTool pt) {
    final long eventTimeProgressPerEvent =
            pt.getLong(
                    SEQUENCE_GENERATOR_SRC_EVENT_TIME_CLOCK_PROGRESS.key(),
                    SEQUENCE_GENERATOR_SRC_EVENT_TIME_CLOCK_PROGRESS.defaultValue());
    final long numEventsPerWindow =
            pt.getLong(
                    TUMBLING_WINDOW_OPERATOR_NUM_EVENTS.key(),
                    TUMBLING_WINDOW_OPERATOR_NUM_EVENTS.defaultValue());
    // Window length in ms = events per window * event-time progress per event.
    return keyedStream.timeWindow(
            Time.milliseconds(numEventsPerWindow * eventTimeProgressPerEvent));
}
/** Builds the sliding window assigner from the configured slide size and factor. */
static SlidingEventTimeWindows createSlidingWindow(ParameterTool pt) {
    final long slideSize = pt.getLong(TEST_SLIDE_SIZE.key(), TEST_SLIDE_SIZE.defaultValue());
    final long slideFactor = pt.getInt(TEST_SLIDE_FACTOR.key(), TEST_SLIDE_FACTOR.defaultValue());
    // Total window length = slide size * slide factor; windows advance by one slide.
    final Time windowLength = Time.milliseconds(slideSize * slideFactor);
    final Time slide = Time.milliseconds(slideSize);
    return SlidingEventTimeWindows.of(windowLength, slide);
}
@Override public void open(Configuration parameters) throws Exception { if (data == null) { // We need this to be large, because we want to test with files Random rand = new Random(getRuntimeContext().getIndexOfThisSubtask()); data = new byte[CheckpointingOptions.FS_SMALL_FILE_THRESHOLD.defaultValue() + 1]; rand.nextBytes(data); } }
/** Entry point: builds and runs the heavy-deployment stress test topology. */
public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    setupEnvironment(env, pt);

    final int numStates =
            pt.getInt(NUM_LIST_STATES_PER_OP.key(), NUM_LIST_STATES_PER_OP.defaultValue());
    final int numPartitionsPerState =
            pt.getInt(
                    NUM_PARTITIONS_PER_LIST_STATE.key(),
                    NUM_PARTITIONS_PER_LIST_STATE.defaultValue());

    // The test exercises checkpointing; it is meaningless without it.
    Preconditions.checkState(
            env.getCheckpointInterval() > 0L, "Checkpointing must be enabled for this test!");

    env.addSource(new SimpleEndlessSourceWithBloatedState(numStates, numPartitionsPerState))
            .setParallelism(env.getParallelism())
            .addSink(new DiscardingSink<>())
            .setParallelism(1);

    env.execute("HeavyDeploymentStressTestProgram");
}
@Test
public void remoteModeInsufficientBuffersReceiver() throws Exception {
    final StreamNetworkThroughputBenchmark benchmark = new StreamNetworkThroughputBenchmark();
    final int writers = 2;
    final int channels = 2;
    final int senderBuffers = writers * channels;
    // One buffer short of the per-channel requirement on the receiver side.
    final int receiverBuffers =
            writers * channels * TaskManagerOptions.NETWORK_BUFFERS_PER_CHANNEL.defaultValue() - 1;

    expectedException.expect(IOException.class);
    expectedException.expectMessage("Insufficient number of network buffers");

    benchmark.setUp(writers, channels, 100, false, senderBuffers, receiverBuffers);
}
@Test
public void remoteModeInsufficientBuffersSender() throws Exception {
    final StreamNetworkThroughputBenchmark benchmark = new StreamNetworkThroughputBenchmark();
    final int writers = 2;
    final int channels = 2;
    // One buffer short of the requirement on the sender side.
    final int senderBuffers = writers * channels - 1;
    final int receiverBuffers =
            writers * channels * TaskManagerOptions.NETWORK_BUFFERS_PER_CHANNEL.defaultValue();

    expectedException.expect(IOException.class);
    expectedException.expectMessage("Insufficient number of network buffers");

    benchmark.setUp(writers, channels, 100, false, senderBuffers, receiverBuffers);
}