StreamExecutionEnvironment.setRestartStrategy

How to use the setRestartStrategy method in org.apache.flink.streaming.api.environment.StreamExecutionEnvironment

Best Java code snippets using org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setRestartStrategy (Showing top 20 results out of 315)

origin: apache/flink

env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
env.getConfig().disableSysoutLogging();
origin: apache/flink

  public static void main(String[] args) throws Exception {
    ParameterTool params = ParameterTool.fromArgs(args);

    // define the dataflow
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(2);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(10, 1000));
    env.readFileStream("input/", 60000, FileMonitoringFunction.WatchType.ONLY_NEW_FILES)
      .addSink(new DiscardingSink<String>());

    // generate a job graph
    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    File jobGraphFile = new File(params.get("output", "job.graph"));
    try (FileOutputStream output = new FileOutputStream(jobGraphFile);
      ObjectOutputStream obOutput = new ObjectOutputStream(output)){
      obOutput.writeObject(jobGraph);
    }
  }
}
origin: apache/flink

public static void main(final String[] args) throws Exception {
  final ParameterTool params = ParameterTool.fromArgs(args);
  final String outputPath = params.getRequired("outputPath");
  final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(4);
  env.enableCheckpointing(5000L);
  env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10L, TimeUnit.SECONDS)));
  final StreamingFileSink<Tuple2<Integer, Integer>> sink = StreamingFileSink
    .forRowFormat(new Path(outputPath), (Encoder<Tuple2<Integer, Integer>>) (element, stream) -> {
      PrintStream out = new PrintStream(stream);
      out.println(element.f1);
    })
    .withBucketAssigner(new KeyBucketAssigner())
    .withRollingPolicy(OnCheckpointRollingPolicy.build())
    .build();
  // generate data, shuffle, sink
  env.addSource(new Generator(10, 10, 60))
    .keyBy(0)
    .addSink(sink);
  env.execute("StreamingFileSinkProgram");
}
origin: apache/flink

see.setRestartStrategy(RestartStrategies.noRestart());
see.setParallelism(1);
origin: apache/flink

/**
 * Runs the test program defined in {@link #testProgram(StreamExecutionEnvironment)}
 * followed by the checks in {@link #postSubmit}.
 */
@Test
public void runCheckpointedProgram() throws Exception {
  try {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(PARALLELISM);
    env.enableCheckpointing(500);
    env.getConfig().disableSysoutLogging();
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
    testProgram(env);
    TestUtils.tryExecute(env, "Fault Tolerance Test");
    postSubmit();
  }
  catch (Exception e) {
    e.printStackTrace();
    Assert.fail(e.getMessage());
  }
}
origin: apache/flink

env.enableCheckpointing(500);
env.getConfig().disableSysoutLogging();
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 0L));
origin: apache/flink

public static void main(String[] args) throws Exception {
  ParameterTool params = ParameterTool.fromArgs(args);
  String outputPath = params.getRequired("outputPath");
  StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
  sEnv.setRestartStrategy(RestartStrategies.fixedDelayRestart(
      3,
      Time.of(10, TimeUnit.SECONDS)
    ));
  sEnv.enableCheckpointing(4000);
  final int idlenessMs = 10;
  // define bucketing sink to emit the result
  BucketingSink<Tuple4<Integer, Long, Integer, String>> sink = new BucketingSink<Tuple4<Integer, Long, Integer, String>>(outputPath)
    .setBucketer(new KeyBucketer());
  // generate data, shuffle, perform stateful operation, sink
  sEnv.addSource(new Generator(10, idlenessMs, 60))
    .keyBy(0)
    .map(new SubtractingMapper(-1L * idlenessMs))
    .addSink(sink);
  sEnv.execute();
}
origin: apache/flink

private JobGraph createJobGraph(ExecutionMode mode) {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
  env.setRestartStrategy(RestartStrategies.noRestart());
  env.setStateBackend((StateBackend) new MemoryStateBackend());
  switch (mode) {
    case MIGRATE:
      createMigrationJob(env);
      break;
    case RESTORE:
      createRestoredJob(env);
      break;
  }
  return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
origin: apache/flink

  public static void main(String[] args) throws Exception {
    final ParameterTool pt = ParameterTool.fromArgs(args);
    final String checkpointDir = pt.getRequired("checkpoint.dir");

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStateBackend(new FsStateBackend(checkpointDir));
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.enableCheckpointing(1000L);
    env.getConfig().disableGenericTypes();

    env.addSource(new MySource()).uid("my-source")
        .keyBy(anInt -> 0)
        .map(new MyStatefulFunction()).uid("my-map")
        .addSink(new DiscardingSink<>()).uid("my-sink");
    env.execute();
  }
}
origin: apache/flink

env.setParallelism(PARALLELISM);
env.getConfig().disableSysoutLogging();
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
env.enableCheckpointing(200);
origin: apache/flink

env.setMaxParallelism(pt.getInt("maxParallelism", pt.getInt("parallelism", 1)));
env.enableCheckpointing(pt.getInt("checkpointInterval", 1000));
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, pt.getInt("restartDelay", 0)));
if (pt.getBoolean("externalizedCheckpoints", false)) {
  env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
origin: apache/flink

private static JobGraph createJobGraphWithOperatorState(
    int parallelism, int maxParallelism, OperatorCheckpointMethod checkpointMethod) {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(parallelism);
  env.getConfig().setMaxParallelism(maxParallelism);
  env.enableCheckpointing(Long.MAX_VALUE);
  env.setRestartStrategy(RestartStrategies.noRestart());
  StateSourceBase.workStartedLatch = new CountDownLatch(parallelism);
  SourceFunction<Integer> src;
  switch (checkpointMethod) {
    case CHECKPOINTED_FUNCTION:
      src = new PartitionedStateSource(false);
      break;
    case CHECKPOINTED_FUNCTION_BROADCAST:
      src = new PartitionedStateSource(true);
      break;
    case LIST_CHECKPOINTED:
      src = new PartitionedStateSourceListCheckpointed();
      break;
    case NON_PARTITIONED:
      src = new NonPartitionedStateSource();
      break;
    default:
      throw new IllegalArgumentException();
  }
  DataStream<Integer> input = env.addSource(src);
  input.addSink(new DiscardingSink<Integer>());
  return env.getStreamGraph().getJobGraph();
}
origin: apache/flink

env.setParallelism(parallelism);
env.enableCheckpointing(500);
env.setRestartStrategy(RestartStrategies.noRestart());
env.getConfig().disableSysoutLogging();
origin: apache/flink

env.enableCheckpointing(500);
env.setParallelism(parallelism);
env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
env.getConfig().disableSysoutLogging();
origin: apache/flink

private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(
    int parallelism,
    int maxParallelism,
    int fixedParallelism,
    int numberKeys,
    int numberElements,
    boolean terminateAfterEmission,
    int checkpointingInterval) {
  StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(parallelism);
  env.getConfig().setMaxParallelism(maxParallelism);
  env.enableCheckpointing(checkpointingInterval);
  env.setRestartStrategy(RestartStrategies.noRestart());
  DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(
      numberKeys,
      numberElements,
      terminateAfterEmission))
      .setParallelism(fixedParallelism)
      .keyBy(new KeySelector<Integer, Integer>() {
        private static final long serialVersionUID = -7952298871120320940L;
        @Override
        public Integer getKey(Integer value) throws Exception {
          return value;
        }
      });
  SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);
  DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));
  result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());
  return env.getStreamGraph().getJobGraph();
}
origin: apache/flink

public static void main(String[] args) throws Exception {
  ParameterTool pt = ParameterTool.fromArgs(args);
  String savepointsPath = pt.getRequired("savepoint-path");
  Configuration config = new Configuration();
  config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointsPath);
  StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(config);
  env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
  env.setRestartStrategy(RestartStrategies.noRestart());
  env.setStateBackend(new MemoryStateBackend());
  /**
   * Source -> keyBy -> C(Window -> StatefulMap1 -> StatefulMap2)
   */
  SingleOutputStreamOperator<Tuple2<Integer, Integer>> source = createIntegerTupleSource(env, ExecutionMode.GENERATE);
  SingleOutputStreamOperator<Integer> window = createWindowFunction(ExecutionMode.GENERATE, source);
  SingleOutputStreamOperator<Integer> first = createFirstStatefulMap(ExecutionMode.GENERATE, window);
  SingleOutputStreamOperator<Integer> second = createSecondStatefulMap(ExecutionMode.GENERATE, first);
  env.execute("job");
}
origin: apache/flink

env.setRestartStrategy(RestartStrategies.noRestart());
env.getConfig().setUseSnapshotCompression(true);
origin: apache/flink

env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));
origin: apache/flink

public static void main(String[] args) throws Exception {
  ParameterTool pt = ParameterTool.fromArgs(args);
  String savepointsPath = pt.getRequired("savepoint-path");
  Configuration config = new Configuration();
  config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointsPath);
  StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(config);
  env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
  env.setRestartStrategy(RestartStrategies.noRestart());
  env.setStateBackend(new MemoryStateBackend());
  /**
   * Source -> StatefulMap1 -> CHAIN(StatefulMap2 -> Map -> StatefulMap3)
   */
  DataStream<Integer> source = createSource(env, ExecutionMode.GENERATE);
  SingleOutputStreamOperator<Integer> first = createFirstStatefulMap(ExecutionMode.GENERATE, source);
  first.startNewChain();
  SingleOutputStreamOperator<Integer> second = createSecondStatefulMap(ExecutionMode.GENERATE, first);
  second.startNewChain();
  SingleOutputStreamOperator<Integer> stateless = createStatelessMap(second);
  SingleOutputStreamOperator<Integer> third = createThirdStatefulMap(ExecutionMode.GENERATE, stateless);
  env.execute("job");
}
origin: apache/flink

env.setRestartStrategy(RestartStrategies.noRestart());
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.setRestartStrategy

Javadoc

Sets the restart strategy configuration. The configuration specifies which restart strategy will be used for the execution graph in case of a restart.
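
For reference, here is a minimal sketch (not taken from the snippets above) of the three restart strategy factories exposed by RestartStrategies in the Flink 1.x API used on this page; treat the exact signatures as assumptions to verify against your Flink version. Note that each call overwrites the previous configuration, so only the last one takes effect.

import java.util.concurrent.TimeUnit;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RestartStrategySketch {
  public static void main(String[] args) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Fail the job on the first failure (common in tests, as in several snippets above).
    env.setRestartStrategy(RestartStrategies.noRestart());

    // Retry up to 3 times, waiting 10 seconds between attempts.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10L, TimeUnit.SECONDS)));

    // Tolerate at most 3 failures per 5-minute interval, with a 10-second delay between restarts.
    env.setRestartStrategy(RestartStrategies.failureRateRestart(
        3, Time.of(5L, TimeUnit.MINUTES), Time.of(10L, TimeUnit.SECONDS)));
  }
}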

Popular methods of StreamExecutionEnvironment

  • execute
  • getExecutionEnvironment
    Creates an execution environment that represents the context in which the program is currently executed…
  • addSource
    Adds a data source with a custom type information thus opening a DataStream. Only in very special cases…
  • getConfig
    Gets the config object.
  • enableCheckpointing
    Enables checkpointing for the streaming job. The distributed state of the streaming dataflow will be…
  • setStreamTimeCharacteristic
    Sets the time characteristic for all streams created from this environment, e.g., processing time, event time…
  • setParallelism
    Sets the parallelism for operations executed through this environment. Setting a parallelism of x here…
  • fromElements
    Creates a new data stream that contains the given elements. The elements must all be of the same type…
  • setStateBackend
    Sets the state backend that describes how to store and checkpoint operator state. It defines both…
  • createLocalEnvironment
    Creates a LocalStreamEnvironment. The local execution environment will run the program in a multi-threaded…
  • fromCollection
    Creates a data stream from the given iterator. Because the iterator will remain unmodified until the…
  • getCheckpointConfig
    Gets the checkpoint config, which defines values like checkpoint interval, delay between checkpoints…
  • getParallelism, getStreamGraph, socketTextStream, readTextFile, generateSequence, clean, getStreamTimeCharacteristic
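
Several of the methods above usually appear together when setting up a job. The following is a minimal, self-contained sketch (hypothetical pipeline, local execution assumed) combining them with setRestartStrategy:

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class EnvironmentSetupSketch {
  public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(2);                            // setParallelism
    env.enableCheckpointing(1000L);                   // enableCheckpointing
    env.getConfig().setAutoWatermarkInterval(200L);   // getConfig
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 1000L));

    env.fromElements(1, 2, 3)                         // fromElements
        .map(i -> i * 2)
        .print();

    env.execute("environment-setup-sketch");          // execute
  }
}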
