Tabnine Logo
ReduceNode
Code Index | Add Tabnine to your IDE (free)

How to use
ReduceNode
in
org.apache.flink.optimizer.dag

Best Java code snippets using org.apache.flink.optimizer.dag.ReduceNode (Showing top 20 results out of 315)

origin: apache/flink

  /**
   * Lazily builds and returns the utility {@code ReduceNode} that represents the
   * combiner for this reduce operation. The node is created at most once and cached.
   *
   * <p>The combiner's size estimates are copied from the predecessor node,
   * i.e. we conservatively assume the combiner does not shrink the data at all.
   *
   * @return the cached combiner utility node for this reduce
   */
  public ReduceNode getCombinerUtilityNode() {
    if (this.preReduceUtilityNode == null) {
      ReduceNode combiner = new ReduceNode(this);

      // Conservative estimate: the combiner emits exactly as much data as it reads.
      combiner.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
      combiner.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();

      this.preReduceUtilityNode = combiner;
    }
    return this.preReduceUtilityNode;
  }
}
origin: apache/flink

/**
 * Builds the physical plan node for this sorted reduce, injecting a local combiner
 * in front of the data exchange when the input is redistributed and a combiner
 * strategy is configured.
 *
 * @param in   the channel delivering the input data
 * @param node the optimizer node to instantiate (cast to {@code ReduceNode} when a
 *             combiner is injected)
 * @return the plan node that executes the reduce with {@code SORTED_REDUCE}
 */
@Override
public SingleInputPlanNode instantiate(Channel in, SingleInputNode node) {
  Channel toReducer = in;
  // No combiner is injected on a FORWARD connection or when broadcast inputs exist.
  if (in.getShipStrategy() == ShipStrategyType.FORWARD ||
      (node.getBroadcastConnections() != null && !node.getBroadcastConnections().isEmpty())) {
    // If the data was explicitly partitioned upstream, a combiner cannot be injected
    // behind the partitioning automatically — tell the user to add one manually.
    if (in.getSource().getOptimizerNode() instanceof PartitionNode) {
      LOG.warn("Cannot automatically inject combiner for ReduceFunction. Please add an explicit combiner with combineGroup() in front of the partition operator.");
    }
  } else if (combinerStrategy != DriverStrategy.NONE) {
    // non forward case. all local properties are killed anyways, so we can safely plug in a combiner
    Channel toCombiner = new Channel(in.getSource());
    toCombiner.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
    // create an input node for combine with same parallelism as input node
    ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
    combinerNode.setParallelism(in.getSource().getParallelism());
    // The combiner is a purely local optimization, so it is assigned zero cost.
    SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode,
              "Combine ("+node.getOperator().getName()+")", toCombiner,
              this.combinerStrategy, this.keyList);
    combiner.setCosts(new Costs(0, 0));
    combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
    // Re-route the original ship strategy through the combiner, and sort locally on
    // the reducer side so the sorted reduce sees its input grouped by key.
    toReducer = new Channel(combiner);
    toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(),
                  in.getShipStrategySortOrder(), in.getDataExchangeMode());
    toReducer.setLocalStrategy(LocalStrategy.SORT, in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());
  }
  return new SingleInputPlanNode(node, "Reduce (" + node.getOperator().getName() + ")", toReducer,
    DriverStrategy.SORTED_REDUCE, this.keyList);
}
origin: apache/flink

setParallelism(1);
origin: apache/flink

n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
origin: org.apache.flink/flink-optimizer

setParallelism(1);
origin: org.apache.flink/flink-optimizer_2.10

n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
origin: apache/flink

/**
 * Builds the physical plan node for a non-grouped (all) reduce. On a locally
 * connected (FORWARD) input the reduce is instantiated directly; otherwise a
 * local combiner is placed in front of the data exchange so each partition is
 * pre-reduced before shipping.
 *
 * @param in   the channel delivering the input data
 * @param node the optimizer node to instantiate (cast to {@code ReduceNode} when a
 *             combiner is injected)
 * @return the plan node that executes the reduce with {@code ALL_REDUCE}
 */
@Override
public SingleInputPlanNode instantiate(Channel in, SingleInputNode node) {
  if (in.getShipStrategy() == ShipStrategyType.FORWARD) {
    // locally connected, directly instantiate
    return new SingleInputPlanNode(node, "Reduce ("+node.getOperator().getName()+")",
                    in, DriverStrategy.ALL_REDUCE);
  } else {
    // Non-forward case: plug in a combiner on a pipelined forward channel.
    Channel toCombiner = new Channel(in.getSource());
    toCombiner.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
    
    // create an input node for combine with same parallelism as input node
    ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
    combinerNode.setParallelism(in.getSource().getParallelism());
    // The combiner is a purely local optimization, so it is assigned zero cost.
    SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode,
        "Combine ("+node.getOperator().getName()+")", toCombiner, DriverStrategy.ALL_REDUCE);
    combiner.setCosts(new Costs(0, 0));
    combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
    
    // Re-route the original ship and local strategies through the combiner.
    Channel toReducer = new Channel(combiner);
    toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(),
                  in.getShipStrategySortOrder(), in.getDataExchangeMode());
    toReducer.setLocalStrategy(in.getLocalStrategy(), in.getLocalStrategyKeys(),
                  in.getLocalStrategySortOrder());
    return new SingleInputPlanNode(node, "Reduce ("+node.getOperator().getName()+")",
                    toReducer, DriverStrategy.ALL_REDUCE);
  }
}

origin: org.apache.flink/flink-optimizer

  /**
   * Lazily builds and returns the utility {@code ReduceNode} that represents the
   * combiner for this reduce operation. The node is created at most once and cached.
   *
   * <p>The combiner's size estimates are copied from the predecessor node,
   * i.e. we conservatively assume the combiner does not shrink the data at all.
   *
   * @return the cached combiner utility node for this reduce
   */
  public ReduceNode getCombinerUtilityNode() {
    if (this.preReduceUtilityNode == null) {
      ReduceNode combiner = new ReduceNode(this);

      // Conservative estimate: the combiner emits exactly as much data as it reads.
      combiner.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
      combiner.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();

      this.preReduceUtilityNode = combiner;
    }
    return this.preReduceUtilityNode;
  }
}
origin: org.apache.flink/flink-optimizer_2.10

setParallelism(1);
origin: org.apache.flink/flink-optimizer_2.11

n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
origin: org.apache.flink/flink-optimizer_2.11

/**
 * Builds the physical plan node for this sorted reduce, injecting a local combiner
 * in front of the data exchange when the input is redistributed and a combiner
 * strategy is configured.
 *
 * @param in   the channel delivering the input data
 * @param node the optimizer node to instantiate (cast to {@code ReduceNode} when a
 *             combiner is injected)
 * @return the plan node that executes the reduce with {@code SORTED_REDUCE}
 */
@Override
public SingleInputPlanNode instantiate(Channel in, SingleInputNode node) {
  Channel toReducer = in;
  // No combiner is injected on a FORWARD connection or when broadcast inputs exist.
  if (in.getShipStrategy() == ShipStrategyType.FORWARD ||
      (node.getBroadcastConnections() != null && !node.getBroadcastConnections().isEmpty())) {
    // If the data was explicitly partitioned upstream, a combiner cannot be injected
    // behind the partitioning automatically — tell the user to add one manually.
    if (in.getSource().getOptimizerNode() instanceof PartitionNode) {
      LOG.warn("Cannot automatically inject combiner for ReduceFunction. Please add an explicit combiner with combineGroup() in front of the partition operator.");
    }
  } else if (combinerStrategy != DriverStrategy.NONE) {
    // non forward case. all local properties are killed anyways, so we can safely plug in a combiner
    Channel toCombiner = new Channel(in.getSource());
    toCombiner.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
    // create an input node for combine with same parallelism as input node
    ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
    combinerNode.setParallelism(in.getSource().getParallelism());
    // The combiner is a purely local optimization, so it is assigned zero cost.
    SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode,
              "Combine ("+node.getOperator().getName()+")", toCombiner,
              this.combinerStrategy, this.keyList);
    combiner.setCosts(new Costs(0, 0));
    combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
    // Re-route the original ship strategy through the combiner, and sort locally on
    // the reducer side so the sorted reduce sees its input grouped by key.
    toReducer = new Channel(combiner);
    toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(),
                  in.getShipStrategySortOrder(), in.getDataExchangeMode());
    toReducer.setLocalStrategy(LocalStrategy.SORT, in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());
  }
  return new SingleInputPlanNode(node, "Reduce (" + node.getOperator().getName() + ")", toReducer,
    DriverStrategy.SORTED_REDUCE, this.keyList);
}
origin: org.apache.flink/flink-optimizer_2.10

  /**
   * Lazily builds and returns the utility {@code ReduceNode} that represents the
   * combiner for this reduce operation. The node is created at most once and cached.
   *
   * <p>The combiner's size estimates are copied from the predecessor node,
   * i.e. we conservatively assume the combiner does not shrink the data at all.
   *
   * @return the cached combiner utility node for this reduce
   */
  public ReduceNode getCombinerUtilityNode() {
    if (this.preReduceUtilityNode == null) {
      ReduceNode combiner = new ReduceNode(this);

      // Conservative estimate: the combiner emits exactly as much data as it reads.
      combiner.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
      combiner.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();

      this.preReduceUtilityNode = combiner;
    }
    return this.preReduceUtilityNode;
  }
}
origin: org.apache.flink/flink-optimizer_2.11

setParallelism(1);
origin: org.apache.flink/flink-optimizer

n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
origin: org.apache.flink/flink-optimizer

/**
 * Builds the physical plan node for this sorted reduce, injecting a local combiner
 * in front of the data exchange when the input is redistributed and a combiner
 * strategy is configured.
 *
 * @param in   the channel delivering the input data
 * @param node the optimizer node to instantiate (cast to {@code ReduceNode} when a
 *             combiner is injected)
 * @return the plan node that executes the reduce with {@code SORTED_REDUCE}
 */
@Override
public SingleInputPlanNode instantiate(Channel in, SingleInputNode node) {
  Channel toReducer = in;
  // No combiner is injected on a FORWARD connection or when broadcast inputs exist.
  if (in.getShipStrategy() == ShipStrategyType.FORWARD ||
      (node.getBroadcastConnections() != null && !node.getBroadcastConnections().isEmpty())) {
    // If the data was explicitly partitioned upstream, a combiner cannot be injected
    // behind the partitioning automatically — tell the user to add one manually.
    if (in.getSource().getOptimizerNode() instanceof PartitionNode) {
      LOG.warn("Cannot automatically inject combiner for ReduceFunction. Please add an explicit combiner with combineGroup() in front of the partition operator.");
    }
  } else if (combinerStrategy != DriverStrategy.NONE) {
    // non forward case. all local properties are killed anyways, so we can safely plug in a combiner
    Channel toCombiner = new Channel(in.getSource());
    toCombiner.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
    // create an input node for combine with same parallelism as input node
    ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
    combinerNode.setParallelism(in.getSource().getParallelism());
    // The combiner is a purely local optimization, so it is assigned zero cost.
    SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode,
              "Combine ("+node.getOperator().getName()+")", toCombiner,
              this.combinerStrategy, this.keyList);
    combiner.setCosts(new Costs(0, 0));
    combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
    // Re-route the original ship strategy through the combiner, and sort locally on
    // the reducer side so the sorted reduce sees its input grouped by key.
    toReducer = new Channel(combiner);
    toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(),
                  in.getShipStrategySortOrder(), in.getDataExchangeMode());
    toReducer.setLocalStrategy(LocalStrategy.SORT, in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());
  }
  return new SingleInputPlanNode(node, "Reduce (" + node.getOperator().getName() + ")", toReducer,
    DriverStrategy.SORTED_REDUCE, this.keyList);
}
origin: org.apache.flink/flink-optimizer_2.11

  /**
   * Lazily builds and returns the utility {@code ReduceNode} that represents the
   * combiner for this reduce operation. The node is created at most once and cached.
   *
   * <p>The combiner's size estimates are copied from the predecessor node,
   * i.e. we conservatively assume the combiner does not shrink the data at all.
   *
   * @return the cached combiner utility node for this reduce
   */
  public ReduceNode getCombinerUtilityNode() {
    if (this.preReduceUtilityNode == null) {
      ReduceNode combiner = new ReduceNode(this);

      // Conservative estimate: the combiner emits exactly as much data as it reads.
      combiner.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
      combiner.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();

      this.preReduceUtilityNode = combiner;
    }
    return this.preReduceUtilityNode;
  }
}
origin: com.alibaba.blink/flink-optimizer

setParallelism(1);
origin: com.alibaba.blink/flink-optimizer

n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
origin: com.alibaba.blink/flink-optimizer

/**
 * Builds the physical plan node for this sorted reduce, injecting a local combiner
 * in front of the data exchange when the input is redistributed and a combiner
 * strategy is configured.
 *
 * @param in   the channel delivering the input data
 * @param node the optimizer node to instantiate (cast to {@code ReduceNode} when a
 *             combiner is injected)
 * @return the plan node that executes the reduce with {@code SORTED_REDUCE}
 */
@Override
public SingleInputPlanNode instantiate(Channel in, SingleInputNode node) {
  Channel toReducer = in;
  // No combiner is injected on a FORWARD connection or when broadcast inputs exist.
  if (in.getShipStrategy() == ShipStrategyType.FORWARD ||
      (node.getBroadcastConnections() != null && !node.getBroadcastConnections().isEmpty())) {
    // If the data was explicitly partitioned upstream, a combiner cannot be injected
    // behind the partitioning automatically — tell the user to add one manually.
    if (in.getSource().getOptimizerNode() instanceof PartitionNode) {
      LOG.warn("Cannot automatically inject combiner for ReduceFunction. Please add an explicit combiner with combineGroup() in front of the partition operator.");
    }
  } else if (combinerStrategy != DriverStrategy.NONE) {
    // non forward case. all local properties are killed anyways, so we can safely plug in a combiner
    Channel toCombiner = new Channel(in.getSource());
    toCombiner.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
    // create an input node for combine with same parallelism as input node
    ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
    combinerNode.setParallelism(in.getSource().getParallelism());
    // The combiner is a purely local optimization, so it is assigned zero cost.
    SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode,
              "Combine ("+node.getOperator().getName()+")", toCombiner,
              this.combinerStrategy, this.keyList);
    combiner.setCosts(new Costs(0, 0));
    combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
    // Re-route the original ship strategy through the combiner, and sort locally on
    // the reducer side so the sorted reduce sees its input grouped by key.
    toReducer = new Channel(combiner);
    toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(),
                  in.getShipStrategySortOrder(), in.getDataExchangeMode());
    toReducer.setLocalStrategy(LocalStrategy.SORT, in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());
  }
  return new SingleInputPlanNode(node, "Reduce (" + node.getOperator().getName() + ")", toReducer,
    DriverStrategy.SORTED_REDUCE, this.keyList);
}
origin: com.alibaba.blink/flink-optimizer

  /**
   * Lazily builds and returns the utility {@code ReduceNode} that represents the
   * combiner for this reduce operation. The node is created at most once and cached.
   *
   * <p>The combiner's size estimates are copied from the predecessor node,
   * i.e. we conservatively assume the combiner does not shrink the data at all.
   *
   * @return the cached combiner utility node for this reduce
   */
  public ReduceNode getCombinerUtilityNode() {
    if (this.preReduceUtilityNode == null) {
      ReduceNode combiner = new ReduceNode(this);

      // Conservative estimate: the combiner emits exactly as much data as it reads.
      combiner.estimatedOutputSize = getPredecessorNode().getEstimatedOutputSize();
      combiner.estimatedNumRecords = getPredecessorNode().getEstimatedNumRecords();

      this.preReduceUtilityNode = combiner;
    }
    return this.preReduceUtilityNode;
  }
}
org.apache.flink.optimizer.dag.ReduceNode

Javadoc

The Optimizer representation of a Reduce operator.

Most used methods

  • <init>
  • getCombinerUtilityNode
  • getPredecessorNode
  • setParallelism

Popular in Java

  • Reactive rest calls using spring rest template
  • onCreateOptionsMenu (Activity)
  • scheduleAtFixedRate (Timer)
  • addToBackStack (FragmentTransaction)
  • Locale (java.util)
    Locale represents a language/country/variant combination. Locales are used to alter the presentation of information.
  • Semaphore (java.util.concurrent)
    A counting semaphore. Conceptually, a semaphore maintains a set of permits. Each #acquire blocks if necessary until a permit is available.
  • AtomicInteger (java.util.concurrent.atomic)
    An int value that may be updated atomically. See the java.util.concurrent.atomic package specification.
  • DataSource (javax.sql)
    An interface for the creation of Connection objects which represent a connection to a database.
  • JButton (javax.swing)
  • JOptionPane (javax.swing)
  • From CI to AI: The AI layer in your organization
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now