OutputCollector.collect

How to use the collect method in org.apache.hadoop.mapred.OutputCollector

Best Java code snippets using org.apache.hadoop.mapred.OutputCollector.collect (Showing top 20 results out of 918)
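
For context, each snippet below is a method body extracted from a larger class. A minimal, self-contained job that wires collect() end to end looks roughly like the word-count sketch below, built on the classic org.apache.hadoop.mapred API; the class and job names here are illustrative, not taken from any of the snippets:

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

// Hypothetical word-count job; collect() is the channel through which a
// mapper or reducer emits records in the old mapred API.
public class WordCountSketch {

  public static class TokenMapper extends MapReduceBase
      implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final Text word = new Text();
    private final IntWritable one = new IntWritable(1);

    @Override
    public void map(LongWritable key, Text value,
        OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
      StringTokenizer tokenizer = new StringTokenizer(value.toString());
      while (tokenizer.hasMoreTokens()) {
        word.set(tokenizer.nextToken());
        output.collect(word, one); // emit (word, 1) per token
      }
    }
  }

  public static class SumReducer extends MapReduceBase
      implements Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text key, Iterator<IntWritable> values,
        OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
      int sum = 0;
      while (values.hasNext()) {
        sum += values.next().get();
      }
      output.collect(key, new IntWritable(sum)); // emit (word, total)
    }
  }

  public static void main(String[] args) throws IOException {
    JobConf conf = new JobConf(WordCountSketch.class);
    conf.setJobName("wordcount-sketch");
    conf.setOutputKeyClass(Text.class);        // must match the types passed to collect()
    conf.setOutputValueClass(IntWritable.class);
    conf.setMapperClass(TokenMapper.class);
    conf.setReducerClass(SumReducer.class);
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    JobClient.runJob(conf);
  }
}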

origin: apache/hbase

/**
 * No aggregation; outputs pairs of (key, record).
 * @param key the row key
 * @param values the Put records emitted for this key
 * @param output collector that receives each (key, Put) pair
 * @param reporter facility to report progress
 * @throws IOException if writing to the collector fails
 */
public void reduce(ImmutableBytesWritable key, Iterator<Put> values,
    OutputCollector<ImmutableBytesWritable, Put> output,
    Reporter reporter)
    throws IOException {
  while (values.hasNext()) {
    output.collect(key, values.next());
  }
}
origin: apache/flink

@Override
public void map(final IntWritable k, final Text v,
    final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
  out.collect(k, v);
  out.collect(k, new Text(v.toString().toUpperCase()));
}
origin: apache/flink

@Override
public void map(IntWritable k, Text v, OutputCollector<IntWritable, Text> out, Reporter r)
    throws IOException {
  if (v.toString().startsWith(filterPrefix)) {
    out.collect(k, v);
  }
}
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  while (v.hasNext()) {
    out.collect(new IntWritable(k.get() % 4), v.next());
  }
}
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, Text> output, final Reporter reporter) throws IOException
  {
    final String[] line = value.toString().split("\t");
    output.collect(new IntWritable(Integer.parseInt(line[0])), new Text(line[1]) );
  }
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, IntWritable> output, final Reporter reporter) throws IOException
  {
    String line_text = value.toString();
    final String[] line = line_text.split("\t");
    output.collect( new IntWritable(Integer.parseInt(line[1].substring(3))), new IntWritable(1) );
  }
origin: intel-hadoop/HiBench

 void collectStats(OutputCollector<Text, Text> output,
          String name,
          long execTime,
          Object objSize) throws IOException {
  long totalSize = ((Long) objSize).longValue();
  // Throughput in MB/s: bytes * (1000 ms per s) / (elapsed ms * bytes per MB)
  float ioRateMbSec = (float) totalSize * 1000 / (execTime * MEGA);
  LOG.info("Number of bytes processed = " + totalSize);
  LOG.info("Exec time = " + execTime);
  LOG.info("IO rate = " + ioRateMbSec);

  // Key prefixes tell the accumulating reducer how to combine values
  // ("l:" = long sum, "f:" = float sum); rates are stored scaled by 1000.
  output.collect(new Text("l:tasks"), new Text(String.valueOf(1)));
  output.collect(new Text("l:size"), new Text(String.valueOf(totalSize)));
  output.collect(new Text("l:time"), new Text(String.valueOf(execTime)));
  output.collect(new Text("f:rate"), new Text(String.valueOf(ioRateMbSec * 1000)));
  output.collect(new Text("f:sqrate"), new Text(String.valueOf(ioRateMbSec * ioRateMbSec * 1000)));
 }
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int sum = 0;
  while (v.hasNext()) {
    sum += v.next().get();
  }
  out.collect(k, new IntWritable(sum));
}
origin: apache/flink

@Override
public void reduce(Text k, Iterator<LongWritable> vs, OutputCollector<Text, LongWritable> out, Reporter rep)
    throws IOException {
  long cnt = 0;
  while (vs.hasNext()) {
    cnt += vs.next().get();
  }
  out.collect(k, new LongWritable(cnt));
}
origin: apache/flink

@Override
public void map(LongWritable k, Text v, OutputCollector<Text, LongWritable> out, Reporter rep)
    throws IOException {
  // normalize and split the line
  String line = v.toString();
  String[] tokens = line.toLowerCase().split("\\W+");
  // emit the pairs
  for (String token : tokens) {
    if (token.length() > 0) {
      out.collect(new Text(token), new LongWritable(1L));
    }
  }
}
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, IntWritable> output, final Reporter reporter) throws IOException
  {
    String[] line = value.toString().split("\t");
    output.collect(new IntWritable(Integer.parseInt(line[1])), new IntWritable(1) );
  }
origin: apache/flink

@Override
public void map(final IntWritable k, final Text v,
    final OutputCollector<IntWritable, Text> out, final Reporter r) throws IOException {
  if (v.toString().contains("bananas")) {
    out.collect(k, v);
  }
}
origin: intel-hadoop/HiBench

  @Override
  public void reduce(LongWritable key, Iterator<NullWritable> values,
      OutputCollector<NullWritable, Text> output, Reporter reporter)
          throws IOException {

    output.collect(NullWritable.get(), new Text(key.toString()));
  }
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int commentCnt = 0;
  while (vs.hasNext()) {
    String v = vs.next().toString();
    if (v.startsWith(this.countPrefix)) {
      commentCnt++;
    }
  }
  out.collect(k, new IntWritable(commentCnt));
}
origin: intel-hadoop/HiBench

  public void reduce (final K1 key, final Iterator<V1> values, final OutputCollector<K1, V1> output, final Reporter reporter) throws IOException
  {
    while (values.hasNext()) {
      V1 cur_val = values.next();
      output.collect( key, cur_val );
    }
  }
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, Text> output, final Reporter reporter) throws IOException
  {
    final String[] line = value.toString().split("\t");
    IntWritable node_key = new IntWritable(Integer.parseInt(line[0]));
    output.collect(node_key, new Text(line[1]) );
  }
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<LongWritable, Text> output, final Reporter reporter) throws IOException
  {
    String line_text = value.toString();
    int tabpos = line_text.indexOf("\t");
    if( tabpos > 0 ) {
      long out_key = Long.parseLong(line_text.substring(0, tabpos));
      output.collect(new LongWritable(out_key), new Text(line_text.substring(tabpos + 1)));
    } else {
      output.collect(new LongWritable(Long.parseLong(line_text)), new Text(""));
    }
  }
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, IntWritable> output, final Reporter reporter) throws IOException
  {
    final String[] line = value.toString().split("\t");
    final String[] tokens = line[1].split(" ");
    String radius_str = tokens[0].substring(3);
    if( radius_str.length() > 0 ) {
      String[] radius_info = radius_str.split(":");
      double eff_radius = Double.parseDouble(radius_info[1]);
      output.collect( new IntWritable((int)Math.round(eff_radius)), new IntWritable(1) );
    }
  }
origin: apache/ignite

/** {@inheritDoc} */
@Override public void map(LongWritable key, Text val, OutputCollector<Text, IntWritable> output, Reporter reporter)
    throws IOException {
  assert wasConfigured : "Mapper should be configured";
  String line = val.toString();
  StringTokenizer tokenizer = new StringTokenizer(line);
  while (tokenizer.hasMoreTokens()) {
    word.set(tokenizer.nextToken());
    output.collect(word, one);
  }
  HadoopErrorSimulator.instance().onMap();
}
org.apache.hadoop.mapred.OutputCollector.collect

Javadoc

Adds a key/value pair to the output.
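
For reference, OutputCollector is a single-method interface in the classic mapred API, so collect is the only call it exposes:

public interface OutputCollector<K, V> {

  /** Adds a key/value pair to the output. */
  void collect(K key, V value) throws IOException;
}

The key and value types passed to collect must match the output key/value classes configured on the job (or the map output classes, in a mapper, when those differ). Frameworks such as Flink's Hadoop compatibility layer, which several of the snippets above come from, supply their own OutputCollector implementations to bridge the emitted pairs into their runtimes.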
