IntWritable.<init>

How to use the org.apache.hadoop.io.IntWritable constructor

Best Java code snippets using org.apache.hadoop.io.IntWritable.<init> (Showing top 20 results out of 1,782)

Refine search:

  • Text.<init>
  • IntWritable.get
  • Text.toString
  • OutputCollector.collect
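
For orientation before the snippets: IntWritable is Hadoop's boxed, serializable int, with a no-argument constructor and one that takes the initial value. A minimal sketch (the demo class itself is illustrative; the IntWritable calls are the actual Hadoop API):

import org.apache.hadoop.io.IntWritable;

// Illustrative demo class, not one of the indexed snippets below.
public class IntWritableDemo {
 public static void main(String[] args) {
  // No-arg constructor: the wrapped value defaults to 0 until set() is called.
  IntWritable counter = new IntWritable();
  counter.set(7);

  // Value constructor: initializes the wrapped int directly.
  IntWritable answer = new IntWritable(42);

  System.out.println(counter.get()); // 7
  System.out.println(answer.get());  // 42
 }
}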
origin: apache/hive

public LazyInteger(LazyInteger copy) {
 super(copy);
 data = new IntWritable(copy.data.get());
}
origin: databricks/learning-spark

 public Tuple2<Text, IntWritable> call(Tuple2<String, Integer> record) {
  return new Tuple2<>(new Text(record._1), new IntWritable(record._2));
 }
}
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int commentCnt = 0;
  while (vs.hasNext()) {
    String v = vs.next().toString();
    if (v.startsWith("Comment")) {
      commentCnt++;
    }
  }
  out.collect(new IntWritable(42), new IntWritable(commentCnt));
}
origin: intel-hadoop/HiBench

  public void map(final LongWritable key, final Text value, final OutputCollector<IntWritable, Text> output, final Reporter reporter) throws IOException
  {
    final String[] line = value.toString().split("\t");
    output.collect(new IntWritable(Integer.parseInt(line[0])), new Text(line[1]) );
  }
}
origin: org.apache.spark/spark-core_2.10 (the identical snippet is also indexed for spark-core_2.11 and spark-core)

@SuppressWarnings("unchecked")
@Test
public void sequenceFile() {
 String outputDir = new File(tempDir, "output").getAbsolutePath();
 List<Tuple2<Integer, String>> pairs = Arrays.asList(
  new Tuple2<>(1, "a"),
  new Tuple2<>(2, "aa"),
  new Tuple2<>(3, "aaa")
 );
 JavaPairRDD<Integer, String> rdd = sc.parallelizePairs(pairs);
 rdd.mapToPair(pair -> new Tuple2<>(new IntWritable(pair._1()), new Text(pair._2())))
  .saveAsHadoopFile(outputDir, IntWritable.class, Text.class, SequenceFileOutputFormat.class);
 // Try reading the output back as an object file
 JavaPairRDD<Integer, String> readRDD = sc.sequenceFile(outputDir, IntWritable.class,
  Text.class).mapToPair(pair -> new Tuple2<>(pair._1().get(), pair._2().toString()));
 assertEquals(pairs, readRDD.collect());
}
origin: apache/hive

private void runAndVerify(String str, int months, String expResult, GenericUDF udf)
  throws HiveException {
 DeferredObject valueObj0 = new DeferredJavaObject(new Text(str));
 DeferredObject valueObj1 = new DeferredJavaObject(new IntWritable(months));
 DeferredObject[] args = { valueObj0, valueObj1 };
 Text output = (Text) udf.evaluate(args);
 assertEquals("add_months() test ", expResult, output != null ? output.toString() : null);
}
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  while (v.hasNext()) {
    out.collect(new IntWritable(k.get() % 4), v.next());
  }
}
origin: apache/hive

 @Override
 public void map(Object key, Text value, Context context)
   throws IOException, InterruptedException {
  String[] items = value.toString().split("\\s+");
  context.write(new IntWritable(items.length), value);
 }
}
origin: intel-hadoop/HiBench

  public void reduce (final IntWritable key, final Iterator<V1> values, final OutputCollector<IntWritable, IntWritable> output, final Reporter reporter) throws IOException
  {
    int count = 0;
    while (values.hasNext()) {
      values.next();
      count++;
    }
    output.collect( key, new IntWritable( count ) );
  }
}
origin: intel-hadoop/HiBench

  public void map (final LongWritable key, final Text value, final OutputCollector<IntWritable, Text> output, final Reporter reporter) throws IOException
  {
    String line_text = value.toString();
    int tabpos = line_text.indexOf("\t");
    if( tabpos > 0 ) {
      int out_key = Integer.parseInt(line_text.substring(0, tabpos));
      output.collect( new IntWritable(out_key) , new Text(line_text.substring(tabpos+1)) );
    } else {
      output.collect( new IntWritable(Integer.parseInt(line_text)) , new Text("") );
    }
  }
}
origin: apache/flink

@Override
public Tuple2<Text, IntWritable> map(Tuple2<String, Integer> value) throws Exception {
  return new Tuple2<Text, IntWritable>(new Text(value.f0), new IntWritable(value.f1));
}
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<Text> vs, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int commentCnt = 0;
  while (vs.hasNext()) {
    String v = vs.next().toString();
    if (v.startsWith(this.countPrefix)) {
      commentCnt++;
    }
  }
  out.collect(k, new IntWritable(commentCnt));
}
origin: apache/hive

private void runAndVerify(String str, String delim, Integer count, String expResult,
  GenericUDF udf) throws HiveException {
 DeferredObject valueObj0 = new DeferredJavaObject(str != null ? new Text(str) : null);
 DeferredObject valueObj1 = new DeferredJavaObject(delim != null ? new Text(delim) : null);
 DeferredObject valueObj2 = new DeferredJavaObject(count != null ? new IntWritable(count) : null);
 DeferredObject[] args = { valueObj0, valueObj1, valueObj2 };
 Text output = (Text) udf.evaluate(args);
 assertEquals("substring_index() test ", expResult, output != null ? output.toString() : null);
}
origin: apache/hive

@Override
public Object copyObject(Object o) {
 return o == null ? null : new IntWritable(((IntWritable) o).get());
}
origin: apache/flink

@Override
public void reduce(IntWritable k, Iterator<IntWritable> v, OutputCollector<IntWritable, IntWritable> out, Reporter r)
    throws IOException {
  int sum = 0;
  while (v.hasNext()) {
    sum += v.next().get();
  }
  out.collect(k, new IntWritable(sum));
}
origin: apache/hive

private void runAndVerify(Timestamp ts, int months, Text dateFormat, String expResult, GenericUDF udf)
  throws HiveException {
 DeferredObject valueObj0 = new DeferredJavaObject(new TimestampWritableV2(ts));
 DeferredObject valueObj1 = new DeferredJavaObject(new IntWritable(months));
 DeferredObject valueObj2 = new DeferredJavaObject(dateFormat);
 DeferredObject[] args = {valueObj0, valueObj1, valueObj2};
 Text output = (Text) udf.evaluate(args);
 assertEquals("add_months() test for timestamp", expResult, output != null ? output.toString() : null);
}
origin: apache/avro

@Override
protected void setup(Context context) {
 mCount = new IntWritable(0);
 mText = new Text("");
}

Popular methods of IntWritable

  • get
    Return the value of this IntWritable.
  • set
    Set the value of this IntWritable.
  • toString
  • write
  • readFields
  • compareTo
    Compares two IntWritables.
  • equals
    Returns true iff o is an IntWritable with the same value.
  • hashCode
  • getClass
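
To tie these methods together, here is a minimal, self-contained sketch (illustrative, not one of the indexed snippets) that round-trips an IntWritable through its Writable serialization with write and readFields:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;

public class IntWritableRoundTrip {
 public static void main(String[] args) throws IOException {
  IntWritable original = new IntWritable(42);

  // write() serializes the wrapped int to any DataOutput.
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  original.write(new DataOutputStream(bytes));

  // readFields() repopulates an existing instance from a DataInput.
  IntWritable restored = new IntWritable();
  restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

  System.out.println(original.equals(restored));    // true
  System.out.println(original.compareTo(restored)); // 0
 }
}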
