
How to use the progress method in org.apache.hadoop.mapreduce.Mapper$Context

Best Java code snippets using org.apache.hadoop.mapreduce.Mapper$Context.progress (Showing top 20 results out of 369)
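Context.progress() reports liveness to the MapReduce framework: each call resets the task timeout (mapreduce.task.timeout, 600000 ms by default), so an attempt that is busy but silent is not killed as hung. Reading input and writing output also count as progress, so the explicit call matters when a long stretch of computation does neither, as in the snippets below. A minimal sketch of the basic pattern; SlowWorkMapper and expensiveComputation are hypothetical stand-ins for any slow per-record work:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class SlowWorkMapper extends Mapper<LongWritable, Text, Text, LongWritable> {
 @Override
 protected void map(LongWritable key, Text value, Context context)
   throws IOException, InterruptedException {
  // Hypothetical per-record work that can run for minutes without
  // reading or writing anything.
  long result = expensiveComputation(value.toString());
  // Reset the liveness timer so the framework does not kill this
  // attempt after mapreduce.task.timeout elapses with no activity.
  context.progress();
  context.write(value, new LongWritable(result));
 }

 private long expensiveComputation(String input) {
  return input.length(); // stand-in for the real work
 }
}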

origin: apache/incubator-druid

 @Override
 protected void cleanup(
   Context context
 ) throws IOException
 {
  final String tmpDirLoc = context.getConfiguration().get(TMP_FILE_LOC_KEY);
  final File tmpDir = Paths.get(tmpDirLoc).toFile();
  FileUtils.deleteDirectory(tmpDir);
  context.progress();
  context.setStatus("Clean");
 }
origin: apache/hive

 @Override
 public void run() {
  try {
   int count = 0;
   while (sendReport) {
    // Periodically report progress on the Context object
    // to prevent TaskTracker from killing the Templeton
    // Controller task
    context.progress();
    count++;
    String msg = "KeepAlive Heart beat" + makeDots(count);
    LOG.info(msg);
    Thread.sleep(KEEP_ALIVE_MSEC);
   }
  } catch (InterruptedException e) {
   // Ok to be interrupted
  }
 }
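A sketch of how such a keep-alive thread is typically wired around blocking work: start it as a daemon before the long call, interrupt it in a finally block. runChildProcess and the 60-second interval are illustrative, not Hive's actual names:

// Inside map(), where `context` is in scope and effectively final.
Thread keepAlive = new Thread(() -> {
 try {
  while (!Thread.currentThread().isInterrupted()) {
   context.progress();    // reset the task's liveness timer
   Thread.sleep(60_000L); // illustrative heartbeat interval
  }
 } catch (InterruptedException e) {
  // expected on shutdown; exit quietly
 }
});
keepAlive.setDaemon(true); // never block JVM exit
keepAlive.start();
try {
 runChildProcess();        // hypothetical blocking call being kept alive
} finally {
 keepAlive.interrupt();
}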
origin: apache/incubator-druid

context.progress();
final Path inPath = new Path(JobHelper.getURIFromSegment(segment));
final File inDir = new File(tmpDir, "in");
context.progress();
final File outDir = new File(tmpDir, "out");
FileUtils.forceMkdir(outDir);
HadoopDruidConverterConfig.INDEX_IO.validateTwoSegments(inDir, outDir);
context.progress();
context.setStatus("Starting PUSH");
final Path baseOutputPath = new Path(config.getSegmentOutputPath());
// … (multi-line push call truncated in this snippet; its last argument
// is config.DATA_SEGMENT_PUSHER) …
context.progress();
context.setStatus("Finished PUSH");
final String finalSegmentString = HadoopDruidConverterConfig.jsonMapper.writeValueAsString(finalSegment);
context.progress();
context.setStatus("Ready To Commit");
origin: io.github.repir/repir

@Override
public void mapperProgress() {
 if (mappercontext != null && System.currentTimeMillis() - lastprogress > 300000) {
   mappercontext.progress();
   lastprogress = System.currentTimeMillis();
 }
}
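The five-minute throttle (300000 ms) keeps the call out of hot per-record paths while still reporting well inside the default ten-minute task timeout.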
origin: org.apache.giraph/giraph-core

/**
 * Executes postSave() on worker observers.
 */
private void postSaveOnWorkerObservers() {
 for (WorkerObserver obs : serviceWorker.getWorkerObservers()) {
  obs.postSave();
  context.progress();
 }
}
origin: org.apache.accumulo/accumulo-test

@Override
public void testFailure(Failure failure) throws Exception {
 log.info("Test failed: {}", failure.getDescription(), failure.getException());
 failures.add(failure.getDescription().getMethodName());
 context.progress();
}
origin: org.apache.accumulo/accumulo-test

@Override
public void testFinished(Description description) throws Exception {
 log.info("Finished {}", description);
 context.progress();
}
origin: apache/giraph

@Override
public void progress() {
 master.getContext().progress();
}
origin: apache/phoenix

@Override
protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
    throws IOException, InterruptedException {
  try {
    currentBatchCount++;
    final List<Object> values = record.getValues();
    indxWritable.setValues(values);
    indxWritable.write(this.pStatement);
    this.pStatement.execute();
    final PhoenixConnection pconn = connection.unwrap(PhoenixConnection.class);
    MutationState currentMutationState = pconn.getMutationState();
    if (mutationState == null) {
      mutationState = currentMutationState;
    }
    // Keep accumulating Mutations till batch size
    mutationState.join(currentMutationState);
    // Write Mutation Batch
    if (currentBatchCount % batchSize == 0) {
      writeBatch(mutationState, context);
      mutationState = null;
    }
    // Make sure progress is reported to Application Master.
    context.progress();
  } catch (SQLException e) {
    LOG.error(" Error {}  while read/write of a record ", e.getMessage());
    context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(currentBatchCount);
    throw new RuntimeException(e);
  }
  context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);
}
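Note that progress is reported for every record, not only when a batch is flushed: while mutations are merely being accumulated the task emits no output, so without the explicit context.progress() a slowly filling batch could get the attempt killed as hung.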
origin: apache/phoenix

@Override
protected void map(NullWritable key, PhoenixIndexDBWritable record, Context context)
    throws IOException, InterruptedException {
  try {
    final List<Object> values = record.getValues();
    context.getCounter(PhoenixJobCounters.INPUT_RECORDS).increment(1);
    currentBatchValues.add(new Pair<>(record.getRowTs(), values));
    if (context.getCounter(PhoenixJobCounters.INPUT_RECORDS).getValue() % batchSize != 0) {
      // if we haven't hit the batch size, just report progress and move on to next record
      context.progress();
      return;
    } else {
      // otherwise, process the batch
      processBatch(context);
    }
    context.progress(); // Make sure progress is reported to Application Master.
  } catch (SQLException | IllegalArgumentException e) {
    LOG.error(" Error while read/write of a record ", e);
    context.getCounter(PhoenixJobCounters.FAILED_RECORDS).increment(1);
    throw new IOException(e);
  }
}
origin: apache/hive

do {
 // … (job status check elided in this snippet) …
 context.progress();
 Thread.sleep(POLL_JOBPROGRESS_MSEC);
} while (true);
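Polling loops like this spend most of their time in Thread.sleep(), which generates no implicit progress, so the explicit call on each iteration is what keeps the controlling task alive.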
origin: apache/hbase

context.progress();
origin: apache/hbase

 @Override
 protected void map(LongWritable key, Text value, final Context context)
     throws IOException, InterruptedException {
  Status status = new Status() {
   @Override
   public void setStatus(String msg) {
     context.setStatus(msg);
   }
  };
  ObjectMapper mapper = new ObjectMapper();
  TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
  Configuration conf = HBaseConfiguration.create(context.getConfiguration());
  final Connection con = ConnectionFactory.createConnection(conf);
  AsyncConnection asyncCon = null;
  try {
   asyncCon = ConnectionFactory.createAsyncConnection(conf).get();
  } catch (ExecutionException e) {
   throw new IOException(e);
  }
  // Evaluation task
  RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(result.duration);
  context.getCounter(Counter.ROWS).increment(opts.perClientRunRows);
  context.write(new LongWritable(opts.startRow), new LongWritable(result.duration));
  context.progress();
 }
origin: apache/hbase

output.progress();
origin: apache/hbase

@Override
protected void map(NullWritable key, NullWritable value, Context context) throws IOException,
  InterruptedException {
 String suffix = "/" + shortTaskId;
 int BLOCK_SIZE = (int) (recordsToWrite / 100);
 for (long i = 0; i < recordsToWrite;) {
  for (long idx = 0; idx < BLOCK_SIZE && i < recordsToWrite; idx++, i++) {
   int expIdx = rand.nextInt(BLOCK_SIZE) % VISIBILITY_EXPS_COUNT;
   String exp = VISIBILITY_EXPS[expIdx];
   byte[] row = Bytes.add(Bytes.toBytes(i), Bytes.toBytes(suffix), Bytes.toBytes(exp));
   Put p = new Put(row);
   p.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.EMPTY_BYTE_ARRAY);
   p.setCellVisibility(new CellVisibility(exp));
   getCounter(expIdx).increment(1);
   mutator.mutate(p);
   if (i % 100 == 0) {
    context.setStatus("Written " + i + "/" + recordsToWrite + " records");
    context.progress();
   }
  }
  // End of block, flush all of them before we start writing anything
  // pointing to these!
  mutator.flush();
 }
}
origin: apache/hbase

protected void persist(Context output, long count, byte[][] prev, byte[][] current, byte[] id)
  throws IOException {
 for (int i = 0; i < current.length; i++) {
  if (i % 100 == 0) {
   // Tickle progress every so often else maprunner will think us hung
   output.progress();
  }
  Put put = new Put(current[i]);
  put.addColumn(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]);
  if (count >= 0) {
   put.addColumn(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i));
  }
  if (id != null) {
   put.addColumn(FAMILY_NAME, COLUMN_CLIENT, id);
  }
  // See if we are to write multiple columns.
  if (this.multipleUnevenColumnFamilies) {
   // Use any column name.
   put.addColumn(TINY_FAMILY_NAME, TINY_FAMILY_NAME, this.tinyValue);
   // Use any column name.
   put.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME, this.bigValue);
  }
  mutator.mutate(put);
 }
 mutator.flush();
}
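As the comment says, tickling progress every 100 puts is deliberate: the call only needs to fire more often than the task timeout, and a coarse interval keeps per-record overhead out of a tight write loop.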
origin: apache/hbase

 @Override
 protected void map(NullWritable key, PeInputSplit value, final Context context)
     throws IOException, InterruptedException {
  Status status = new Status() {
   @Override
   public void setStatus(String msg) {
     context.setStatus(msg);
   }
  };
  // Evaluation task
  pe.tableName = value.getTableName();
  long elapsedTime = this.pe.runOneClient(this.cmd, value.getStartRow(),
   value.getRows(), value.getTotalRows(),
   value.isFlushCommits(), value.isWriteToWAL(),
   value.isUseTags(), value.getNoOfTags(),
   ConnectionFactory.createConnection(context.getConfiguration()), status);
  // Collect how much time the thing took. Report as map output and
  // to the ELAPSED_TIME counter.
  context.getCounter(Counter.ELAPSED_TIME).increment(elapsedTime);
  context.getCounter(Counter.ROWS).increment(value.rows);
  context.write(new LongWritable(value.startRow), new LongWritable(elapsedTime));
  context.progress();
 }
origin: apache/incubator-gobblin

 @Override
 protected void updateTaskMetrics() {
  super.updateTaskMetrics();
  WorkUnit workUnit = this.task.getTaskState().getWorkunit();
  if (GobblinMetrics.isEnabled(workUnit)) {
   if (workUnit.getPropAsBoolean(ConfigurationKeys.MR_REPORT_METRICS_AS_COUNTERS_KEY,
     ConfigurationKeys.DEFAULT_MR_REPORT_METRICS_AS_COUNTERS)) {
    updateCounters(this.task);
   }
  }
  // Tell the TaskTracker it's making progress
  this.context.progress();
 }

Popular methods of Mapper$Context

  • write
  • getConfiguration
  • getCounter
  • getInputSplit
  • setStatus
  • getTaskAttemptID
  • nextKeyValue
  • getCurrentValue
  • getCurrentKey
  • getNumReduceTasks
  • getJobID
  • getInputFormatClass
  • getLocalCacheFiles
  • getOutputCommitter
  • getCredentials
  • getLocalCacheArchives
  • getStatus
  • getCacheArchives
  • getCacheFiles
