public void run() {
  try {
    executingBatches.getAndIncrement(); // count this batch as in flight
    result = runUpdate(documents);
  } finally {
    executingBatches.getAndDecrement(); // done, whether the update succeeded or not
  }
}
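// For context, run() above belongs to a small Runnable that carries one batch
// of documents through the writer's thread pool. The surrounding class is not
// shown in this section, so the following is a minimal sketch under that
// assumption: only 'documents', 'result', and 'executingBatches' come from the
// source; the class shape around them is illustrative.
//
// import java.util.ArrayList;
// import java.util.Collection;
// import java.util.List;
// import java.util.concurrent.atomic.AtomicInteger;
// import org.apache.solr.client.solrj.response.UpdateResponse;
// import org.apache.solr.common.SolrInputDocument;

class BatchWriterSketch {
  // Shared gauge of how many batches are currently being written.
  final AtomicInteger executingBatches = new AtomicInteger(0);

  class Batch implements Runnable {
    private final List<SolrInputDocument> documents;
    private volatile UpdateResponse result;

    Batch(Collection<SolrInputDocument> batch) {
      // Copy the documents so the caller may clear and reuse its buffer.
      this.documents = new ArrayList<>(batch);
    }

    @Override
    public void run() {
      try {
        executingBatches.getAndIncrement();
        result = runUpdate(documents);
      } finally {
        executingBatches.getAndDecrement();
      }
    }
  }

  UpdateResponse runUpdate(List<SolrInputDocument> docs) {
    return null; // stub; the real method is shown later in this section
  }
}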
@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
  heartBeater.needHeartBeat();
  try {
    if (batch.size() > 0) { // flush the final, partially filled batch
      batchWriter.queueBatch(batch);
      numDocsWritten += batch.size();
      batch.clear();
    }
    batchWriter.close(context); // waits for queued batches to finish
  } catch (SolrServerException e) {
    throw new IOException(e);
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
public SolrRecordWriter(TaskAttemptContext context, Path outputShardDir, int batchSize) {
  this.batchSize = batchSize;
  this.batch = new ArrayList<>(batchSize);
  Configuration conf = context.getConfiguration();

  // setLogLevel("org.apache.solr.core", "WARN");
  // setLogLevel("org.apache.solr.update", "WARN");

  heartBeater = new HeartBeater(context);
  try {
    heartBeater.needHeartBeat();
    Path solrHomeDir = SolrRecordWriter.findSolrConfig(conf);
    FileSystem fs = outputShardDir.getFileSystem(conf);
    EmbeddedSolrServer solr = createEmbeddedSolrServer(solrHomeDir, fs, outputShardDir);
    batchWriter = new BatchWriter(solr, batchSize,
        context.getTaskAttemptID().getTaskID(),
        SolrOutputFormat.getSolrWriterThreadCount(conf),
        SolrOutputFormat.getSolrWriterQueueSize(conf));
  } catch (Exception e) {
    throw new IllegalStateException(String.format(Locale.ENGLISH,
        "Failed to initialize record writer for %s, %s",
        context.getJobName(), conf.get("mapred.task.id")), e);
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
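// The constructor hands BatchWriter a thread count and a bounded queue size.
// BatchWriter's body is not shown here, but a pool built along the following
// lines would give the behavior those two parameters imply: writer threads
// drain a bounded queue, and once the queue is full the submitting thread
// runs the batch itself, throttling write() instead of buffering documents
// without bound. This is a sketch of that idea, not the library's code.
//
// import java.util.concurrent.LinkedBlockingQueue;
// import java.util.concurrent.ThreadPoolExecutor;
// import java.util.concurrent.TimeUnit;

class BatchPoolSketch {
  static ThreadPoolExecutor newBatchPool(int threadCount, int queueSize) {
    return new ThreadPoolExecutor(
        threadCount, threadCount,                     // fixed-size writer pool
        5, TimeUnit.SECONDS,                          // idle keep-alive (arbitrary)
        new LinkedBlockingQueue<Runnable>(queueSize), // bounded work queue
        new ThreadPoolExecutor.CallerRunsPolicy());   // back-pressure when full
  }
}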
/**
 * Write a record. This method accumulates records into a batch, and when
 * {@link #batchSize} items are present flushes it to the indexer. The writes
 * can take a substantial amount of time, depending on {@link #batchSize}. If
 * there is heavy disk contention, the writes may take more than the 600-second
 * default timeout.
 */
@Override
public void write(K key, V value) throws IOException {
  heartBeater.needHeartBeat();
  try {
    try {
      SolrInputDocumentWritable sidw = (SolrInputDocumentWritable) value;
      batch.add(sidw.getSolrInputDocument());
      if (batch.size() >= batchSize) {
        batchWriter.queueBatch(batch);
        numDocsWritten += batch.size();
        if (System.nanoTime() >= nextLogTime) {
          LOG.info("docsWritten: {}", numDocsWritten);
          nextLogTime += TimeUnit.NANOSECONDS.convert(10, TimeUnit.SECONDS);
        }
        batch.clear();
      }
    } catch (SolrServerException e) {
      throw new IOException(e);
    }
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
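// As a usage sketch, a reduce task would hand each document to the writer as
// below. SolrRecordWriter and SolrInputDocumentWritable come from the classes
// in this section; the key type, field names, and the writeOne() helper are
// illustrative assumptions, and the sketch is assumed to sit in the same
// package as SolrRecordWriter.
//
// import java.io.IOException;
// import org.apache.hadoop.io.Text;
// import org.apache.solr.common.SolrInputDocument;

class WriteExampleSketch {
  // Hypothetical driver: feed one document to the record writer. The writer
  // buffers it and only flushes once batchSize documents have accumulated.
  static void writeOne(SolrRecordWriter<Text, SolrInputDocumentWritable> writer)
      throws IOException {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "doc-1");          // illustrative field names
    doc.addField("text", "hello, solr");
    writer.write(new Text("doc-1"), new SolrInputDocumentWritable(doc));
  }
}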
protected UpdateResponse runUpdate(List<SolrInputDocument> batchToWrite) {
  try {
    UpdateResponse result = solr.add(batchToWrite);
    SolrRecordWriter.incrementCounter(taskId, SolrCounters.class.getName(),
        SolrCounters.BATCHES_WRITTEN.toString(), 1);
    SolrRecordWriter.incrementCounter(taskId, SolrCounters.class.getName(),
        SolrCounters.DOCUMENTS_WRITTEN.toString(), batchToWrite.size());
    if (LOG.isDebugEnabled()) {
      SolrRecordWriter.incrementCounter(taskId, SolrCounters.class.getName(),
          SolrCounters.BATCH_WRITE_TIME.toString(), result.getElapsedTime());
    }
    return result;
  } catch (Throwable e) {
    // Remember the failure so it can be surfaced to the caller, and count the
    // error by exception class in the job counters.
    if (e instanceof Exception) {
      setBatchWriteException((Exception) e);
    } else {
      setBatchWriteException(new Exception(e));
    }
    SolrRecordWriter.incrementCounter(taskId, getClass().getName() + ".errors",
        e.getClass().getName(), 1);
    LOG.error("Unable to process batch", e);
    return null;
  }
}
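// setBatchWriteException is not shown in this section. A plausible shape,
// assuming the writer wants to surface the first asynchronous failure when
// the task later flushes or closes, is sketched below; the field, the
// first-failure-wins policy, and the getter are assumptions.

class BatchFailureSketch {
  private volatile Exception batchWriteException;

  protected synchronized void setBatchWriteException(Exception e) {
    if (batchWriteException == null) { // keep only the first failure
      batchWriteException = e;
    }
  }

  public Exception getBatchWriteException() {
    return batchWriteException; // e.g. rethrown on the task thread at close()
  }
}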