/** Returns the next document from the delegate iterator, wrapped for Hadoop serialization. */
@Override
public SolrInputDocumentWritable next() {
  // NOTE(review): assumes parent.next() yields a SolrInputDocument — confirm delegate type.
  SolrInputDocument nextDoc = parent.next();
  return new SolrInputDocumentWritable(nextDoc);
}
/** Returns the next document, unwrapped from its Writable container. */
@Override
public SolrInputDocument next() {
  SolrInputDocumentWritable wrapped = parent.next();
  return wrapped.getSolrInputDocument();
}
/** Unwraps and returns the document carried by the parent iterator's next Writable. */
@Override
public SolrInputDocument next() {
  return parent.next()
      .getSolrInputDocument();
}
/** Wraps the delegate iterator's next document in a {@link SolrInputDocumentWritable}. */
@Override
public SolrInputDocumentWritable next() {
  // NOTE(review): assumes parent.next() yields a SolrInputDocument — confirm delegate type.
  SolrInputDocument doc = parent.next();
  return new SolrInputDocumentWritable(doc);
}
/**
 * Write a record. This method accumulates records in to a batch, and when
 * {@link #batchSize} items are present flushes it to the indexer. The writes
 * can take a substantial amount of time, depending on {@link #batchSize}. If
 * there is heavy disk contention the writes may take more than the 600 second
 * default timeout.
 *
 * @param key ignored here; only the document payload in {@code value} is used
 * @param value expected to be a {@link SolrInputDocumentWritable}
 * @throws IOException if the underlying batch writer fails (SolrServerException is wrapped)
 */
@Override
public void write(K key, V value) throws IOException {
  // Keep the task alive during potentially long queueBatch() calls.
  heartBeater.needHeartBeat();
  try {
    try {
      SolrInputDocumentWritable sidw = (SolrInputDocumentWritable) value;
      batch.add(sidw.getSolrInputDocument());
      if (batch.size() >= batchSize) {
        batchWriter.queueBatch(batch);
        numDocsWritten += batch.size();
        if (System.nanoTime() >= nextLogTime) {
          LOG.info("docsWritten: {}", numDocsWritten);
          // Schedule the next log line relative to *now*. The previous "+=" scheme
          // let the deadline fall far behind after a long stall and then logged on
          // every subsequent batch until it caught up.
          nextLogTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
        }
        batch.clear();
      }
    } catch (SolrServerException e) {
      // The RecordWriter contract only allows IOException; preserve the cause.
      throw new IOException(e);
    }
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
/**
 * Emits each (id, document) pair to the MapReduce context and bumps the output counter.
 *
 * @param shard target shard (unused here; routing is handled downstream)
 * @param inputDocumentMap documents keyed by their unique document id
 * @throws IOException if the context write fails or the thread is interrupted
 * @throws SolrServerException declared for interface compatibility; not thrown here
 */
@Override
public void add(int shard, Map<String, SolrInputDocument> inputDocumentMap)
    throws SolrServerException, IOException {
  for (Entry<String, SolrInputDocument> documentEntry : inputDocumentMap.entrySet()) {
    try {
      context.write(
          new Text(documentEntry.getKey()),
          new SolrInputDocumentWritable(documentEntry.getValue()));
      context.getCounter(HBaseIndexerCounters.OUTPUT_INDEX_DOCUMENTS).increment(1L);
    } catch (InterruptedException e) {
      // Restore the interrupt flag and surface the failure through the declared
      // IOException rather than an undeclared RuntimeException.
      Thread.currentThread().interrupt();
      throw new IOException(e);
    }
  }
}
/**
 * Write a record. This method accumulates records in to a batch, and when
 * {@link #batchSize} items are present flushes it to the indexer. The writes
 * can take a substantial amount of time, depending on {@link #batchSize}. If
 * there is heavy disk contention the writes may take more than the 600 second
 * default timeout.
 *
 * @param key ignored here; only the document payload in {@code value} is used
 * @param value expected to be a {@link SolrInputDocumentWritable}
 * @throws IOException if the underlying batch writer fails (SolrServerException is wrapped)
 */
@Override
public void write(K key, V value) throws IOException {
  // Keep the task alive during potentially long queueBatch() calls.
  heartBeater.needHeartBeat();
  try {
    try {
      SolrInputDocumentWritable sidw = (SolrInputDocumentWritable) value;
      batch.add(sidw.getSolrInputDocument());
      if (batch.size() >= batchSize) {
        batchWriter.queueBatch(batch);
        numDocsWritten += batch.size();
        if (System.nanoTime() >= nextLogTime) {
          LOG.info("docsWritten: {}", numDocsWritten);
          // Schedule the next log line relative to *now*. The previous "+=" scheme
          // let the deadline fall far behind after a long stall and then logged on
          // every subsequent batch until it caught up.
          nextLogTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
        }
        batch.clear();
      }
    } catch (SolrServerException e) {
      // The RecordWriter contract only allows IOException; preserve the cause.
      throw new IOException(e);
    }
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
/**
 * Emits each (id, document) pair to the MapReduce context and bumps the output counter.
 *
 * @param shard target shard (unused here; routing is handled downstream)
 * @param inputDocumentMap documents keyed by their unique document id
 * @throws IOException if the context write fails or the thread is interrupted
 * @throws SolrServerException declared for interface compatibility; not thrown here
 */
@Override
public void add(int shard, Map<String, SolrInputDocument> inputDocumentMap)
    throws SolrServerException, IOException {
  for (Entry<String, SolrInputDocument> documentEntry : inputDocumentMap.entrySet()) {
    try {
      context.write(
          new Text(documentEntry.getKey()),
          new SolrInputDocumentWritable(documentEntry.getValue()));
      context.getCounter(HBaseIndexerCounters.OUTPUT_INDEX_DOCUMENTS).increment(1L);
    } catch (InterruptedException e) {
      // Restore the interrupt flag and surface the failure through the declared
      // IOException rather than an undeclared RuntimeException.
      Thread.currentThread().interrupt();
      throw new IOException(e);
    }
  }
}
/**
 * Write a record. This method accumulates records in to a batch, and when
 * {@link #batchSize} items are present flushes it to the indexer. The writes
 * can take a substantial amount of time, depending on {@link #batchSize}. If
 * there is heavy disk contention the writes may take more than the 600 second
 * default timeout.
 *
 * @param key ignored here; only the document payload in {@code value} is used
 * @param value expected to be a {@link SolrInputDocumentWritable}
 * @throws IOException if the underlying batch writer fails (SolrServerException is wrapped)
 */
@Override
public void write(K key, V value) throws IOException {
  // Keep the task alive during potentially long queueBatch() calls.
  heartBeater.needHeartBeat();
  try {
    try {
      SolrInputDocumentWritable sidw = (SolrInputDocumentWritable) value;
      batch.add(sidw.getSolrInputDocument());
      if (batch.size() >= batchSize) {
        batchWriter.queueBatch(batch);
        numDocsWritten += batch.size();
        if (System.nanoTime() >= nextLogTime) {
          LOG.info("docsWritten: {}", numDocsWritten);
          // Schedule the next log line relative to *now*. The previous "+=" scheme
          // let the deadline fall far behind after a long stall and then logged on
          // every subsequent batch until it caught up.
          nextLogTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(10);
        }
        batch.clear();
      }
    } catch (SolrServerException e) {
      // The RecordWriter contract only allows IOException; preserve the cause.
      throw new IOException(e);
    }
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
/**
 * Writes one parsed document to the MapReduce context, keyed by its unique-key field,
 * and updates the parser-output/docs-read counters.
 *
 * @param doc the document to emit; must carry a value for the schema's unique key field
 * @throws IOException if the context write fails or the thread is interrupted
 * @throws SolrServerException declared for interface compatibility; not thrown here
 * @throws IllegalArgumentException if the unique key field has no value
 */
@Override
public void load(SolrInputDocument doc) throws IOException, SolrServerException {
  String uniqueKeyFieldName = getSchema().getUniqueKeyField().getName();
  Object id = doc.getFieldValue(uniqueKeyFieldName);
  if (id == null) {
    throw new IllegalArgumentException("Missing value for (required) unique document key: "
        + uniqueKeyFieldName + " (see Solr schema.xml)");
  }
  try {
    context.write(new Text(id.toString()), new SolrInputDocumentWritable(doc));
  } catch (InterruptedException e) {
    // Restore the interrupt flag before translating to the declared IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted while writing " + doc, e);
  }
  if (LOG.isDebugEnabled()) {
    // Byte accounting is comparatively expensive, so only do it at debug level.
    long numParserOutputBytes = 0;
    for (SolrInputField field : doc.values()) {
      numParserOutputBytes += sizeOf(field.getValue());
    }
    context.getCounter(MorphlineCounters.class.getName(),
        MorphlineCounters.PARSER_OUTPUT_BYTES.toString()).increment(numParserOutputBytes);
  }
  context.getCounter(MorphlineCounters.class.getName(),
      MorphlineCounters.DOCS_READ.toString()).increment(1);
}
// Routes a document to a reducer partition. Looks up the collection's DocRouter and
// unwraps the document before routing.
// NOTE(review): method body is truncated in this view (no closing brace visible);
// the actual partition computation is not shown here — do not modify without the full source.
@Override
public int getPartition(Text key, SolrInputDocumentWritable value, int numPartitions) {
  DocRouter docRouter = docCollection.getRouter();
  SolrInputDocument doc = value.getSolrInputDocument();
  String keyStr = key.toString();
/**
 * Writes one parsed document to the MapReduce context, keyed by its unique-key field,
 * and updates the parser-output/docs-read counters.
 *
 * @param doc the document to emit; must carry a value for the schema's unique key field
 * @throws IOException if the context write fails or the thread is interrupted
 * @throws SolrServerException declared for interface compatibility; not thrown here
 * @throws IllegalArgumentException if the unique key field has no value
 */
@Override
public void load(SolrInputDocument doc) throws IOException, SolrServerException {
  String uniqueKeyFieldName = getSchema().getUniqueKeyField().getName();
  Object id = doc.getFieldValue(uniqueKeyFieldName);
  if (id == null) {
    throw new IllegalArgumentException("Missing value for (required) unique document key: "
        + uniqueKeyFieldName + " (see Solr schema.xml)");
  }
  try {
    context.write(new Text(id.toString()), new SolrInputDocumentWritable(doc));
  } catch (InterruptedException e) {
    // Restore the interrupt flag before translating to the declared IOException.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted while writing " + doc, e);
  }
  if (LOG.isDebugEnabled()) {
    // Byte accounting is comparatively expensive, so only do it at debug level.
    long numParserOutputBytes = 0;
    for (SolrInputField field : doc.values()) {
      numParserOutputBytes += sizeOf(field.getValue());
    }
    context.getCounter(MorphlineCounters.class.getName(),
        MorphlineCounters.PARSER_OUTPUT_BYTES.toString()).increment(numParserOutputBytes);
  }
  context.getCounter(MorphlineCounters.class.getName(),
      MorphlineCounters.DOCS_READ.toString()).increment(1);
}
/**
 * Write a record. This method accumulates records in to a batch, and when
 * {@link #batchSize} items are present flushes it to the indexer. The writes
 * can take a substantial amount of time, depending on {@link #batchSize}. If
 * there is heavy disk contention the writes may take more than the 600 second
 * default timeout.
 *
 * @param key ignored here; only the document payload in {@code value} is used
 * @param value expected to be a {@link SolrInputDocumentWritable}
 * @throws IOException if the underlying batch writer fails (SolrServerException is wrapped)
 */
@Override
public void write(K key, V value) throws IOException {
  // Keep the task alive during potentially long queueBatch() calls.
  heartBeater.needHeartBeat();
  try {
    try {
      SolrInputDocumentWritable sidw = (SolrInputDocumentWritable) value;
      batch.add(sidw.getSolrInputDocument());
      if (batch.size() >= batchSize) {
        batchWriter.queueBatch(batch);
        numDocsWritten += batch.size();
        if (System.currentTimeMillis() >= nextLogTime) {
          LOG.info("docsWritten: {}", numDocsWritten);
          // Schedule the next log line 10s from *now*. The previous "+=" scheme let
          // the deadline fall far behind after a long stall and then logged on every
          // subsequent batch until it caught up.
          // NOTE(review): this variant uses wall-clock time; the nextLogTime field is
          // declared elsewhere, so its millisecond semantics are kept here.
          nextLogTime = System.currentTimeMillis() + 10000;
        }
        batch.clear();
      }
    } catch (SolrServerException e) {
      // The RecordWriter contract only allows IOException; preserve the cause.
      throw new IOException(e);
    }
  } finally {
    heartBeater.cancelHeartBeat();
  }
}
// Routes a document to a reducer partition. Looks up the collection's DocRouter and
// unwraps the document before routing.
// NOTE(review): method body is truncated in this view (no closing brace visible);
// the actual partition computation is not shown here — do not modify without the full source.
@Override
public int getPartition(Text key, SolrInputDocumentWritable value, int numPartitions) {
  DocRouter docRouter = docCollection.getRouter();
  SolrInputDocument doc = value.getSolrInputDocument();
  String keyStr = key.toString();