public void initialize() { if ( initialized == true ) return; synchronized(this) { if ( initialized == true ) return; if ( getBatchSize() <= 0 ) { withBatchSize(1); logger.warn("batchSize should be 1 or greater--setting batchSize to 1"); } if ( transactionSize > 1 ) usingTransactions = true; // if threadCount is negative or 0, use one thread per host if ( getThreadCount() <= 0 ) { withThreadCount( hostInfos.length ); logger.warn("threadCount should be 1 or greater--setting threadCount to number of hosts ({})", hostInfos.length); } // create a thread pool where threads are kept alive for up to one minute of inactivity, // max queue size is threadCount * 3, and callers run tasks past the max queue size threadPool = new CompletableThreadPoolExecutor(getThreadCount(), getThreadCount(), 1, TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>(getThreadCount() * 3)); threadPool.allowCoreThreadTimeOut(true); initialized = true; logger.info("threadCount={}", getThreadCount()); logger.info("batchSize={}", getBatchSize()); if ( usingTransactions == true ) logger.info("transactionSize={}", transactionSize); jobStartTime = Calendar.getInstance(); started.set(true); } }
@Override public WriteBatcher add(DocumentWriteOperation writeOperation) { if ( writeOperation.getUri() == null ) throw new IllegalArgumentException("uri must not be null"); if ( writeOperation.getContent() == null ) throw new IllegalArgumentException("contentHandle must not be null"); initialize(); requireNotStopped(); queue.add(writeOperation); logger.trace("add uri={}", writeOperation.getUri()); // if we have queued batchSize, it's time to flush a batch long recordNum = batchCounter.incrementAndGet(); boolean timeToWriteBatch = (recordNum % getBatchSize()) == 0; if ( timeToWriteBatch ) { BatchWriteSet writeSet = newBatchWriteSet(false); int i=0; for ( ; i < getBatchSize(); i++ ) { DocumentWriteOperation doc = queue.poll(); if ( doc != null ) { writeSet.getWriteSet().add(doc); } else { // strange, there should have been a full batch of docs in the queue... break; } } if ( writeSet.getWriteSet().size() > 0 ) { threadPool.submit( new BatchWriter(writeSet) ); } } return this; }
// NOTE(review): partial fragment -- the declarations of j, iter, and writeSet and the
// loop's closing braces are outside this view. Appears to copy up to batchSize docs
// from the iterator into the current write set; confirm against the full method.
for ( ; j < getBatchSize() && iter.hasNext(); j++ ) { DocumentWriteOperation doc = iter.next(); writeSet.getWriteSet().add(doc);