/**
 * Persists the given timeline chunks by delegating directly to the wrapped DAO.
 *
 * @param timelineChunkList the chunks to insert in one bulk operation
 * @param context           the call context forwarded to the delegate
 */
@Override
public void bulkInsertTimelineChunks(final List<TimelineChunk> timelineChunkList, final InternalCallContext context) {
    delegate.bulkInsertTimelineChunks(timelineChunkList, context);
}
/**
 * Accepts a chunk map for persistence. When foreground writes are enabled the
 * chunks are written immediately and the accumulator is notified; otherwise the
 * map is queued for the background writer. Once shutdown has begun the map is
 * rejected with an error log (and is never marked consumed).
 *
 * @param chunkMap the pending chunks to persist
 */
public synchronized void addPendingChunkMap(final PendingChunkMap chunkMap) {
    if (shuttingDown.get()) {
        // Too late to accept new work; the map is dropped.
        log.error("In addPendingChunkMap(), but finishBackgroundWritingAndExit is true!");
        return;
    }
    if (performForegroundWrites) {
        // Synchronous path: write right away and release the map immediately.
        foregroundChunkMapsWritten.incrementAndGet();
        final List<TimelineChunk> chunks = new ArrayList<TimelineChunk>(chunkMap.getChunkMap().values());
        foregroundChunksWritten.addAndGet(chunks.size());
        timelineDAO.bulkInsertTimelineChunks(chunks, createCallContext());
        chunkMap.getAccumulator().markPendingChunkMapConsumed(chunkMap.getPendingChunkMapId());
    } else {
        // Asynchronous path: enqueue for the background writer.
        pendingChunkMapsAdded.incrementAndGet();
        final int count = chunkMap.getChunkCount();
        pendingChunksAdded.addAndGet(count);
        pendingChunks.add(chunkMap);
        pendingChunkCount.addAndGet(count);
    }
}
/**
 * Drains the queue of pending chunk maps under the lock, bulk-inserts every
 * chunk they contain in a single DAO call, and then notifies each accumulator
 * that its map has been consumed.
 */
private void performBackgroundWrites() {
    backgroundWritesCount.incrementAndGet();
    // Swap the shared queue for a fresh one quickly so producers block only briefly.
    final List<PendingChunkMap> drained;
    synchronized (this) {
        drained = pendingChunks;
        pendingChunks = new ArrayList<PendingChunkMap>();
        pendingChunkCount.set(0);
    }
    // Flatten all drained maps into a single batch for one bulk insert.
    final List<TimelineChunk> batch = new ArrayList<TimelineChunk>();
    for (final PendingChunkMap map : drained) {
        pendingChunkMapsWritten.incrementAndGet();
        pendingChunksWritten.addAndGet(map.getChunkMap().size());
        batch.addAll(map.getChunkMap().values());
    }
    timelineDAO.bulkInsertTimelineChunks(batch, createCallContext());
    // Only after the insert returns do we tell the accumulators to release their maps.
    for (final PendingChunkMap map : drained) {
        pendingChunkMapsMarkedConsumed.incrementAndGet();
        map.getAccumulator().markPendingChunkMapConsumed(map.getPendingChunkMapId());
    }
}
private void performWrites() { final InternalCallContext context = createCallContext(); // This is the atomic operation: bulk insert the new aggregated TimelineChunk objects, and delete // or invalidate the ones that were aggregated. This should be very fast. final long startWriteTime = System.currentTimeMillis(); aggregatorSqlDao.begin(); timelineDao.bulkInsertTimelineChunks(chunksToWrite, context); if (config.getDeleteAggregatedChunks()) { aggregatorSqlDao.deleteTimelineChunks(chunkIdsToInvalidateOrDelete, context); } else { aggregatorSqlDao.makeTimelineChunksInvalid(chunkIdsToInvalidateOrDelete, context); } aggregatorSqlDao.commit(); msWritingDb.addAndGet(System.currentTimeMillis() - startWriteTime); timelineChunksWritten.addAndGet(chunksToWrite.size()); timelineChunksInvalidatedOrDeleted.addAndGet(chunkIdsToInvalidateOrDelete.size()); chunksToWrite.clear(); chunkIdsToInvalidateOrDelete.clear(); final long sleepMs = config.getAggregationSleepBetweenBatches().getMillis(); if (sleepMs > 0) { final long timeBeforeSleep = System.currentTimeMillis(); try { Thread.sleep(sleepMs); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } msSpentSleeping.addAndGet(System.currentTimeMillis() - timeBeforeSleep); } timelineChunkBatchesProcessed.incrementAndGet(); }