// Verb handler for batchlog-remove messages: deletes the batch identified by
// the message payload from the local batchlog. No response is sent here.
public void doVerb(MessageIn<UUID> message, int id)
{
    // message.payload is the batch's UUID; 'id' (the message id) is unused.
    BatchlogManager.remove(message.payload);
}
} // closes the enclosing verb-handler class (declaration not visible in this chunk)
/**
 * Triggers a batchlog replay pass and blocks until it completes.
 *
 * @throws Exception if the replay task fails or the wait is interrupted
 *         (propagated from {@code Future#get()})
 */
public void forceBatchlogReplay() throws Exception
{
    // startBatchlogReplay() schedules the replay; get() waits for completion.
    startBatchlogReplay().get();
}
// Convenience overload: stores the batch with the boolean flag fixed to true
// (presumably "durable writes" — confirm against the two-argument overload).
public static void store(Batch batch)
{
    store(batch, true);
}
private void replayFailedBatches() { logger.trace("Started replayFailedBatches"); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272). int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size(); if (endpointsCount <= 0) { logger.trace("Replay cancelled as there are no peers in the ring."); return; } int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount; RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024); UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout()); ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES); int pageSize = calculatePageSize(store); // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify // token(id) > token(lastReplayedUuid) as part of the query. String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.BATCHES); UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid); processBatchlogEntries(batches, pageSize, rateLimiter); lastReplayedUuid = limitUuid; logger.trace("Finished replayFailedBatches"); }
// NOTE(review): fragment of the batch-replay loop; the enclosing method's
// declaration and control flow are not visible in this chunk, so the
// relationship between these statements is inferred — confirm against the
// full method before relying on these notes.
remove(id); // no write mutations were sent (either expired or all CFs involved truncated).
++totalBatchesReplayed;
remove(id); // NOTE(review): second remove(id) — likely belongs to a different branch of the original method; verify it is not an accidental duplicate.
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
positionInPage = 0; // reset the page cursor before processing continues
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
@SuppressWarnings("deprecation") public static void migrate() { ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG); // nothing to migrate if (store.isEmpty()) return; logger.info("Migrating legacy batchlog to new storage"); int convertedBatches = 0; String query = String.format("SELECT id, data, written_at, version FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG); int pageSize = BatchlogManager.calculatePageSize(store); UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize); for (UntypedResultSet.Row row : rows) { if (apply(row, convertedBatches)) convertedBatches++; } if (convertedBatches > 0) Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking(); }
// NOTE(review): statement fragment; the enclosing method (presumably a
// leave/decommission path, given Mode.LEAVING) is not visible here.
// Wait at least RING_DELAY and at least the batchlog timeout so in-flight
// batches can complete and pending range changes can settle.
long timeout = Math.max(RING_DELAY, BatchlogManager.instance.getBatchlogTimeout());
setMode(Mode.LEAVING, "sleeping " + timeout + " ms for batch processing and pending range setup", true);
Thread.sleep(timeout); // enclosing method must declare/handle InterruptedException
// Start the batchlog manager's background processing (fragment; enclosing
// startup method not visible in this chunk).
BatchlogManager.instance.start();
// Stop batchlog processing and pause hint dispatch — presumably part of a
// drain/shutdown sequence; confirm against the enclosing method (not visible).
BatchlogManager.instance.shutdown();
HintsService.instance.pauseDispatch();
private void replayFailedBatches() { logger.trace("Started replayFailedBatches"); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272). int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size(); if (endpointsCount <= 0) { logger.trace("Replay cancelled as there are no peers in the ring."); return; } int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount; RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024); UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout()); ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES); int pageSize = calculatePageSize(store); // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify // token(id) > token(lastReplayedUuid) as part of the query. String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.BATCHES); UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid); processBatchlogEntries(batches, pageSize, rateLimiter); lastReplayedUuid = limitUuid; logger.trace("Finished replayFailedBatches"); }
// NOTE(review): fragment of the batch-replay loop; the enclosing method's
// declaration and control flow are not visible in this chunk, so the
// relationship between these statements is inferred — confirm against the
// full method before relying on these notes.
remove(id); // no write mutations were sent (either expired or all CFs involved truncated).
++totalBatchesReplayed;
remove(id); // NOTE(review): second remove(id) — likely belongs to a different branch of the original method; verify it is not an accidental duplicate.
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
positionInPage = 0; // reset the page cursor before processing continues
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
@SuppressWarnings("deprecation") public static void migrate() { ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG); // nothing to migrate if (store.isEmpty()) return; logger.info("Migrating legacy batchlog to new storage"); int convertedBatches = 0; String query = String.format("SELECT id, data, written_at, version FROM %s.%s", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.LEGACY_BATCHLOG); int pageSize = BatchlogManager.calculatePageSize(store); UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize); for (UntypedResultSet.Row row : rows) { if (apply(row, convertedBatches)) convertedBatches++; } if (convertedBatches > 0) Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking(); }
// NOTE(review): statement fragment; the enclosing method (presumably a
// leave/decommission path, given Mode.LEAVING) is not visible here.
// Wait at least RING_DELAY and at least the batchlog timeout so in-flight
// batches can complete and pending range changes can settle.
long timeout = Math.max(RING_DELAY, BatchlogManager.instance.getBatchlogTimeout());
setMode(Mode.LEAVING, "sleeping " + timeout + " ms for batch processing and pending range setup", true);
Thread.sleep(timeout); // enclosing method must declare/handle InterruptedException
// Start the batchlog manager's background processing (fragment; enclosing
// startup method not visible in this chunk).
BatchlogManager.instance.start();
// Stop batchlog processing and pause hint dispatch — presumably part of a
// drain/shutdown sequence; confirm against the enclosing method (not visible).
BatchlogManager.instance.shutdown();
HintsService.instance.pauseDispatch();
private void replayFailedBatches() { logger.trace("Started replayFailedBatches"); // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml). // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272). int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size(); if (endpointsCount <= 0) { logger.trace("Replay cancelled as there are no peers in the ring."); return; } int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount; RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024); UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout()); ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES); int pageSize = calculatePageSize(store); // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify // token(id) > token(lastReplayedUuid) as part of the query. String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)", SchemaConstants.SYSTEM_KEYSPACE_NAME, SystemKeyspace.BATCHES); UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid); processBatchlogEntries(batches, pageSize, rateLimiter); lastReplayedUuid = limitUuid; logger.trace("Finished replayFailedBatches"); }
// Convenience overload: stores the batch with the boolean flag fixed to true
// (presumably "durable writes" — confirm against the two-argument overload).
public static void store(Batch batch)
{
    store(batch, true);
}
// NOTE(review): fragment of the batch-replay loop; the enclosing method's
// declaration and control flow are not visible in this chunk, so the
// relationship between these statements is inferred — confirm against the
// full method before relying on these notes.
remove(id); // no write mutations were sent (either expired or all CFs involved truncated).
++totalBatchesReplayed;
remove(id); // NOTE(review): second remove(id) — likely belongs to a different branch of the original method; verify it is not an accidental duplicate.
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
positionInPage = 0; // reset the page cursor before processing continues
finishAndClearBatches(unfinishedBatches, hintedNodes, replayedBatches);
// Verb handler for batchlog-remove messages: deletes the batch identified by
// the message payload from the local batchlog. No response is sent here.
public void doVerb(MessageIn<UUID> message, int id)
{
    // message.payload is the batch's UUID; 'id' (the message id) is unused.
    BatchlogManager.remove(message.payload);
}
} // closes the enclosing verb-handler class (declaration not visible in this chunk)
/**
 * Triggers a batchlog replay pass and blocks until it completes.
 *
 * @throws Exception if the replay task fails or the wait is interrupted
 *         (propagated from {@code Future#get()})
 */
public void forceBatchlogReplay() throws Exception
{
    // startBatchlogReplay() schedules the replay; get() waits for completion.
    startBatchlogReplay().get();
}