DispatchHintsTask(HintsStore store, UUID hostId)
{
    this.store = store;
    this.hostId = hostId;

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272).
    // the goal is to bound maximum hints traffic going towards a particular node from the rest of the cluster,
    // not total outgoing hints traffic from this node - this is why the rate limiter is not shared between
    // all the dispatch tasks (as there will be at most one dispatch task for a particular host id at a time).
    int nodesCount = Math.max(1, StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
    int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB() / nodesCount;
    this.rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
}
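A minimal standalone sketch of the same scaling rule, assuming Guava's RateLimiter (the class and method names below are illustrative, not Cassandra's): the configured hinted-handoff throttle in KiB/s is divided across the other nodes so that the aggregate hint traffic converging on any single peer stays bounded.

import com.google.common.util.concurrent.RateLimiter;

class HintThrottleSketch
{
    // configuredThrottleInKB = 0 means "disabled", which maps to an effectively unlimited rate.
    static RateLimiter limiterFor(int configuredThrottleInKB, int clusterSize)
    {
        int otherNodes = Math.max(1, clusterSize - 1);          // never divide by zero
        int perTaskThrottleInKB = configuredThrottleInKB / otherNodes;
        return RateLimiter.create(perTaskThrottleInKB == 0 ? Double.MAX_VALUE
                                                           : perTaskThrottleInKB * 1024);
    }
}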
/**
 * @param keyspaceName keyspace name to check
 * @return true when the node is bootstrapping, useStrictConsistency is true and the number of nodes in the
 *         cluster differs from the keyspace's replication factor
 */
private boolean useStrictSourcesForRanges(String keyspaceName)
{
    AbstractReplicationStrategy strat = Keyspace.open(keyspaceName).getReplicationStrategy();
    return useStrictConsistency && tokens != null && metadata.getAllEndpoints().size() != strat.getReplicationFactor();
}
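An illustrative reduction of the predicate above to plain parameters (the method and parameter names here are hypothetical): strict sources are only used when strict consistency is requested, tokens are already allocated, and the cluster size differs from the replication factor.

// e.g. (true, true, 5, 3) -> true, while (true, true, 3, 3) -> false
static boolean useStrictSources(boolean useStrictConsistency, boolean hasTokens,
                                int endpointCount, int replicationFactor)
{
    return useStrictConsistency && hasTokens && endpointCount != replicationFactor;
}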
private boolean useStrictSourcesForRanges(String keyspaceName)
{
    AbstractReplicationStrategy strat = Keyspace.open(keyspaceName).getReplicationStrategy();
    return isNotReplacingAndUsesStrictConsistency() && tokens != null && metadata.getAllEndpoints().size() != strat.getReplicationFactor();
}
/**
 * Find the best target to stream hints to. Currently the closest peer according to the snitch
 */
private UUID getPreferredHintsStreamTarget()
{
    List<InetAddress> candidates = new ArrayList<>(StorageService.instance.getTokenMetadata().cloneAfterAllLeft().getAllEndpoints());
    candidates.remove(FBUtilities.getBroadcastAddress());
    for (Iterator<InetAddress> iter = candidates.iterator(); iter.hasNext(); )
    {
        InetAddress address = iter.next();
        if (!FailureDetector.instance.isAlive(address))
            iter.remove();
    }

    if (candidates.isEmpty())
    {
        logger.warn("Unable to stream hints since no live endpoints seen");
        throw new RuntimeException("Unable to stream hints since no live endpoints seen");
    }
    else
    {
        // stream to the closest peer as chosen by the snitch
        DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getBroadcastAddress(), candidates);
        InetAddress hintsDestinationHost = candidates.get(0);
        return tokenMetadata.getHostId(hintsDestinationHost);
    }
}
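A self-contained sketch of the same selection idea, with the failure detector and the snitch replaced by a caller-supplied predicate and comparator (all names below are illustrative, not Cassandra's API):

import java.net.InetAddress;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;

class HintsTargetSketch
{
    static Optional<InetAddress> preferredTarget(List<InetAddress> peers,
                                                 InetAddress self,
                                                 Predicate<InetAddress> isAlive,
                                                 Comparator<InetAddress> byProximity)
    {
        return peers.stream()
                    .filter(p -> !p.equals(self)) // never pick ourselves
                    .filter(isAlive)              // drop peers currently considered down
                    .min(byProximity);            // the closest remaining peer wins
    }
}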
private void replayFailedBatches()
{
    logger.trace("Started replayFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    if (endpointsCount <= 0)
    {
        logger.trace("Replay cancelled as there are no peers in the ring.");
        return;
    }
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount;
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
    ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
    int pageSize = calculatePageSize(store);
    // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
    // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify
    // token(id) > token(lastReplayedUuid) as part of the query.
    String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)",
                                 SchemaConstants.SYSTEM_KEYSPACE_NAME,
                                 SystemKeyspace.BATCHES);
    UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid);
    processBatchlogEntries(batches, pageSize, rateLimiter);
    lastReplayedUuid = limitUuid;
    logger.trace("Finished replayFailedBatches");
}
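A rough sketch of the byte-based throttling pattern the replay relies on, assuming Guava's RateLimiter and a hypothetical Batch type: each batch acquires permits equal to its serialized size before being replayed, so replay bandwidth stays under the configured rate.

import com.google.common.util.concurrent.RateLimiter;
import java.util.List;

class BatchReplaySketch
{
    interface Batch { int serializedSize(); void replay(); }

    static void replayAll(List<Batch> batches, RateLimiter bytesPerSecond)
    {
        for (Batch batch : batches)
        {
            // acquire() requires a positive permit count, hence the lower bound of 1
            bytesPerSecond.acquire(Math.max(1, batch.serializedSize()));
            batch.replay();
        }
    }
}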
private void replayAllFailedBatches() throws ExecutionException, InterruptedException
{
    logger.debug("Started replayAllFailedBatches");

    // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
    // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
    int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / StorageService.instance.getTokenMetadata().getAllEndpoints().size();
    RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

    UntypedResultSet page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s LIMIT %d",
                                                          Keyspace.SYSTEM_KS,
                                                          SystemKeyspace.BATCHLOG_CF,
                                                          PAGE_SIZE));
    while (!page.isEmpty())
    {
        UUID id = processBatchlogPage(page, rateLimiter);

        if (page.size() < PAGE_SIZE)
            break; // we've exhausted the batchlog, next query would be empty.

        page = executeInternal(String.format("SELECT id, data, written_at, version FROM %s.%s WHERE token(id) > token(?) LIMIT %d",
                                             Keyspace.SYSTEM_KS,
                                             SystemKeyspace.BATCHLOG_CF,
                                             PAGE_SIZE),
                               id);
    }

    cleanup();

    logger.debug("Finished replayAllFailedBatches");
}
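A generic sketch of the paging loop above, with the token(id) > token(?) query abstracted into a hypothetical fetchPage function: pages are requested keyed by the last processed id, and a short page signals that the batchlog has been exhausted.

import java.util.List;
import java.util.UUID;
import java.util.function.BiFunction;

class PagedReplaySketch
{
    // fetchPage(lastId, pageSize) returns up to pageSize ids after lastId (null means "from the start")
    static void replay(BiFunction<UUID, Integer, List<UUID>> fetchPage, int pageSize)
    {
        UUID lastId = null;
        while (true)
        {
            List<UUID> page = fetchPage.apply(lastId, pageSize);
            if (page.isEmpty())
                break;

            lastId = page.get(page.size() - 1); // processing of the page would go here

            if (page.size() < pageSize)
                break; // short page: the next query would be empty
        }
    }
}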