private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
int tryToStealFromOthers(final ProcessingBucket<I> thief) { if (null == thief) throw new IllegalArgumentException("thief can't be null"); if (!config.isStealingEnabled()) { return 0;
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;
/**
 * Creates a new coordinator instance with a custom configuration and steal policy.
 * <p/>
 * In case {@code null} is provided for either argument, the corresponding default will be used. For the
 * configuration, the default is {@link DefaultAsyncConfig} and for the steal policy, the default is
 * {@link FallBehindStealPolicy}. The {@code maxAllowedFallBehind} value of the default steal policy will be retrieved
 * from the configuration.
 *
 * @param config the custom configuration instance that should be used by this coordinator, if {@code null} is
 *               provided the default configuration will be used
 * @param stealPolicy the custom steal policy that should be used by this coordinator, if {@code null} is provided the
 *                    default steal policy will be used
 */
public AsyncCoordinator(AsyncConfig config, StealPolicy<I> stealPolicy) {
  if (null == config) {
    config = DefaultAsyncConfig.getInstance();
  }
  if (null == stealPolicy) {
    stealPolicy = new FallBehindStealPolicy<I>(config.getMaxAllowedFallBehind());
  }
  // Use the parameterized type instead of the raw ProcessingBucketGroup so the
  // compiler checks that the group's item type matches this coordinator's <I>.
  this.group = new ProcessingBucketGroup<I>(config, stealPolicy);
  // Synchronous-write behavior of the coordinator lock follows the configuration.
  this.coordinatorLock = new TerracottaReadWriteLock(config.isSynchronousWrite());
  this.coordinatorWriteLock = coordinatorLock.writeLock();
  this.coordinatorReadLock = coordinatorLock.readLock();
}
public ProcessingBucketGroup(final ClusterInfo cluster, final AsyncConfig config, final StealPolicy<I> policy) { this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster); this.config = config; this.policy = policy; this.buckets = newMap(); this.groupLock = new TerracottaReadWriteLock(config.isSynchronousWrite()); this.groupWriteLock = groupLock.writeLock(); this.groupReadLock = groupLock.readLock(); }
/**
 * Computes the effective batch size: the configured batch size, capped at the
 * number of items currently quarantined.
 *
 * @return the smaller of the configured batch size and {@code quarantined.size()}
 */
private int determineBatchSize() {
  return Math.min(group.getConfig().getBatchSize(), quarantined.size());
}
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;
/**
 * Creates a new coordinator instance with a custom configuration and steal policy.
 * <p/>
 * In case {@code null} is provided for either argument, the corresponding default will be used. For the
 * configuration, the default is {@link DefaultAsyncConfig} and for the steal policy, the default is
 * {@link FallBehindStealPolicy}. The {@code maxAllowedFallBehind} value of the default steal policy will be retrieved
 * from the configuration.
 *
 * @param config the custom configuration instance that should be used by this coordinator, if {@code null} is
 *               provided the default configuration will be used
 * @param stealPolicy the custom steal policy that should be used by this coordinator, if {@code null} is provided the
 *                    default steal policy will be used
 */
public AsyncCoordinator(AsyncConfig config, StealPolicy<I> stealPolicy) {
  if (null == config) {
    config = DefaultAsyncConfig.getInstance();
  }
  if (null == stealPolicy) {
    stealPolicy = new FallBehindStealPolicy<I>(config.getMaxAllowedFallBehind());
  }
  // Use the parameterized type instead of the raw ProcessingBucketGroup so the
  // compiler checks that the group's item type matches this coordinator's <I>.
  this.group = new ProcessingBucketGroup<I>(config, stealPolicy);
  // Synchronous-write behavior of the coordinator lock follows the configuration.
  this.coordinatorLock = new TerracottaReadWriteLock(config.isSynchronousWrite());
  this.coordinatorWriteLock = coordinatorLock.writeLock();
  this.coordinatorReadLock = coordinatorLock.readLock();
}
public ProcessingBucketGroup(final ClusterInfo cluster, final AsyncConfig config, final StealPolicy<I> policy) { this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster); this.config = config; this.policy = policy; this.buckets = newMap(); this.groupLock = new TerracottaReadWriteLock(config.isSynchronousWrite()); this.groupWriteLock = groupLock.writeLock(); this.groupReadLock = groupLock.readLock(); }
/**
 * Computes the effective batch size: the configured batch size, capped at the
 * number of items currently quarantined.
 *
 * @return the smaller of the configured batch size and {@code quarantined.size()}
 */
private int determineBatchSize() {
  return Math.min(group.getConfig().getBatchSize(), quarantined.size());
}
final int batchSize = group.getConfig().getBatchSize(); if (group.getConfig().isBatchingEnabled() && batchSize > 0) { if (workSize < batchSize && group.getConfig().getMaxAllowedFallBehind() > lastProcessing - lastWorkDone) { if (LOGGER.isLoggable(Level.FINER)) LOGGER.finer(getThreadName() + " : processItems() : only " + workSize + " work items available, waiting for " + batchSize + " items to fill up a batch"); final int rateLimit = group.getConfig().getRateLimit(); if (rateLimit > 0) { final long secondsSinceLastWorkDone;
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
/**
 * Creates a new coordinator instance with a custom configuration and steal policy.
 * <p/>
 * In case {@code null} is provided for either argument, the corresponding default will be used. For the
 * configuration, the default is {@link DefaultAsyncConfig} and for the steal policy, the default is
 * {@link FallBehindStealPolicy}. The {@code maxAllowedFallBehind} value of the default steal policy will be retrieved
 * from the configuration.
 *
 * @param config the custom configuration instance that should be used by this coordinator, if {@code null} is
 *               provided the default configuration will be used
 * @param stealPolicy the custom steal policy that should be used by this coordinator, if {@code null} is provided the
 *                    default steal policy will be used
 */
public AsyncCoordinator(AsyncConfig config, StealPolicy<I> stealPolicy) {
  if (null == config) {
    config = DefaultAsyncConfig.getInstance();
  }
  if (null == stealPolicy) {
    stealPolicy = new FallBehindStealPolicy<I>(config.getMaxAllowedFallBehind());
  }
  // Use the parameterized type instead of the raw ProcessingBucketGroup so the
  // compiler checks that the group's item type matches this coordinator's <I>.
  this.group = new ProcessingBucketGroup<I>(config, stealPolicy);
  // Synchronous-write behavior of the coordinator lock follows the configuration.
  this.coordinatorLock = new TerracottaReadWriteLock(config.isSynchronousWrite());
  this.coordinatorWriteLock = coordinatorLock.writeLock();
  this.coordinatorReadLock = coordinatorLock.readLock();
}
private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }
public ProcessingBucketGroup(final ClusterInfo cluster, final AsyncConfig config, final StealPolicy<I> policy) { this.cluster = ClusterInfoUtil.determineDsoClusterInstance(cluster); this.config = config; this.policy = policy; this.buckets = newMap(); this.groupLock = new TerracottaReadWriteLock(config.isSynchronousWrite()); this.groupWriteLock = groupLock.writeLock(); this.groupReadLock = groupLock.readLock(); }
/**
 * Computes the effective batch size: the configured batch size, capped at the
 * number of items currently quarantined.
 *
 * @return the smaller of the configured batch size and {@code quarantined.size()}
 */
private int determineBatchSize() {
  return Math.min(group.getConfig().getBatchSize(), quarantined.size());
}
int tryToStealFromOthers(final ProcessingBucket<I> thief) { if (null == thief) throw new IllegalArgumentException("thief can't be null"); if (!config.isStealingEnabled()) { return 0;
int executionsLeft = group.getConfig().getRetryAttempts() + 1; while (executionsLeft-- > 0) { try { } else { if (LOGGER.isLoggable(Level.WARNING)) LOGGER.warning(getThreadName() + " : processBatchedItems() : exception during processing, retrying in " + group.getConfig().getRetryAttemptDelay() + " milliseconds, " + executionsLeft + " retries left : " + e.getMessage()); try { Thread.sleep(group.getConfig().getRetryAttemptDelay()); } catch (InterruptedException e1) { Thread.currentThread().interrupt();
private void processQuarantinedItems() throws ProcessingException { // process the quarantined items and remove them as they're processed quarantineWriteLock.lock(); try { if (LOGGER.isLoggable(Level.CONFIG)) LOGGER.config(getThreadName() + " : processQuarantinedItems() : processing " + quarantined.size() + " quarantined items"); // don't process work if this node's operations have been disabled if (cluster != null && !cluster.areOperationsEnabled()) { reassemble(); } else { if (group.getConfig().isBatchingEnabled() && group.getConfig().getBatchSize() > 0) { processBatchedItems(); } else { processSingleItem(); } } quarantined = null; } finally { quarantineWriteLock.unlock(); } }