// NOTE(review): this fragment is truncated — the enclosing method, its closing braces,
// and (presumably) the try/catch that owns the final log.error are outside this chunk.
// Verify against the full file before editing.
if (currentPartitionCount != prevPartitionCount) {
  // Partition count for this stream differs from the previously recorded value.
  log.warn(String.format("Change of partition count detected in stream %s. old partition count: %d, current partition count: %d", systemStream.toString(), prevPartitionCount, currentPartitionCount));
  if (currentPartitionCount > prevPartitionCount) {
    // Partition expansion detected: record the stream so the job can be shut down
    // (stateful) or restarted (stateless) by the caller.
    log.error(String.format("Shutting down (stateful) or restarting (stateless) the job since current " + "partition count %d is greater than the old partition count %d for stream %s.", currentPartitionCount, prevPartitionCount, systemStream.toString()));
    streamsChanged.add(systemStream);
    // NOTE(review): this error log looks like it belongs to a catch block not visible here — confirm.
    log.error(String.format("Error comparing partition count differences for stream: %s", metadataEntry.getKey().toString()));
// NOTE(review): this fragment is truncated — the enclosing method, its closing braces,
// and (presumably) the try/catch that owns the final log.error are outside this chunk.
// Verify against the full file before editing.
if (currentPartitionCount != prevPartitionCount) {
  // Partition count for this stream differs from the previously recorded value.
  log.warn(String.format("Change of partition count detected in stream %s. old partition count: %d, current partition count: %d", systemStream.toString(), prevPartitionCount, currentPartitionCount));
  if (currentPartitionCount > prevPartitionCount) {
    // Partition expansion detected: record the stream so the job can be shut down
    // (stateful) or restarted (stateless) by the caller.
    log.error(String.format("Shutting down (stateful) or restarting (stateless) the job since current " + "partition count %d is greater than the old partition count %d for stream %s.", currentPartitionCount, prevPartitionCount, systemStream.toString()));
    streamsChanged.add(systemStream);
    // NOTE(review): this error log looks like it belongs to a catch block not visible here — confirm.
    log.error(String.format("Error comparing partition count differences for stream: %s", metadataEntry.getKey().toString()));
// NOTE(review): this fragment is truncated — the enclosing method, its closing braces,
// and (presumably) the try/catch that owns the final log.error are outside this chunk.
// Verify against the full file before editing.
if (currentPartitionCount != prevPartitionCount) {
  // Partition count for this stream differs from the previously recorded value.
  log.warn(String.format("Change of partition count detected in stream %s. old partition count: %d, current partition count: %d", systemStream.toString(), prevPartitionCount, currentPartitionCount));
  if (currentPartitionCount > prevPartitionCount) {
    // Partition expansion detected: record the stream so the job can be shut down
    // (stateful) or restarted (stateless) by the caller.
    log.error(String.format("Shutting down (stateful) or restarting (stateless) the job since current " + "partition count %d is greater than the old partition count %d for stream %s.", currentPartitionCount, prevPartitionCount, systemStream.toString()));
    streamsChanged.add(systemStream);
    // NOTE(review): this error log looks like it belongs to a catch block not visible here — confirm.
    log.error(String.format("Error comparing partition count differences for stream: %s", metadataEntry.getKey().toString()));
// NOTE(review): this fragment is truncated — the enclosing method, its closing braces,
// and (presumably) the try/catch that owns the final log.error are outside this chunk.
// Verify against the full file before editing.
if (currentPartitionCount != prevPartitionCount) {
  // Partition count for this stream differs from the previously recorded value.
  log.warn(String.format("Change of partition count detected in stream %s. old partition count: %d, current partition count: %d", systemStream.toString(), prevPartitionCount, currentPartitionCount));
  if (currentPartitionCount > prevPartitionCount) {
    // Partition expansion detected: record the stream so the job can be shut down
    // (stateful) or restarted (stateless) by the caller.
    log.error(String.format("Shutting down (stateful) or restarting (stateless) the job since current " + "partition count %d is greater than the old partition count %d for stream %s.", currentPartitionCount, prevPartitionCount, systemStream.toString()));
    streamsChanged.add(systemStream);
    // NOTE(review): this error log looks like it belongs to a catch block not visible here — confirm.
    log.error(String.format("Error comparing partition count differences for stream: %s", metadataEntry.getKey().toString()));
// NOTE(review): this fragment is truncated — the enclosing method, its closing braces,
// and (presumably) the try/catch that owns the final log.error are outside this chunk.
// Verify against the full file before editing.
if (currentPartitionCount != prevPartitionCount) {
  // Partition count for this stream differs from the previously recorded value.
  log.warn(String.format("Change of partition count detected in stream %s. old partition count: %d, current partition count: %d", systemStream.toString(), prevPartitionCount, currentPartitionCount));
  if (currentPartitionCount > prevPartitionCount) {
    // Partition expansion detected: record the stream so the job can be shut down
    // (stateful) or restarted (stateless) by the caller.
    log.error(String.format("Shutting down (stateful) or restarting (stateless) the job since current " + "partition count %d is greater than the old partition count %d for stream %s.", currentPartitionCount, prevPartitionCount, systemStream.toString()));
    streamsChanged.add(systemStream);
    // NOTE(review): this error log looks like it belongs to a catch block not visible here — confirm.
    log.error(String.format("Error comparing partition count differences for stream: %s", metadataEntry.getKey().toString()));
// Parse the inclusive upper bound of a "[start-end]" partition range segment
// (text between the last '-' and the closing ']').
int partitionEnd = Integer.valueOf(partitionSegment.substring(partitionSegment.lastIndexOf("-") + 1, partitionSegment.indexOf("]")));
if (partitionStart > partitionEnd) {
  // Inverted range (e.g. "[5-2]"): warn and add nothing.
  LOGGER.warn("The starting partition in stream " + systemStream.toString() + " is bigger than the ending Partition. No partition is added");
// NOTE(review): the closing brace(s) of this if-block are outside this chunk.
// Parse the inclusive upper bound of a "[start-end]" partition range segment
// (text between the last '-' and the closing ']').
int partitionEnd = Integer.valueOf(partitionSegment.substring(partitionSegment.lastIndexOf("-") + 1, partitionSegment.indexOf("]")));
if (partitionStart > partitionEnd) {
  // Inverted range (e.g. "[5-2]"): warn and add nothing.
  LOGGER.warn("The starting partition in stream " + systemStream.toString() + " is bigger than the ending Partition. No partition is added");
// NOTE(review): the closing brace(s) of this if-block are outside this chunk.
// Parse the inclusive upper bound of a "[start-end]" partition range segment
// (text between the last '-' and the closing ']').
int partitionEnd = Integer.valueOf(partitionSegment.substring(partitionSegment.lastIndexOf("-") + 1, partitionSegment.indexOf("]")));
if (partitionStart > partitionEnd) {
  // Inverted range (e.g. "[5-2]"): warn and add nothing.
  LOGGER.warn("The starting partition in stream " + systemStream.toString() + " is bigger than the ending Partition. No partition is added");
// NOTE(review): the closing brace(s) of this if-block are outside this chunk.
// Parse the inclusive upper bound of a "[start-end]" partition range segment
// (text between the last '-' and the closing ']').
int partitionEnd = Integer.valueOf(partitionSegment.substring(partitionSegment.lastIndexOf("-") + 1, partitionSegment.indexOf("]")));
if (partitionStart > partitionEnd) {
  // Inverted range (e.g. "[5-2]"): warn and add nothing.
  LOGGER.warn("The starting partition in stream " + systemStream.toString() + " is bigger than the ending Partition. No partition is added");
// NOTE(review): the closing brace(s) of this if-block are outside this chunk.
// Parse the inclusive upper bound of a "[start-end]" partition range segment
// (text between the last '-' and the closing ']').
int partitionEnd = Integer.valueOf(partitionSegment.substring(partitionSegment.lastIndexOf("-") + 1, partitionSegment.indexOf("]")));
if (partitionStart > partitionEnd) {
  // Inverted range (e.g. "[5-2]"): warn and add nothing.
  LOGGER.warn("The starting partition in stream " + systemStream.toString() + " is bigger than the ending Partition. No partition is added");
// NOTE(review): the closing brace(s) of this if-block are outside this chunk.
// Best-effort logging: enqueueing blocked longer than queueTimeoutS seconds,
// so the pending messages are dropped rather than stalling the caller.
log.warn(String.format("Exceeded timeout %ss while trying to log to %s. Dropping %d log messages.", queueTimeoutS, systemStream.toString(), messagesDropped));
// Best-effort logging: enqueueing blocked longer than queueTimeoutS seconds,
// so the pending messages are dropped rather than stalling the caller.
log.warn(String.format("Exceeded timeout %ss while trying to log to %s. Dropping %d log messages.", queueTimeoutS, systemStream.toString(), messagesDropped));
/** * Aggregate {@link EndOfStreamMessage} from each ssp of the stream. * Invoke onEndOfStream() if the stream reaches the end. * @param eos {@link EndOfStreamMessage} object * @param ssp system stream partition * @param collector message collector * @param coordinator task coordinator */ public final void aggregateEndOfStream(EndOfStreamMessage eos, SystemStreamPartition ssp, MessageCollector collector, TaskCoordinator coordinator) { LOG.info("Received end-of-stream message from task {} in {}", eos.getTaskName(), ssp); eosStates.update(eos, ssp); SystemStream stream = ssp.getSystemStream(); if (eosStates.isEndOfStream(stream)) { LOG.info("Input {} reaches the end for task {}", stream.toString(), taskName.getTaskName()); if (eos.getTaskName() != null) { // This is the aggregation task, which already received all the eos messages from upstream // broadcast the end-of-stream to all the peer partitions controlMessageSender.broadcastToOtherPartitions(new EndOfStreamMessage(), ssp, collector); } // populate the end-of-stream through the dag onEndOfStream(collector, coordinator); if (eosStates.allEndOfStream()) { // all inputs have been end-of-stream, shut down the task LOG.info("All input streams have reached the end for task {}", taskName.getTaskName()); coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } } }
/** * Aggregate {@link EndOfStreamMessage} from each ssp of the stream. * Invoke onEndOfStream() if the stream reaches the end. * @param eos {@link EndOfStreamMessage} object * @param ssp system stream partition * @param collector message collector * @param coordinator task coordinator */ public final void aggregateEndOfStream(EndOfStreamMessage eos, SystemStreamPartition ssp, MessageCollector collector, TaskCoordinator coordinator) { LOG.info("Received end-of-stream message from task {} in {}", eos.getTaskName(), ssp); eosStates.update(eos, ssp); SystemStream stream = ssp.getSystemStream(); if (eosStates.isEndOfStream(stream)) { LOG.info("Input {} reaches the end for task {}", stream.toString(), taskName.getTaskName()); if (eos.getTaskName() != null) { // This is the aggregation task, which already received all the eos messages from upstream // broadcast the end-of-stream to all the peer partitions controlMessageSender.broadcastToOtherPartitions(new EndOfStreamMessage(), ssp, collector); } // populate the end-of-stream through the dag onEndOfStream(collector, coordinator); if (eosStates.allEndOfStream()) { // all inputs have been end-of-stream, shut down the task LOG.info("All input streams have reached the end for task {}", taskName.getTaskName()); coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } } }
/** * Aggregate {@link EndOfStreamMessage} from each ssp of the stream. * Invoke onEndOfStream() if the stream reaches the end. * @param eos {@link EndOfStreamMessage} object * @param ssp system stream partition * @param collector message collector * @param coordinator task coordinator */ public final void aggregateEndOfStream(EndOfStreamMessage eos, SystemStreamPartition ssp, MessageCollector collector, TaskCoordinator coordinator) { LOG.info("Received end-of-stream message from task {} in {}", eos.getTaskName(), ssp); eosStates.update(eos, ssp); SystemStream stream = ssp.getSystemStream(); if (eosStates.isEndOfStream(stream)) { LOG.info("Input {} reaches the end for task {}", stream.toString(), taskName.getTaskName()); if (eos.getTaskName() != null) { // This is the aggregation task, which already received all the eos messages from upstream // broadcast the end-of-stream to all the peer partitions controlMessageSender.broadcastToOtherPartitions(new EndOfStreamMessage(), ssp, collector); } // populate the end-of-stream through the dag onEndOfStream(collector, coordinator); if (eosStates.allEndOfStream()) { // all inputs have been end-of-stream, shut down the task LOG.info("All input streams have reached the end for task {}", taskName.getTaskName()); coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } } }
/** * Aggregate {@link EndOfStreamMessage} from each ssp of the stream. * Invoke onEndOfStream() if the stream reaches the end. * @param eos {@link EndOfStreamMessage} object * @param ssp system stream partition * @param collector message collector * @param coordinator task coordinator */ public final void aggregateEndOfStream(EndOfStreamMessage eos, SystemStreamPartition ssp, MessageCollector collector, TaskCoordinator coordinator) { LOG.info("Received end-of-stream message from task {} in {}", eos.getTaskName(), ssp); eosStates.update(eos, ssp); SystemStream stream = ssp.getSystemStream(); if (eosStates.isEndOfStream(stream)) { LOG.info("Input {} reaches the end for task {}", stream.toString(), taskName.getTaskName()); if (eos.getTaskName() != null) { // This is the aggregation task, which already received all the eos messages from upstream // broadcast the end-of-stream to all the peer partitions controlMessageSender.broadcastToOtherPartitions(new EndOfStreamMessage(), ssp, collector); } // populate the end-of-stream through the dag onEndOfStream(collector, coordinator); if (eosStates.allEndOfStream()) { // all inputs have been end-of-stream, shut down the task LOG.info("All input streams have reached the end for task {}", taskName.getTaskName()); coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } } }
/** * Aggregate {@link EndOfStreamMessage} from each ssp of the stream. * Invoke onEndOfStream() if the stream reaches the end. * @param eos {@link EndOfStreamMessage} object * @param ssp system stream partition * @param collector message collector * @param coordinator task coordinator */ public final void aggregateEndOfStream(EndOfStreamMessage eos, SystemStreamPartition ssp, MessageCollector collector, TaskCoordinator coordinator) { LOG.info("Received end-of-stream message from task {} in {}", eos.getTaskName(), ssp); eosStates.update(eos, ssp); SystemStream stream = ssp.getSystemStream(); if (eosStates.isEndOfStream(stream)) { LOG.info("Input {} reaches the end for task {}", stream.toString(), taskName.getTaskName()); if (eos.getTaskName() != null) { // This is the aggregation task, which already received all the eos messages from upstream // broadcast the end-of-stream to all the peer partitions controlMessageSender.broadcastToOtherPartitions(new EndOfStreamMessage(), ssp, collector); } // populate the end-of-stream through the dag onEndOfStream(collector, coordinator); if (eosStates.allEndOfStream()) { // all inputs have been end-of-stream, shut down the task LOG.info("All input streams have reached the end for task {}", taskName.getTaskName()); coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } } }