// Fires the stored completion callback, if one was registered, with the captured
// offsets and exception. NOTE(review): `callback`, `offsets`, and `exception` are
// fields of the enclosing class, which starts before this view — presumably this is
// a deferred-callback holder drained by the consumer's poll loop; confirm at the class.
// The trailing extra `}` closes that enclosing class.
public void invoke() { if (callback != null) callback.onComplete(offsets, exception); } }
/**
 * Records the given offsets as committed and, when a callback is supplied,
 * completes it immediately and successfully (mock/synchronous behavior).
 *
 * @param offsets  per-partition offsets to record as committed
 * @param callback optional completion callback; invoked inline with a null exception
 */
@Override
public synchronized void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
  ensureNotClosed();
  // Idiom fix: Map.putAll is equivalent to the former per-entry put loop.
  committed.putAll(offsets);
  if (callback != null) {
    // Mock semantics: the "async" commit completes immediately with no error.
    callback.onComplete(offsets, null);
  }
}
/**
 * Bridges the raw commit result back to the user's callback: the raw metadata string
 * wraps both the user offset and the user metadata, so each entry is unwrapped via
 * LiKafkaClientsUtils before the user callback is invoked. A null offset map is
 * forwarded to the user callback as-is.
 *
 * @param topicPartitionOffsetAndMetadataMap raw (wrapped) committed offsets, may be null
 * @param e                                  commit failure, or null on success
 */
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsetAndMetadataMap, Exception e) {
  // Nothing registered by the user: drop the result.
  if (_userCallback == null) {
    return;
  }
  Map<TopicPartition, OffsetAndMetadata> translated = topicPartitionOffsetAndMetadataMap;
  if (topicPartitionOffsetAndMetadataMap != null) {
    translated = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : topicPartitionOffsetAndMetadataMap.entrySet()) {
      // The wrapped metadata string encodes both the user-visible offset and metadata.
      String wrapped = entry.getValue().metadata();
      translated.put(entry.getKey(),
          new OffsetAndMetadata(
              LiKafkaClientsUtils.offsetFromWrappedMetadata(wrapped),
              LiKafkaClientsUtils.metadataFromWrappedMetadata(wrapped)));
    }
  }
  _userCallback.onComplete(translated, e);
}
/**
 * Schedules an asynchronous commit of the given offsets. The commit is executed on
 * {@code executor} after a short delay; its outcome (success or the caught exception)
 * is delivered to the optional callback via the {@code completedCallbacks} queue,
 * which is presumably drained on the consumer thread — TODO confirm at the drain site.
 *
 * @param offsets  per-partition offsets to commit
 * @param callback optional completion callback; may be null per the KafkaConsumer contract
 */
@Override
public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback) {
  acquire();
  try {
    executor.schedule(() -> {
      Exception commitError = null;
      try {
        commitSync(offsets);
      } catch (Exception exception) {
        commitError = exception;
      }
      // BUG FIX: callback is optional (the sibling commitAsync overload null-checks it),
      // but the old code dereferenced it unconditionally inside the deferred task,
      // causing an NPE when a null callback's completion was later invoked.
      if (callback != null) {
        final Exception result = commitError;
        completedCallbacks.add(() -> callback.onComplete(offsets, result));
      }
    }, 10, TimeUnit.MILLISECONDS);
  } finally {
    release();
  }
}