@Override
public byte[] toByteArray() {
    // Serialization is delegated entirely to the wrapped MessageId.
    return messageId.toByteArray();
}
@Override
public int compareTo(MessageId o) {
    // Ordering is delegated entirely to the wrapped MessageId.
    return messageId.compareTo(o);
}
}
@Override public void onWebSocketText(String message) { super.onWebSocketText(message); // We should have received an ack MessageId msgId; try { ConsumerAck ack = ObjectMapperFactory.getThreadLocal().readValue(message, ConsumerAck.class); msgId = MessageId.fromByteArrayWithTopic(Base64.getDecoder().decode(ack.messageId), topic); } catch (IOException e) { log.warn("Failed to deserialize message id: {}", message, e); close(WebSocketError.FailedToDeserializeFromJSON); return; } consumer.acknowledgeAsync(msgId).thenAccept(consumer -> numMsgsAcked.increment()); int pending = pendingMessages.getAndDecrement(); if (pending >= maxPendingMessages) { // Resume delivery receiveMessage(); } }
@Override
public byte[] toByteArray() {
    // Forward to the underlying MessageId's serialized form.
    return messageId.toByteArray();
}
/** * Since the ack are delayed, we need to do some best-effort duplicate check to discard messages that are being * resent after a disconnection and for which the user has already sent an acknowlowdgement. */ public boolean isDuplicate(MessageId messageId) { if (messageId.compareTo(lastCumulativeAck) <= 0) { // Already included in a cumulative ack return true; } else { return pendingIndividualAcks.contains(messageId); } }
/**
 * Runs phase two of compaction: creates the target ledger (tagged with the
 * source topic and the compaction horizon), then seeks and loops over the range.
 */
private CompletableFuture<Long> phaseTwo(RawReader reader, MessageId from, MessageId to, MessageId lastReadId,
        Map<String, MessageId> latestForKey, BookKeeper bk) {
    // Stamp the compacted ledger with provenance metadata.
    Map<String, byte[]> metadata = ImmutableMap.of(
            "compactedTopic", reader.getTopic().getBytes(UTF_8),
            "compactedTo", to.toByteArray());
    return createLedger(bk, metadata).thenCompose(ledger -> {
        log.info("Commencing phase two of compaction for {}, from {} to {}, compacting {} keys to ledger {}",
                reader.getTopic(), from, to, latestForKey.size(), ledger.getId());
        return phaseTwoSeekThenLoop(reader, from, to, lastReadId, latestForKey, bk, ledger);
    });
}
/**
 * Asynchronously checks whether the broker still holds messages past the last
 * dequeued one, refreshing the cached broker-side last message id if needed.
 *
 * @return a future completing with {@code true} when more messages are available
 */
public CompletableFuture<Boolean> hasMessageAvailableAsync() {
    final CompletableFuture<Boolean> booleanFuture = new CompletableFuture<>();
    if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
            && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
        booleanFuture.complete(true);
    } else {
        getLastMessageIdAsync().thenAccept(messageId -> {
            lastMessageIdInBroker = messageId;
            // Re-check with the refreshed broker-side id; entryId == -1 means empty topic.
            if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
                    && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
                booleanFuture.complete(true);
            } else {
                booleanFuture.complete(false);
            }
        }).exceptionally(e -> {
            // Fix: include the throwable in the log, and never hand a null cause to
            // completeExceptionally (it would throw NPE inside this callback).
            log.error("[{}][{}] Failed getLastMessageId command", topic, subscription, e);
            booleanFuture.completeExceptionally(e.getCause() != null ? e.getCause() : e);
            return null;
        });
    }
    return booleanFuture;
}
// Record the payload size and publish latency (in microseconds) for stats.
updateSentMsgStats(msgSize, TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - now));
if (isConnected()) {
    // Relay the broker's ack to the WebSocket client; the message id is sent
    // base64-encoded alongside the client-supplied request context.
    String messageId = Base64.getEncoder().encodeToString(msgId.toByteArray());
    sendAckResponse(new ProducerAck(messageId, sendRequest.context));
/**
 * Removes every tracked message id up to and including {@code msgId}.
 *
 * @return the number of entries removed from the tracker
 */
public int removeMessagesTill(MessageId msgId) {
    writeLock.lock();
    try {
        int removedCount = 0;
        Iterator<MessageId> it = messageIdPartitionMap.keySet().iterator();
        while (it.hasNext()) {
            MessageId current = it.next();
            if (current.compareTo(msgId) > 0) {
                continue;
            }
            // Drop the id from its partition set (if any) before removing the entry.
            ConcurrentOpenHashSet<MessageId> partitionSet = messageIdPartitionMap.get(current);
            if (partitionSet != null) {
                partitionSet.remove(current);
            }
            it.remove();
            removedCount++;
        }
        return removedCount;
    } finally {
        writeLock.unlock();
    }
}
@Override public void write(Record<T> record) throws Exception { TypedMessageBuilder<T> msg = pulsarSinkProcessor.newMessage(record); if (record.getKey().isPresent()) { msg.key(record.getKey().get()); } msg.value(record.getValue()); if (!record.getProperties().isEmpty()) { msg.properties(record.getProperties()); } SinkRecord<T> sinkRecord = (SinkRecord<T>) record; if (sinkRecord.getSourceRecord() instanceof PulsarRecord) { PulsarRecord<T> pulsarRecord = (PulsarRecord<T>) sinkRecord.getSourceRecord(); // forward user properties to sink-topic msg.property("__pfn_input_topic__", pulsarRecord.getTopicName().get()) .property("__pfn_input_msg_id__", new String(Base64.getEncoder().encode(pulsarRecord.getMessageId().toByteArray()))); } else { // It is coming from some source Optional<Long> eventTime = sinkRecord.getSourceRecord().getEventTime(); if (eventTime.isPresent()) { msg.eventTime(eventTime.get()); } } pulsarSinkProcessor.sendOutputMessage(msg, record); }
/**
 * Blocking check for whether more messages are available past the last
 * dequeued one, consulting the broker when the cached id is inconclusive.
 *
 * @throws PulsarClientException if the broker lookup fails or the wait is interrupted
 */
public boolean hasMessageAvailable() throws PulsarClientException {
    try {
        if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
                && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
            return true;
        }
        return hasMessageAvailableAsync().get();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new PulsarClientException(e);
    } catch (ExecutionException e) {
        throw new PulsarClientException(e);
    }
}
dm.messageId = Base64.getEncoder().encodeToString(msg.getMessageId().toByteArray()); dm.payload = Base64.getEncoder().encodeToString(msg.getData()); dm.properties = msg.getProperties();
@Override
public int compareTo(MessageId o) {
    // Forward comparison to the underlying MessageId.
    return messageId.compareTo(o);
}
}
dm.messageId = Base64.getEncoder().encodeToString(msg.getMessageId().toByteArray()); dm.payload = Base64.getEncoder().encodeToString(msg.getData()); dm.properties = msg.getProperties();
/** * Since the ack are delayed, we need to do some best-effort duplicate check to discard messages that are being * resent after a disconnection and for which the user has already sent an acknowlowdgement. */ public boolean isDuplicate(MessageId messageId) { if (messageId.compareTo(lastCumulativeAck) <= 0) { // Already included in a cumulative ack return true; } else { return pendingIndividualAcks.contains(messageId); } }
/**
 * Removes all message ids up to and including {@code msgId} from both
 * tracking sets.
 *
 * @return the total number of message ids removed
 */
public int removeMessagesTill(MessageId msgId) {
    // NOTE(review): mutation happens under the *read* lock. This looks intentional —
    // presumably the read/write lock guards the swap of currentSet/oldOpenSet rather
    // than access to the (thread-safe) sets themselves — but confirm before changing.
    readLock.lock();
    try {
        int currentSetRemovedMsgCount = currentSet.removeIf(m -> (m.compareTo(msgId) <= 0));
        int oldSetRemovedMsgCount = oldOpenSet.removeIf(m -> (m.compareTo(msgId) <= 0));
        return currentSetRemovedMsgCount + oldSetRemovedMsgCount;
    } finally {
        readLock.unlock();
    }
}
/**
 * Asynchronously determines whether the broker holds messages beyond the last
 * dequeued one. The cached broker-side last message id is refreshed via
 * {@code getLastMessageIdAsync()} when the cached value is inconclusive.
 *
 * @return a future that completes with {@code true} iff more messages exist
 */
public CompletableFuture<Boolean> hasMessageAvailableAsync() {
    final CompletableFuture<Boolean> booleanFuture = new CompletableFuture<>();
    if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
            && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
        booleanFuture.complete(true);
    } else {
        getLastMessageIdAsync().thenAccept(messageId -> {
            lastMessageIdInBroker = messageId;
            // entryId == -1 indicates an empty topic, so no messages are available.
            if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
                    && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
                booleanFuture.complete(true);
            } else {
                booleanFuture.complete(false);
            }
        }).exceptionally(e -> {
            // Fix: log the actual throwable, and guard against a null cause —
            // completeExceptionally(null) throws NPE inside this callback.
            log.error("[{}][{}] Failed getLastMessageId command", topic, subscription, e);
            booleanFuture.completeExceptionally(e.getCause() != null ? e.getCause() : e);
            return null;
        });
    }
    return booleanFuture;
}
/**
 * Synchronously checks for available messages, first against the cached
 * broker-side last message id and then by querying the broker.
 *
 * @throws PulsarClientException on broker failure or interruption
 */
public boolean hasMessageAvailable() throws PulsarClientException {
    try {
        if (lastMessageIdInBroker.compareTo(lastDequeuedMessage) > 0
                && ((MessageIdImpl) lastMessageIdInBroker).getEntryId() != -1) {
            return true;
        }
        return hasMessageAvailableAsync().get();
    } catch (InterruptedException e) {
        // Fix: re-interrupt so the interruption is not silently swallowed.
        Thread.currentThread().interrupt();
        throw new PulsarClientException(e);
    } catch (ExecutionException e) {
        throw new PulsarClientException(e);
    }
}
if (id.compareTo(lastMessageId) == 0) {
    // We have read up to the topic's last message id: phase one is complete,
    // so fulfil the promise with everything collected so far.
    loopPromise.complete(new PhaseOneResult(first, to, lastMessageId, latestForKey));
} else {