/**
 * Forces the underlying system producer to flush any buffered messages
 * for this component's source. No-op when the producer was never created.
 */
public void flushSystemProducer() {
  if (systemProducer == null) {
    return;
  }
  systemProducer.flush(SOURCE);
}
/**
 * Registers a source with the underlying SystemProducer.
 *
 * @param source The source to register.
 */
public void register(String source) {
  // Pure delegation; lifecycle constraints (e.g. registering after start)
  // are enforced by the SystemProducer implementation itself.
  systemProducer.register(source);
}
/**
 * Stops the underlying SystemProducer and SystemAdmin, then marks this
 * coordinator stream producer as no longer started so it can be restarted.
 */
public void stop() {
  log.info("Stopping coordinator stream producer.");
  systemProducer.stop();
  systemAdmin.stop();
  // NOTE(review): if systemProducer.stop() throws, systemAdmin.stop() and the
  // isStarted reset are skipped — confirm this is the intended failure behavior.
  isStarted = false;
}
/**
 * Verifies that a producer configured with PARTITION_KEY_AS_PARTITION can
 * send a batch of events targeted at a single partition, flush, and stop.
 */
@Test
public void testSendToSpecificPartition() {
  Config eventHubConfig = MockEventHubConfigFactory.getEventHubConfig(
      EventHubSystemProducer.PartitioningMethod.PARTITION_KEY_AS_PARTITION);
  EventHubSystemFactory factory = new EventHubSystemFactory();
  SystemProducer producer = factory.getProducer(SYSTEM_NAME, eventHubConfig, new NoOpMetricsRegistry());

  producer.register(STREAM_NAME1);
  producer.start();

  // Send a batch of events, all keyed to partition 0.
  final int numEvents = 100;
  for (int eventIndex = 0; eventIndex < numEvents; eventIndex++) {
    producer.send(STREAM_NAME1, createMessageEnvelope(STREAM_NAME1, 0));
  }

  producer.flush(STREAM_NAME1);
  producer.stop();
}
/**
 * Initializes the coordinator stream store exactly once: starts the consumer,
 * registers and starts the producer, and bootstraps existing messages from
 * the coordinator stream partition. Subsequent calls are no-ops.
 */
@Override
public void init() {
  // compareAndSet guarantees the bootstrap sequence runs at most once even
  // under concurrent init() calls.
  if (isInitialized.compareAndSet(false, true)) {
    LOG.info("Starting the coordinator stream system consumer with config: {}.", config);
    registerConsumer();
    systemConsumer.start();
    systemProducer.register(SOURCE);
    systemProducer.start();
    iterator = new SystemStreamPartitionIterator(systemConsumer, coordinatorSystemStreamPartition);
    bootstrapMessagesFromStream();
  } else {
    // Fix: the original format string had no {} placeholder, so the partition
    // argument was silently dropped by SLF4J.
    LOG.info("Store had already been initialized. Skipping {}.", coordinatorSystemStreamPartition);
  }
}
/**
 * Writes a key/value pair to partition 0 of the coordinator stream and
 * flushes immediately so the write is durable before returning.
 */
@Override
public void put(byte[] key, byte[] value) {
  systemProducer.send(SOURCE, new OutgoingMessageEnvelope(coordinatorSystemStream, 0, key, value));
  flush();
}
/**
 * Verifies that the EventHub system factory produces a non-null producer
 * that can register multiple streams, start, and stop cleanly.
 */
@Test
public void testSystemFactoryCreateAndStartProducer() {
  Config eventHubConfig = createEventHubConfig();
  SystemProducer producer =
      new EventHubSystemFactory().getProducer(SYSTEM_NAME, eventHubConfig, new NoOpMetricsRegistry());
  Assert.assertNotNull(producer);

  producer.register(STREAM_NAME1);
  producer.register(STREAM_NAME2);
  producer.start();
  producer.stop();
}
/**
 * A bulk request that fails wholesale with a Throwable must surface as a
 * SamzaException on the next flush of that source.
 */
@Test(expected = SamzaException.class)
public void testFlushFailedSendFromException() throws Exception {
  ArgumentCaptor<BulkProcessor.Listener> captor = ArgumentCaptor.forClass(BulkProcessor.Listener.class);
  when(BULK_PROCESSOR_FACTORY.getBulkProcessor(eq(CLIENT), captor.capture())).thenReturn(processorOne);
  producer.register(SOURCE_ONE);

  // Simulate the whole bulk request failing with an arbitrary Throwable.
  captor.getValue().afterBulk(0, null, new Throwable());

  // The recorded failure should be rethrown here.
  producer.flush(SOURCE_ONE);
}
/**
 * Creates the coordinator stream, and starts the system producer.
 * Idempotent: calling start() again after a successful start is a no-op.
 */
public void start() {
  if (isStarted) {
    log.info("Coordinator stream producer already started");
    return;
  }
  log.info("Starting coordinator stream producer.");
  systemProducer.start();
  systemAdmin.start();
  // Only mark started after both components start successfully.
  isStarted = true;
}
/**
 * Exercises the producer lifecycle contract:
 * - send before start() throws,
 * - send to an unregistered stream throws,
 * - register after start() throws,
 * - a normal send/flush/stop sequence succeeds.
 */
@Test
public void testSend() {
  Config eventHubConfig = createEventHubConfig();
  EventHubSystemFactory factory = new EventHubSystemFactory();
  SystemProducer producer = factory.getProducer(SYSTEM_NAME, eventHubConfig, new NoOpMetricsRegistry());
  producer.register(STREAM_NAME1);

  // Sending before start() must be rejected.
  try {
    producer.send(STREAM_NAME1, createMessageEnvelope(STREAM_NAME1));
    Assert.fail("Sending event before starting producer should throw exception");
  } catch (SamzaException expected) {
  }

  producer.start();
  producer.send(STREAM_NAME1, createMessageEnvelope(STREAM_NAME1));

  // Sending to an unregistered destination must be rejected.
  try {
    producer.send("unregistered_stream", createMessageEnvelope("unregistered_stream"));
    Assert.fail("Sending event to destination that is not registered should throw exception");
  } catch (SamzaException expected) {
  }

  // Registering after start() must be rejected.
  try {
    producer.register(STREAM_NAME2);
    Assert.fail("Trying to register after starting producer should throw exception");
  } catch (SamzaException expected) {
  }

  producer.flush(STREAM_NAME1);
  producer.stop();
}
/**
 * Initializes the coordinator stream store exactly once: starts the consumer,
 * registers and starts the producer, and bootstraps existing messages from
 * the coordinator stream partition. Subsequent calls are no-ops.
 */
@Override
public void init() {
  // compareAndSet guarantees the bootstrap sequence runs at most once even
  // under concurrent init() calls.
  if (isInitialized.compareAndSet(false, true)) {
    LOG.info("Starting the coordinator stream system consumer with config: {}.", config);
    registerConsumer();
    systemConsumer.start();
    systemProducer.register(SOURCE);
    systemProducer.start();
    iterator = new SystemStreamPartitionIterator(systemConsumer, coordinatorSystemStreamPartition);
    bootstrapMessagesFromStream();
  } else {
    // Fix: the original format string had no {} placeholder, so the partition
    // argument was silently dropped by SLF4J.
    LOG.info("Store had already been initialized. Skipping {}.", coordinatorSystemStreamPartition);
  }
}
/**
 * Starts the daemon thread that drains serialized log events from
 * {@code logQueue} and forwards them to the system producer. The thread
 * runs until interrupted.
 */
private void startTransferThread() {
  try {
    // Serialize the key once, since we will use it for every event.
    // NOTE(review): key.getBytes(StandardCharsets.UTF_8) would avoid the
    // checked UnsupportedEncodingException entirely — UTF-8 is guaranteed
    // present on every JVM, so the catch below is effectively dead code.
    final byte[] keyBytes = key.getBytes("UTF-8");
    // Transfer loop: blocks on the queue, wraps each event in an envelope,
    // and hands it to the producer. Exits only when interrupted.
    Runnable transferFromQueueToSystem = () -> {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          byte[] serializedLogEvent = logQueue.take();
          OutgoingMessageEnvelope outgoingMessageEnvelope =
              new OutgoingMessageEnvelope(systemStream, keyBytes, serializedLogEvent);
          systemProducer.send(SOURCE, outgoingMessageEnvelope);
        } catch (InterruptedException e) {
          // Preserve the interrupted status for the loop condition.
          Thread.currentThread().interrupt();
        } catch (Throwable t) {
          // Swallow everything else so one bad event cannot kill the
          // transfer thread (and thereby silently stop all log shipping).
          log.error("Error sending StreamAppender event to SystemProducer", t);
        }
      }
    };
    transferThread = new Thread(transferFromQueueToSystem);
    // Daemon so this background thread never blocks JVM shutdown.
    transferThread.setDaemon(true);
    transferThread.setName("Samza StreamAppender Producer " + transferThread.getName());
    transferThread.start();
  } catch (UnsupportedEncodingException e) {
    throw new SamzaException(String.format(
        "Container name: %s could not be encoded to bytes. StreamAppender cannot proceed.", key), e);
  }
}
/**
 * A bulk response containing a document-level failure (HTTP 400) must be
 * recorded and rethrown as a SamzaException on the next flush.
 */
@Test(expected = SamzaException.class)
public void testFlushFailedSendFromFailedDocument() throws Exception {
  ArgumentCaptor<BulkProcessor.Listener> captor = ArgumentCaptor.forClass(BulkProcessor.Listener.class);
  when(BULK_PROCESSOR_FACTORY.getBulkProcessor(eq(CLIENT), captor.capture())).thenReturn(processorOne);
  producer.register(SOURCE_ONE);

  // Deliver a bulk response where one document failed with BAD_REQUEST.
  BulkResponse failedResponse = getRespWithFailedDocument(RestStatus.BAD_REQUEST);
  captor.getValue().afterBulk(0, null, failedResponse);

  producer.flush(SOURCE_ONE);
}
/**
 * Creates the coordinator stream, and starts the system producer.
 * Idempotent: calling start() again after a successful start is a no-op.
 */
public void start() {
  if (isStarted) {
    log.info("Coordinator stream producer already started");
    return;
  }
  log.info("Starting coordinator stream producer.");
  systemProducer.start();
  systemAdmin.start();
  // Only mark started after both components start successfully.
  isStarted = true;
}
/**
 * Initializes the coordinator stream store exactly once: starts the consumer,
 * registers and starts the producer, and bootstraps existing messages from
 * the coordinator stream partition. Subsequent calls are no-ops.
 */
@Override
public void init() {
  // compareAndSet guarantees the bootstrap sequence runs at most once even
  // under concurrent init() calls.
  if (isInitialized.compareAndSet(false, true)) {
    LOG.info("Starting the coordinator stream system consumer with config: {}.", config);
    registerConsumer();
    systemConsumer.start();
    systemProducer.register(SOURCE);
    systemProducer.start();
    iterator = new SystemStreamPartitionIterator(systemConsumer, coordinatorSystemStreamPartition);
    bootstrapMessagesFromStream();
  } else {
    // Fix: the original format string had no {} placeholder, so the partition
    // argument was silently dropped by SLF4J.
    LOG.info("Store had already been initialized. Skipping {}.", coordinatorSystemStreamPartition);
  }
}
/**
 * Writes a key/value pair to partition 0 of the coordinator stream and
 * flushes immediately so the write is durable before put() returns.
 */
@Override
public void put(byte[] key, byte[] value) {
  OutgoingMessageEnvelope writeEnvelope =
      new OutgoingMessageEnvelope(coordinatorSystemStream, 0, key, value);
  systemProducer.send(SOURCE, writeEnvelope);
  flush();
}
/**
 * Stops the underlying SystemProducer and SystemAdmin, then marks this
 * coordinator stream producer as no longer started so it can be restarted.
 */
public void stop() {
  log.info("Stopping coordinator stream producer.");
  systemProducer.stop();
  systemAdmin.stop();
  // NOTE(review): if systemProducer.stop() throws, systemAdmin.stop() and the
  // isStarted reset are skipped — confirm this is the intended failure behavior.
  isStarted = false;
}
/**
 * Forces the system producer to flush any buffered messages for this
 * component's source. Does nothing when the producer has not been created.
 */
private void flushSystemProducer() {
  if (systemProducer == null) {
    return;
  }
  systemProducer.flush(SOURCE);
}
/**
 * A version-conflict (HTTP 409) document failure is counted in the conflicts
 * metric but is NOT fatal: the subsequent flush must complete without
 * throwing. (Method name keeps the existing "Conficts" spelling to preserve
 * the public test identifier.)
 */
@Test
public void testIgnoreVersionConficts() throws Exception {
  ArgumentCaptor<BulkProcessor.Listener> captor = ArgumentCaptor.forClass(BulkProcessor.Listener.class);
  when(BULK_PROCESSOR_FACTORY.getBulkProcessor(eq(CLIENT), captor.capture())).thenReturn(processorOne);
  producer.register(SOURCE_ONE);

  // Deliver a bulk response where one document failed with CONFLICT.
  BulkResponse conflictResponse = getRespWithFailedDocument(RestStatus.CONFLICT);
  captor.getValue().afterBulk(0, null, conflictResponse);

  assertEquals(1, metrics.conflicts.getCount());
  // Must not throw: conflicts are ignored, not treated as send failures.
  producer.flush(SOURCE_ONE);
}
/**
 * Registers a source with the underlying SystemProducer.
 *
 * @param source The source to register.
 */
public void register(String source) {
  // Pure delegation; lifecycle constraints (e.g. registering after start)
  // are enforced by the SystemProducer implementation itself.
  systemProducer.register(source);
}