private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
    }

    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);

    tracker.updateFlowFile(newFlowFile);
}
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        // Add a record.count attribute to remain consistent with other record-oriented processors.
        // If not reading/writing records, then use the "kafka.count" attribute.
        if (tracker.recordWriter == null) {
            kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
        } else {
            kafkaAttrs.put("record.count", String.valueOf(tracker.totalRecords));
        }
    }

    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);

    tracker.updateFlowFile(newFlowFile);
}
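// The populateAttributes variants above reference a BundleTracker helper that is not shown in
// this collection. The following is a minimal sketch of the fields and method those snippets
// assume; names are inferred from usage and the field types are guesses, not taken from the
// original processor source.
private static class BundleTracker {
    final long initialOffset;     // offset of the first record in the bundle
    final int partition;          // Kafka partition the records came from
    final String topic;           // Kafka topic the records came from
    final String key;             // record key; only added as an attribute for single-record bundles
    long totalRecords;            // number of records accumulated into the FlowFile
    RecordSetWriter recordWriter; // non-null when records are written via a record writer
    FlowFile flowFile;            // the FlowFile being accumulated

    BundleTracker(final long initialOffset, final int partition, final String topic, final String key) {
        this.initialOffset = initialOffset;
        this.partition = partition;
        this.topic = topic;
        this.key = key;
    }

    void updateFlowFile(final FlowFile flowFile) {
        this.flowFile = flowFile;
    }
}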
private void finalizeFlowFile(final ProcessSession session, final HBaseClientService hBaseClientService, FlowFile flowFile,
                              final String tableName, Long rowsPulled, Exception e) {
    Relationship rel = REL_SUCCESS;
    flowFile = session.putAttribute(flowFile, HBASE_ROWS_COUNT_ATTR, rowsPulled.toString());

    // Close the JSON array that the scan results were appended into.
    final AtomicReference<IOException> ioe = new AtomicReference<>(null);
    flowFile = session.append(flowFile, (out) -> {
        try {
            out.write("]".getBytes());
        } catch (IOException ei) {
            ioe.set(ei);
        }
    });

    if (e != null || ioe.get() != null) {
        // Record whichever exception was actually raised.
        flowFile = session.putAttribute(flowFile, "scanhbase.error", (e != null ? e : ioe.get()).toString());
        rel = REL_FAILURE;
    } else {
        session.getProvenanceReporter().receive(flowFile, hBaseClientService.toTransitUri(tableName, "{ids}"));
    }
    session.transfer(flowFile, rel);
}
private void transferTo(Relationship relationship, ProcessSession session, StopWatch stopWatch,
                        String eventHubName, String partitionId, String consumerGroup, FlowFile flowFile) {
    session.transfer(flowFile, relationship);
    final String transitUri = "amqps://" + namespaceName + ".servicebus.windows.net/"
            + eventHubName + "/ConsumerGroups/" + consumerGroup + "/Partitions/" + partitionId;
    session.getProvenanceReporter().receive(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
}
private void forwardFlowFile(final ProcessContext context, final ProcessSession session, HttpRequestContainer container,
                             final long start, final HttpServletRequest request, FlowFile flowFile) {
    final long receiveMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    String subjectDn = flowFile.getAttribute(HTTPUtils.HTTP_SSL_CERT);
    session.getProvenanceReporter().receive(flowFile, HTTPUtils.getURI(flowFile.getAttributes()),
            "Received from " + request.getRemoteAddr() + (subjectDn == null ? "" : " with DN=" + subjectDn), receiveMillis);
    session.transfer(flowFile, REL_SUCCESS);
    getLogger().info("Transferring {} to 'success'; received from {}", new Object[]{flowFile, request.getRemoteAddr()});
}
/**
 * Will release the flow file. Releasing the flow file in the context of this
 * operation implies the following:
 *
 * If the flow file is empty, it is removed from the session.
 * If it has content, it is transferred to REL_SUCCESS.
 */
private void releaseFlowFile(FlowFile flowFile, ProcessSession session, Map<String, String> attributes,
                             long start, String topic, int msgCount) {
    if (flowFile.getSize() == 0L) {
        session.remove(flowFile);
    } else {
        flowFile = session.putAllAttributes(flowFile, attributes);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        session.getProvenanceReporter().receive(flowFile, "kafka://" + topic, "Received " + msgCount + " Kafka messages", millis);
        getLogger().info("Successfully received {} from Kafka with {} messages in {} millis", new Object[]{flowFile, msgCount, millis});
        session.transfer(flowFile, REL_SUCCESS);
    }
}
private void handleParseFailure(final ConsumerRecord<byte[], byte[]> consumerRecord, final ProcessSession session,
                                final Exception cause, final String message) {
    // If we are unable to parse the data, we need to transfer it to the 'parse failure' relationship.
    final Map<String, String> attributes = getAttributes(consumerRecord);
    attributes.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(consumerRecord.offset()));
    attributes.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(consumerRecord.partition()));
    attributes.put(KafkaProcessorUtils.KAFKA_TOPIC, consumerRecord.topic());

    FlowFile failureFlowFile = session.create();

    final byte[] value = consumerRecord.value();
    if (value != null) {
        failureFlowFile = session.write(failureFlowFile, out -> out.write(value));
    }
    failureFlowFile = session.putAllAttributes(failureFlowFile, attributes);

    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, consumerRecord.topic());
    session.getProvenanceReporter().receive(failureFlowFile, transitUri);
    session.transfer(failureFlowFile, REL_PARSE_FAILURE);

    if (cause == null) {
        logger.error(message);
    } else {
        logger.error(message, cause);
    }

    session.adjustCounter("Parse Failures", 1, false);
}
protected void writeBatch(String payload, FlowFile parent, ProcessContext context, ProcessSession session,
                          Map<String, String> extraAttributes, Relationship rel) throws UnsupportedEncodingException {
    String charset = context.getProperty(CHARSET).evaluateAttributeExpressions(parent).getValue();
    FlowFile flowFile = parent != null ? session.create(parent) : session.create();
    flowFile = session.importFrom(new ByteArrayInputStream(payload.getBytes(charset)), flowFile);
    flowFile = session.putAllAttributes(flowFile, extraAttributes);
    session.getProvenanceReporter().receive(flowFile, getURI(context));
    session.transfer(flowFile, rel);
}
/**
 * Creates and transfers a new flow file whose contents are the JSON-serialized value of the specified event,
 * with the sequence ID attribute set.
 *
 * @param session           A reference to a ProcessSession from which the flow file(s) will be created and transferred
 * @param transitUri        The URI reported to the provenance repository as the source of the flow file(s)
 * @param eventInfo         An event whose value will become the contents of the flow file
 * @param currentSequenceId The current CDC sequence ID
 * @param relationship      The relationship to which the flow file(s) will be transferred
 * @return The next available CDC sequence ID for use by the CDC processor
 */
@Override
public long writeEvent(final ProcessSession session, String transitUri, final DeleteRowsEventInfo eventInfo,
                       final long currentSequenceId, Relationship relationship) {
    final AtomicLong seqId = new AtomicLong(currentSequenceId);
    for (Serializable[] row : eventInfo.getRows()) {
        // One flow file per deleted row, each serialized as a JSON document.
        FlowFile flowFile = session.create();
        flowFile = session.write(flowFile, outputStream -> {
            super.startJson(outputStream, eventInfo);
            super.writeJson(eventInfo);
            final BitSet bitSet = eventInfo.getIncludedColumns();
            writeRow(eventInfo, row, bitSet);
            super.endJson();
        });
        flowFile = session.putAllAttributes(flowFile, getCommonAttributes(seqId.get(), eventInfo));
        session.transfer(flowFile, relationship);
        session.getProvenanceReporter().receive(flowFile, transitUri);
        seqId.getAndIncrement();
    }
    return seqId.get();
}
@Override
public void accept(final JMSResponse response) {
    if (response == null) {
        return;
    }

    FlowFile flowFile = processSession.create();
    flowFile = processSession.write(flowFile, out -> out.write(response.getMessageBody()));

    final Map<String, String> jmsHeaders = response.getMessageHeaders();
    final Map<String, String> jmsProperties = response.getMessageProperties();

    flowFile = ConsumeJMS.this.updateFlowFileAttributesWithJMSAttributes(jmsHeaders, flowFile, processSession);
    flowFile = ConsumeJMS.this.updateFlowFileAttributesWithJMSAttributes(jmsProperties, flowFile, processSession);
    flowFile = processSession.putAttribute(flowFile, JMS_SOURCE_DESTINATION_NAME, destinationName);

    processSession.getProvenanceReporter().receive(flowFile, destinationName);
    processSession.transfer(flowFile, REL_SUCCESS);
    processSession.commit();
}
@Override
public long writeEvent(ProcessSession session, String transitUri, DDLEventInfo eventInfo, long currentSequenceId, Relationship relationship) {
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, (outputStream) -> {
        super.startJson(outputStream, eventInfo);
        super.writeJson(eventInfo);
        jsonGenerator.writeStringField("query", eventInfo.getQuery());
        super.endJson();
    });
    flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo));
    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().receive(flowFile, transitUri);
    return currentSequenceId + 1;
}
@Override
public long writeEvent(ProcessSession session, String transitUri, T eventInfo, long currentSequenceId, Relationship relationship) {
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, (outputStream) -> {
        super.startJson(outputStream, eventInfo);
        writeJson(eventInfo); // Nothing in the body
        super.endJson();
    });
    flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo));
    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().receive(flowFile, transitUri);
    return currentSequenceId + 1;
}
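// Usage sketch for the writeEvent variants above. All of them follow the same contract:
// serialize the event into a new FlowFile, transfer it, report a provenance 'receive'
// against the transit URI, and return the next sequence ID. The field names below
// ('eventWriter', 'currentSequenceId') and REL_SUCCESS are assumptions for illustration,
// not taken from the original processors.
private void emitEvent(final ProcessSession session, final String transitUri, final DDLEventInfo eventInfo) {
    long sequenceId = currentSequenceId.get(); // hypothetical AtomicLong field tracking the CDC sequence
    sequenceId = eventWriter.writeEvent(session, transitUri, eventInfo, sequenceId, REL_SUCCESS);
    currentSequenceId.set(sequenceId); // persist so numbering continues across events
}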
private void receiveFromSpring(ProcessSession processSession) {
    final SpringResponse<?> msgFromSpring = this.exchanger.receive(this.receiveTimeout);
    if (msgFromSpring != null) {
        FlowFile flowFileToProcess = processSession.create();
        flowFileToProcess = processSession.write(flowFileToProcess, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                Object payload = msgFromSpring.getPayload();
                byte[] payloadBytes = payload instanceof String ? ((String) payload).getBytes() : (byte[]) payload;
                out.write(payloadBytes);
            }
        });
        flowFileToProcess = processSession.putAllAttributes(flowFileToProcess,
                this.extractFlowFileAttributesFromMessageHeaders(msgFromSpring.getHeaders()));
        processSession.transfer(flowFileToProcess, REL_SUCCESS);
        processSession.getProvenanceReporter().receive(flowFileToProcess, this.applicationContextConfigFileName);
    }
}
private void handleParseFailure(final StandardEvent event, final ProcessSession session, final Exception cause, final String message) {
    // If we are unable to parse the data, we need to transfer it to the 'parse failure' relationship.
    final Map<String, String> attributes = getAttributes(event.getSender());

    FlowFile failureFlowFile = session.create();
    failureFlowFile = session.write(failureFlowFile, out -> out.write(event.getData()));
    failureFlowFile = session.putAllAttributes(failureFlowFile, attributes);

    final String transitUri = getTransitUri(event.getSender());
    session.getProvenanceReporter().receive(failureFlowFile, transitUri);
    session.transfer(failureFlowFile, REL_PARSE_FAILURE);

    if (cause == null) {
        getLogger().error(message);
    } else {
        getLogger().error(message, cause);
    }

    session.adjustCounter("Parse Failures", 1, false);
}
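// The handleParseFailure snippet above assumes two helpers supplied by the concrete
// listener processor. A minimal sketch under that assumption; the attribute key and
// URI scheme below are hypothetical, not from the original source.
protected Map<String, String> getAttributes(final String sender) {
    final Map<String, String> attributes = new HashMap<>();
    attributes.put("sender.host", sender); // hypothetical attribute key
    return attributes;
}

protected String getTransitUri(final String sender) {
    return "udp://" + sender; // hypothetical scheme; real processors build protocol-specific URIs
}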