/**
 * Returns a ProvenanceReporter that is tied to this ProcessSession.
 *
 * @return the provenance reporter
 */
@Override
public ProvenanceReporter getProvenanceReporter() {
    // Pure delegation: this wrapper adds no behavior of its own,
    // it simply exposes the reporter of the underlying session 's'.
    return s.getProvenanceReporter();
}
/**
 * Emits a SEND provenance event for the given FlowFile describing how many
 * HBase cells were written and how long the put took.
 *
 * @param session  the session that owns the FlowFile
 * @param flowFile the FlowFile whose content was sent
 * @param columns  number of cells written to HBase
 * @param time     duration of the transfer in milliseconds
 * @param pff      the put operation, used to derive the transit URI
 */
private void sendProvenance(ProcessSession session, FlowFile flowFile, int columns, long time, PutFlowFile pff) {
    final String transitUri = getTransitUri(pff);
    final String eventDetails = String.format("Put %d cells to HBase.", columns);
    session.getProvenanceReporter().send(flowFile, transitUri, eventDetails, time);
}
/**
 * Decorates the tracker's FlowFile with the standard kafka.* attributes,
 * records a RECEIVE provenance event covering the lease duration so far,
 * and stores the updated FlowFile back on the tracker.
 *
 * @param tracker bundle state for one topic/partition's accumulated records
 */
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> attributes = new HashMap<>();
    attributes.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    // The message key is only meaningful when exactly one record was bundled.
    if (tracker.key != null && tracker.totalRecords == 1) {
        attributes.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    attributes.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    attributes.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    // Only advertise a count when more than one message was demarcated together.
    if (tracker.totalRecords > 1) {
        attributes.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
    }

    final FlowFile updated = getProcessSession().putAllAttributes(tracker.flowFile, attributes);

    final long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(updated, transitUri, elapsedMillis);

    tracker.updateFlowFile(updated);
}
private void populateAttributes(final BundleTracker tracker) { final Map<String, String> kafkaAttrs = new HashMap<>(); kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset)); if (tracker.key != null && tracker.totalRecords == 1) { kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key); } kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition)); kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic); if (tracker.totalRecords > 1) { // Add a record.count attribute to remain consistent with other record-oriented processors. If not // reading/writing records, then use "kafka.count" attribute. if (tracker.recordWriter == null) { kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords)); } else { kafkaAttrs.put("record.count", String.valueOf(tracker.totalRecords)); } } final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs); final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos); final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic); getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis); tracker.updateFlowFile(newFlowFile); }
/**
 * Finishes a scan-result FlowFile: records the row count, appends the closing
 * JSON array bracket, then routes to success (with a RECEIVE provenance event)
 * or to failure with a "scanhbase.error" attribute describing the cause.
 *
 * @param session            session owning the FlowFile
 * @param hBaseClientService used to build the provenance transit URI
 * @param flowFile           the FlowFile being finalized
 * @param tableName          scanned table, for the transit URI
 * @param rowsPulled         number of rows written into the FlowFile
 * @param e                  upstream failure, or null if the scan succeeded
 */
private void finalizeFlowFile(final ProcessSession session, final HBaseClientService hBaseClientService,
        FlowFile flowFile, final String tableName, Long rowsPulled, Exception e) {
    Relationship rel = REL_SUCCESS;
    flowFile = session.putAttribute(flowFile, HBASE_ROWS_COUNT_ATTR, rowsPulled.toString());

    // Close the JSON array that the scan callbacks opened; capture any append failure.
    final AtomicReference<IOException> ioe = new AtomicReference<>(null);
    flowFile = session.append(flowFile, (out) -> {
        try {
            out.write("]".getBytes());
        } catch (IOException ei) {
            ioe.set(ei);
        }
    });

    if (e != null || ioe.get() != null) {
        // BUG FIX: the original ternary was inverted — "(e==null?e:ioe.get())" picked
        // the null 'e' when e was null (NPE) and ignored 'e' when it was set.
        // Select whichever exception is actually present.
        final Exception cause = (e != null) ? e : ioe.get();
        flowFile = session.putAttribute(flowFile, "scanhbase.error", cause.toString());
        rel = REL_FAILURE;
    } else {
        session.getProvenanceReporter().receive(flowFile, hBaseClientService.toTransitUri(tableName, "{ids}"));
    }
    session.transfer(flowFile, rel);
}
/**
 * Transfers the FlowFile to the given relationship and records a RECEIVE
 * provenance event with an amqps:// transit URI identifying the Event Hub
 * partition and consumer group it came from.
 *
 * @param relationship  destination relationship
 * @param session       session owning the FlowFile
 * @param stopWatch     measures how long the receive took
 * @param eventHubName  source Event Hub name
 * @param partitionId   source partition identifier
 * @param consumerGroup consumer group used for the read
 * @param flowFile      the received FlowFile
 */
private void transferTo(Relationship relationship, ProcessSession session, StopWatch stopWatch,
        String eventHubName, String partitionId, String consumerGroup, FlowFile flowFile) {
    session.transfer(flowFile, relationship);

    // amqps://<namespace>.servicebus.windows.net/<hub>/ConsumerGroups/<group>/Partitions/<partition>
    final StringBuilder transitUri = new StringBuilder("amqps://");
    transitUri.append(namespaceName).append(".servicebus.windows.net/")
            .append(eventHubName)
            .append("/ConsumerGroups/").append(consumerGroup)
            .append("/Partitions/").append(partitionId);
    session.getProvenanceReporter().receive(flowFile, transitUri.toString(), stopWatch.getElapsed(TimeUnit.MILLISECONDS));
}
/**
 * Routes an HTTP-received FlowFile to success, recording a RECEIVE provenance
 * event that names the remote address and, when available, the client
 * certificate DN.
 *
 * @param context   process context (unused here, kept for signature parity)
 * @param session   session owning the FlowFile
 * @param container request container (unused here, kept for signature parity)
 * @param start     nanoTime captured when the request began
 * @param request   the originating servlet request
 * @param flowFile  the FlowFile built from the request body
 */
private void forwardFlowFile(final ProcessContext context, final ProcessSession session, HttpRequestContainer container,
        final long start, final HttpServletRequest request, FlowFile flowFile) {
    final long receiveMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    final String subjectDn = flowFile.getAttribute(HTTPUtils.HTTP_SSL_CERT);

    // Provenance detail includes the TLS subject DN only when the client presented a certificate.
    String details = "Received from " + request.getRemoteAddr();
    if (subjectDn != null) {
        details = details + " with DN=" + subjectDn;
    }
    session.getProvenanceReporter().receive(flowFile, HTTPUtils.getURI(flowFile.getAttributes()), details, receiveMillis);
    session.transfer(flowFile, REL_SUCCESS);
    getLogger().info("Transferring {} to 'success'; received from {}", new Object[]{flowFile, request.getRemoteAddr()});
}
/**
 * Finalizes a record-set FlowFile: closes the writer, applies the writer's
 * attributes plus record.count and mime.type, transfers the FlowFile, and
 * records a ROUTE provenance event.
 *
 * @param session      session owning the FlowFile
 * @param flowFile     the FlowFile whose record set was just written
 * @param writer       the writer to finish and close
 * @param relationship destination relationship
 * @param details      provenance detail string for the route event
 * @throws IOException if finishing or closing the writer fails
 */
private void completeFlowFile(final ProcessSession session, final FlowFile flowFile, final RecordSetWriter writer,
        final Relationship relationship, final String details) throws IOException {
    final WriteResult writeResult = writer.finishRecordSet();
    writer.close();

    final Map<String, String> attributes = new HashMap<>(writeResult.getAttributes());
    attributes.put("record.count", String.valueOf(writeResult.getRecordCount()));
    attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());

    // BUG FIX: FlowFiles are immutable, so putAllAttributes returns a NEW FlowFile.
    // The original discarded that return value and transferred the stale reference,
    // silently dropping the attributes. Transfer the updated FlowFile instead.
    final FlowFile updated = session.putAllAttributes(flowFile, attributes);
    session.transfer(updated, relationship);
    session.getProvenanceReporter().route(updated, relationship, details);
}
/**
 * Releases the FlowFile at the end of a Kafka consume pass. An empty FlowFile
 * is removed from the session; a non-empty one gets its attributes applied, a
 * RECEIVE provenance event, and is transferred to REL_SUCCESS.
 *
 * @param flowFile   the FlowFile to release
 * @param session    session owning the FlowFile
 * @param attributes attributes to apply before transfer
 * @param start      nanoTime captured when consumption began
 * @param topic      source Kafka topic (for the transit URI)
 * @param msgCount   number of Kafka messages written into the FlowFile
 */
private void releaseFlowFile(FlowFile flowFile, ProcessSession session, Map<String, String> attributes, long start, String topic, int msgCount) {
    // Nothing was consumed: drop the empty FlowFile rather than emitting it.
    if (flowFile.getSize() == 0L) {
        session.remove(flowFile);
        return;
    }

    flowFile = session.putAllAttributes(flowFile, attributes);
    final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
    session.getProvenanceReporter().receive(flowFile, "kafka://" + topic, "Received " + msgCount + " Kafka messages", millis);
    getLogger().info("Successfully received {} from Kafka with {} messages in {} millis", new Object[]{flowFile, msgCount, millis});
    session.transfer(flowFile, REL_SUCCESS);
}
}
private void handleParseFailure(final ConsumerRecord<byte[], byte[]> consumerRecord, final ProcessSession session, final Exception cause, final String message) { // If we are unable to parse the data, we need to transfer it to 'parse failure' relationship final Map<String, String> attributes = getAttributes(consumerRecord); attributes.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(consumerRecord.offset())); attributes.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(consumerRecord.partition())); attributes.put(KafkaProcessorUtils.KAFKA_TOPIC, consumerRecord.topic()); FlowFile failureFlowFile = session.create(); final byte[] value = consumerRecord.value(); if (value != null) { failureFlowFile = session.write(failureFlowFile, out -> out.write(value)); } failureFlowFile = session.putAllAttributes(failureFlowFile, attributes); final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, consumerRecord.topic()); session.getProvenanceReporter().receive(failureFlowFile, transitUri); session.transfer(failureFlowFile, REL_PARSE_FAILURE); if (cause == null) { logger.error(message); } else { logger.error(message, cause); } session.adjustCounter("Parse Failures", 1, false); }
/**
 * Creates a FlowFile from the given payload string (encoded with the
 * configured charset), applies extra attributes, records a RECEIVE provenance
 * event, and transfers it to the given relationship.
 *
 * @param payload         textual content for the new FlowFile
 * @param parent          optional parent for lineage; may be null
 * @param context         process context supplying the CHARSET property
 * @param session         session used to create the FlowFile
 * @param extraAttributes attributes to apply to the new FlowFile
 * @param rel             destination relationship
 * @throws UnsupportedEncodingException if the configured charset is unknown
 */
protected void writeBatch(String payload, FlowFile parent, ProcessContext context, ProcessSession session,
        Map<String, String> extraAttributes, Relationship rel) throws UnsupportedEncodingException {
    final String charset = context.getProperty(CHARSET).evaluateAttributeExpressions(parent).getValue();

    // Create as a child of 'parent' when one exists so provenance lineage is preserved.
    FlowFile flowFile;
    if (parent == null) {
        flowFile = session.create();
    } else {
        flowFile = session.create(parent);
    }
    flowFile = session.importFrom(new ByteArrayInputStream(payload.getBytes(charset)), flowFile);
    flowFile = session.putAllAttributes(flowFile, extraAttributes);
    session.getProvenanceReporter().receive(flowFile, getURI(context));
    session.transfer(flowFile, rel);
}
/**
 * Builds a FlowFile from a Flume event — headers become attributes, the body
 * becomes content — records a CREATE provenance event, and transfers it.
 *
 * @param event        the source event
 * @param session      session used to create the FlowFile
 * @param relationship destination relationship
 */
protected static void transferEvent(final Event event, ProcessSession session, Relationship relationship) {
    FlowFile flowFile = session.create();
    flowFile = session.putAllAttributes(flowFile, event.getHeaders());
    // Copy the event body verbatim into the FlowFile content.
    flowFile = session.write(flowFile, out -> out.write(event.getBody()));
    session.getProvenanceReporter().create(flowFile);
    session.transfer(flowFile, relationship);
}
/**
 * Consumes one JMS response: writes the message body as FlowFile content,
 * maps JMS headers and properties onto attributes, tags the source
 * destination, records a RECEIVE provenance event, transfers to REL_SUCCESS,
 * and commits the session. A null response is a no-op.
 *
 * @param response the JMS message to materialize; may be null
 */
@Override
public void accept(final JMSResponse response) {
    if (response == null) {
        return;
    }

    FlowFile flowFile = processSession.create();
    flowFile = processSession.write(flowFile, out -> out.write(response.getMessageBody()));

    // Headers first, then properties — later puts win on any name collision.
    final Map<String, String> jmsHeaders = response.getMessageHeaders();
    final Map<String, String> jmsProperties = response.getMessageProperties();
    flowFile = ConsumeJMS.this.updateFlowFileAttributesWithJMSAttributes(jmsHeaders, flowFile, processSession);
    flowFile = ConsumeJMS.this.updateFlowFileAttributesWithJMSAttributes(jmsProperties, flowFile, processSession);
    flowFile = processSession.putAttribute(flowFile, JMS_SOURCE_DESTINATION_NAME, destinationName);

    processSession.getProvenanceReporter().receive(flowFile, destinationName);
    processSession.transfer(flowFile, REL_SUCCESS);
    // Commit per message so a later failure cannot roll back a delivered message.
    processSession.commit();
}
});
/**
 * Serializes a DDL event to a JSON FlowFile (including the originating query
 * text), applies the common CDC attributes, transfers it, records a RECEIVE
 * provenance event, and returns the next sequence id.
 *
 * @param session           session used to create the FlowFile
 * @param transitUri        provenance transit URI for the source database
 * @param eventInfo         the DDL event to serialize
 * @param currentSequenceId sequence id assigned to this event
 * @param relationship      destination relationship
 * @return the sequence id to use for the next event
 */
@Override
public long writeEvent(ProcessSession session, String transitUri, DDLEventInfo eventInfo, long currentSequenceId, Relationship relationship) {
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, (out) -> {
        super.startJson(out, eventInfo);
        super.writeJson(eventInfo);
        // DDL events additionally carry the raw SQL that triggered them.
        jsonGenerator.writeStringField("query", eventInfo.getQuery());
        super.endJson();
    });
    flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo));
    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().receive(flowFile, transitUri);
    return currentSequenceId + 1;
}
}
@Override public long writeEvent(ProcessSession session, String transitUri, T eventInfo, long currentSequenceId, Relationship relationship) { FlowFile flowFile = session.create(); flowFile = session.write(flowFile, (outputStream) -> { super.startJson(outputStream, eventInfo); writeJson(eventInfo); // Nothing in the body super.endJson(); }); flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo)); session.transfer(flowFile, relationship); session.getProvenanceReporter().receive(flowFile, transitUri); return currentSequenceId + 1; } }
/**
 * Processes up to five FlowFiles per trigger: refreshes the mapping, rejects
 * (routes to failure) any FlowFile larger than the configured buffer, and for
 * the rest performs the in-memory text replacement, recording a
 * content-modification provenance event before routing to success.
 *
 * @param context process context supplying MAX_BUFFER_SIZE
 * @param session session supplying the FlowFiles
 * @throws ProcessException on unrecoverable processing errors
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    updateMapping(context);

    final List<FlowFile> flowFiles = session.get(5);
    if (flowFiles.isEmpty()) {
        return;
    }

    final ComponentLog componentLog = getLogger();
    final int maxBytes = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();

    for (FlowFile flowFile : flowFiles) {
        // Content larger than the buffer cannot be processed in memory — route to failure.
        if (flowFile.getSize() > maxBytes) {
            session.transfer(flowFile, REL_FAILURE);
            continue;
        }
        final StopWatch stopWatch = new StopWatch(true);
        flowFile = session.write(flowFile, new ReplaceTextCallback(context, flowFile, maxBytes));
        componentLog.info("Transferred {} to 'success'", new Object[]{flowFile});
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    }
}
@Override public long writeEvent(ProcessSession session, String transitUri, T eventInfo, long currentSequenceId, Relationship relationship) { FlowFile flowFile = session.create(); flowFile = session.write(flowFile, (outputStream) -> { super.startJson(outputStream, eventInfo); writeJson(eventInfo); // Nothing in the body super.endJson(); }); flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo)); session.transfer(flowFile, relationship); session.getProvenanceReporter().receive(flowFile, transitUri); return currentSequenceId + 1; } }
/**
 * Polls the Spring exchanger for a message; when one arrives, writes its
 * payload (String or byte[]) as FlowFile content, maps the message headers to
 * attributes, transfers to REL_SUCCESS, and records a RECEIVE provenance
 * event against the application-context config file name.
 *
 * @param processSession session used to create the FlowFile
 */
private void receiveFromSpring(ProcessSession processSession) {
    final SpringResponse<?> msgFromSpring = this.exchanger.receive(this.receiveTimeout);
    // Timed out with nothing to receive — yield until the next trigger.
    if (msgFromSpring == null) {
        return;
    }

    FlowFile flowFileToProcess = processSession.create();
    flowFileToProcess = processSession.write(flowFileToProcess, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            final Object payload = msgFromSpring.getPayload();
            // NOTE(review): String payloads are encoded with the platform default
            // charset here — confirm that is intentional for this integration.
            if (payload instanceof String) {
                out.write(((String) payload).getBytes());
            } else {
                out.write((byte[]) payload);
            }
        }
    });
    flowFileToProcess = processSession.putAllAttributes(flowFileToProcess,
            this.extractFlowFileAttributesFromMessageHeaders(msgFromSpring.getHeaders()));
    processSession.transfer(flowFileToProcess, REL_SUCCESS);
    processSession.getProvenanceReporter().receive(flowFileToProcess, this.applicationContextConfigFileName);
}
private void handleParseFailure(final StandardEvent event, final ProcessSession session, final Exception cause, final String message) { // If we are unable to parse the data, we need to transfer it to 'parse failure' relationship final Map<String, String> attributes = getAttributes(event.getSender()); FlowFile failureFlowFile = session.create(); failureFlowFile = session.write(failureFlowFile, out -> out.write(event.getData())); failureFlowFile = session.putAllAttributes(failureFlowFile, attributes); final String transitUri = getTransitUri(event.getSender()); session.getProvenanceReporter().receive(failureFlowFile, transitUri); session.transfer(failureFlowFile, REL_PARSE_FAILURE); if (cause == null) { getLogger().error(message); } else { getLogger().error(message, cause); } session.adjustCounter("Parse Failures", 1, false); }
/**
 * Sends one FlowFile's content to Azure Event Hub. The entire content is read
 * into memory, sent, and on success a SEND provenance event with the amqps://
 * transit URI is recorded; on failure the FlowFile is penalized and routed to
 * REL_FAILURE.
 *
 * @param context process context supplying NAMESPACE and EVENT_HUB_NAME
 * @param session session supplying the FlowFile
 * @throws ProcessException on unrecoverable processing errors
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final StopWatch stopWatch = new StopWatch(true);

    // Buffer the whole content in memory — the Event Hub client needs a byte[].
    final byte[] content = new byte[(int) flowFile.getSize()];
    session.read(flowFile, in -> StreamUtils.fillBuffer(in, content));

    try {
        sendMessage(content);
    } catch (final ProcessException pe) {
        getLogger().error("Failed to send {} to EventHub due to {}; routing to failure", new Object[]{flowFile, pe}, pe);
        session.transfer(session.penalize(flowFile), REL_FAILURE);
        return;
    }

    final String namespace = context.getProperty(NAMESPACE).getValue();
    final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
    session.getProvenanceReporter().send(flowFile,
            "amqps://" + namespace + ".servicebus.windows.net" + "/" + eventHubName,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);
}