private void sendProvenance(ProcessSession session, FlowFile flowFile, int columns, long time, PutFlowFile pff) {
    final String details = String.format("Put %d cells to HBase.", columns);
    session.getProvenanceReporter().send(flowFile, getTransitUri(pff), details, time);
}
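// A hedged sketch of a call site for the helper above. `putFlowFile` (a completed
// HBase put) and `startNanos` (captured before the put began) are assumed names,
// not taken from the source; PutFlowFile is assumed to expose its columns via getColumns().
final long sendMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
sendProvenance(session, flowFile, putFlowFile.getColumns().size(), sendMillis, putFlowFile);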
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
    }
    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);
    tracker.updateFlowFile(newFlowFile);
}
session.getProvenanceReporter().modifyAttributes(flowFile);
session.transfer(flowFile, REL_MATCH);
getLogger().info("Matched {} Grok Expressions and added attributes to FlowFile {}", new Object[]{grokResults.size(), flowFile});

session.getProvenanceReporter().modifyContent(conFlowfile, "Replaced content with parsed Grok fields and values",
        stopWatch.getElapsed(TimeUnit.MILLISECONDS));
session.transfer(conFlowfile, REL_MATCH);
session.getProvenanceReporter().send(requestFlowFile, url.toExternalForm(), true);
session.getProvenanceReporter().fetch(responseFlowFile, url.toExternalForm(), millis);
} else {
    session.getProvenanceReporter().receive(responseFlowFile, url.toExternalForm(), millis);
    session.getProvenanceReporter().modifyAttributes(requestFlowFile, "The " + attributeKey
            + " attribute has been added; its value is the body of an HTTP call to "
            + url.toExternalForm() + ". It took " + millis + " ms.");
final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
if (requestFlowFile != null) {
    session.getProvenanceReporter().fetch(responseFlowFile, endpoint, millis);
} else {
    session.getProvenanceReporter().receive(responseFlowFile, endpoint, millis);
    session.getProvenanceReporter().modifyAttributes(requestFlowFile, "The " + attributeKey
            + " attribute has been added; its value is the body of an HTTP call to "
session.getProvenanceReporter().fetch(flowFile, blob.getSnapshotQualifiedUri().toString(), transferMillis);
} catch (IllegalArgumentException | URISyntaxException | StorageException | ProcessException e) {
    if (e instanceof ProcessException && storedException.get() == null) {
session.getProvenanceReporter().modifyContent(putFlowFile, "Converted " + totalRecordCount.get() + " records",
        System.currentTimeMillis() - startTime);
session.getProvenanceReporter().fetch(handlerFlowFile, transitUri);
if (!destination.equals(DESTINATION_CONTENT.getValue())) {
    session.getProvenanceReporter().modifyAttributes(handlerFlowFile, "Added attributes to FlowFile from " + transitUri);
            new Object[]{flowFile, filename, host, REL_NOT_FOUND.getName()});
    session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
    session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
    return;
} catch (final PermissionDeniedException e) {
            new Object[]{flowFile, filename, host, REL_PERMISSION_DENIED.getName()});
    session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
    session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
    return;
} catch (final ProcessException | IOException e) {

session.getProvenanceReporter().fetch(flowFile, protocolName + "://" + host + ":" + port + "/" + filename,
        stopWatch.getElapsed(TimeUnit.MILLISECONDS));
session.transfer(flowFile, REL_SUCCESS);
session.getProvenanceReporter().modifyAttributes(flowFile);
session.transfer(flowFile, REL_SUCCESS);
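// For context, a minimal sketch of how the modifyAttributes pattern above typically
// sits inside a processor's onTrigger. The attribute name "example.attribute" is
// illustrative only; REL_SUCCESS is assumed to be declared on the processor.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    flowFile = session.putAttribute(flowFile, "example.attribute", "value");
    // Record an ATTRIBUTES_MODIFIED provenance event before routing.
    session.getProvenanceReporter().modifyAttributes(flowFile);
    session.transfer(flowFile, REL_SUCCESS);
}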
session.getProvenanceReporter().modifyContent(flowFile, "Replaced content with parsed CEF fields and values"); break; session.getProvenanceReporter().route(flowFile, REL_FAILURE); session.transfer(flowFile, REL_FAILURE); session.commit();
session.getProvenanceReporter().fetch(flowFile,
        new StringBuilder("rethinkdb://").append(databaseName).append("/").append(tableName).append("/").append(id).toString(),
        (endTimeMillis - startTimeMillis));
processSession.getProvenanceReporter().modifyContent(flowFile, "Replaced content with result of expression " + jsonPathExp.getPath());
        new Object[]{flowFile, xpathResults.size(), destRel.getName()});
session.transfer(flowFile, destRel);
session.getProvenanceReporter().modifyAttributes(flowFile);
} else if (DESTINATION_CONTENT.equals(destination)) {
    logger.info("Successfully updated content for {}; routing to 'matched'", new Object[]{flowFile});
    session.transfer(flowFile, REL_MATCH);
    session.getProvenanceReporter().modifyContent(flowFile);
session.getProvenanceReporter().modifyAttributes(flowFile);
session.transfer(flowFile, REL_MATCH);
logger.info("Matched {} Regular Expressions and added attributes to FlowFile {}", new Object[]{regexResults.size(), flowFile});
} else if (!Files.exists(filePath)) {
    getLogger().log(levelFileNotFound, "Could not fetch file {} from file system for {} because the file does not exist; "
            + "routing to not.found", new Object[]{file, flowFile});
    session.getProvenanceReporter().route(flowFile, REL_NOT_FOUND);
    session.transfer(session.penalize(flowFile), REL_NOT_FOUND);
    return;

    getLogger().log(levelPermDenied, "Could not fetch file {} from file system for {} due to user {} not having "
            + "sufficient permissions to read the file; routing to permission.denied", new Object[]{file, flowFile, user});
    session.getProvenanceReporter().route(flowFile, REL_PERMISSION_DENIED);
    session.transfer(session.penalize(flowFile), REL_PERMISSION_DENIED);
    return;

session.getProvenanceReporter().modifyContent(flowFile, "Replaced content of FlowFile with contents of " + file.toURI(),
        stopWatch.getElapsed(TimeUnit.MILLISECONDS));
session.transfer(flowFile, REL_SUCCESS);
private void transferFlowFiles(ProcessSession session, RoutingResult result, HiveOptions options) {
    if (successfulRecordCount.get() > 0) {
        // Transfer the flow file with successful records
        Map<String, String> updateAttributes = new HashMap<>();
        updateAttributes.put(HIVE_STREAMING_RECORD_COUNT_ATTR, Integer.toString(successfulRecordCount.get()));
        updateAttributes.put(AbstractHiveQLProcessor.ATTR_OUTPUT_TABLES, options.getQualifiedTableName());
        successFlowFile.set(session.putAllAttributes(successFlowFile.get(), updateAttributes));
        session.getProvenanceReporter().send(successFlowFile.get(), options.getMetaStoreURI());
        result.routeTo(successFlowFile.get(), REL_SUCCESS);
    } else {
        session.remove(successFlowFile.get());
    }

    if (failedRecordCount.get() > 0) {
        // There were some failed records, so transfer that flow file to failure
        Map<String, String> updateAttributes = new HashMap<>();
        updateAttributes.put(HIVE_STREAMING_RECORD_COUNT_ATTR, Integer.toString(failedRecordCount.get()));
        updateAttributes.put(AbstractHiveQLProcessor.ATTR_OUTPUT_TABLES, options.getQualifiedTableName());
        failureFlowFile.set(session.putAllAttributes(failureFlowFile.get(), updateAttributes));
        result.routeTo(failureFlowFile.get(), REL_FAILURE);
    } else {
        session.remove(failureFlowFile.get());
    }

    result.getRoutedFlowFiles().forEach((relationship, flowFiles) -> session.transfer(flowFiles, relationship));
}
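// The helper above reads accumulators populated earlier in the record loop. A hedged
// sketch of plausible declarations; the surrounding setup, including `inputFlowFile`,
// is an assumption and not taken from the source.
final AtomicInteger successfulRecordCount = new AtomicInteger(0);
final AtomicInteger failedRecordCount = new AtomicInteger(0);
final AtomicReference<FlowFile> successFlowFile = new AtomicReference<>(session.create(inputFlowFile));
final AtomicReference<FlowFile> failureFlowFile = new AtomicReference<>(session.create(inputFlowFile));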
private void populateAttributes(final BundleTracker tracker) {
    final Map<String, String> kafkaAttrs = new HashMap<>();
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_OFFSET, String.valueOf(tracker.initialOffset));
    if (tracker.key != null && tracker.totalRecords == 1) {
        kafkaAttrs.put(KafkaProcessorUtils.KAFKA_KEY, tracker.key);
    }
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_PARTITION, String.valueOf(tracker.partition));
    kafkaAttrs.put(KafkaProcessorUtils.KAFKA_TOPIC, tracker.topic);
    if (tracker.totalRecords > 1) {
        // Add a record.count attribute to remain consistent with other record-oriented processors.
        // If not reading/writing records, use the "kafka.count" attribute instead.
        if (tracker.recordWriter == null) {
            kafkaAttrs.put(KafkaProcessorUtils.KAFKA_COUNT, String.valueOf(tracker.totalRecords));
        } else {
            kafkaAttrs.put("record.count", String.valueOf(tracker.totalRecords));
        }
    }
    final FlowFile newFlowFile = getProcessSession().putAllAttributes(tracker.flowFile, kafkaAttrs);
    final long executionDurationMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - leaseStartNanos);
    final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, tracker.topic);
    getProcessSession().getProvenanceReporter().receive(newFlowFile, transitUri, executionDurationMillis);
    tracker.updateFlowFile(newFlowFile);
}
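// Design note: the record.count branch above keeps record-oriented consumers consistent
// with other record-aware processors, while non-record consumers retain the legacy
// kafka.count attribute, so downstream flows can count records without knowing which
// consumer variant produced the FlowFile.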