private void appendLine(final ProcessSession session, final Map<Relationship, Map<Group, FlowFile>> flowFileMap, final Relationship relationship,
        final FlowFile original, final String line, final Charset charset, final Group group) {

    final Map<Group, FlowFile> groupToFlowFileMap = flowFileMap.computeIfAbsent(relationship, k -> new HashMap<>());

    FlowFile flowFile = groupToFlowFileMap.get(group);
    if (flowFile == null) {
        flowFile = session.create(original);
    }

    flowFile = session.append(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            out.write(line.getBytes(charset));
        }
    });

    groupToFlowFileMap.put(group, flowFile);
}
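// A minimal flush sketch (an assumption, not part of the original source): once every line
// has been routed through appendLine(...), the latest FlowFile references held in flowFileMap
// would typically be transferred to their relationships. Variable names below are hypothetical.
for (final Map.Entry<Relationship, Map<Group, FlowFile>> entry : flowFileMap.entrySet()) {
    final Relationship relationship = entry.getKey();
    for (final FlowFile groupFlowFile : entry.getValue().values()) {
        session.transfer(groupFlowFile, relationship);
    }
}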
flowFile = session.append(flowFile, out -> {
    boolean useDemarcator = demarcateFirstRecord;
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        // ...
/**
 * Executes the given callback against the content corresponding to the
 * given FlowFile, such that any data written to the OutputStream of the
 * content will be appended to the end of the FlowFile.
 * <p>
 * <i>Note</i>: The OutputStream provided to the given OutputStreamCallback
 * will not be accessible once this method has completed its execution.
 *
 * @param flowFile the flowfile for which content should be appended
 * @param writer used to write new bytes to the flowfile content
 * @return the updated flowfile reference for the new content
 * @throws FlowFileAccessException if an attempt is made to access the OutputStream
 *         provided to the given OutputStreamCallback after this method has completed its execution
 */
@Override
public SessionFile append(FlowFile flowFile, OutputStreamCallback writer) throws FlowFileAccessException {
    SessionFile sf = wrap(flowFile);
    sf.flowFile = onMod(s.append(sf.flowFile, writer));
    return sf;
}
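// A minimal usage sketch of the append contract described above (hypothetical caller code,
// assuming a REL_SUCCESS relationship and UTF-8 content). Each call returns a new FlowFile
// reference, which must replace the previous one; the OutputStream must not be used
// outside the callback.
FlowFile flowFile = session.create();
flowFile = session.append(flowFile, out -> out.write("first chunk\n".getBytes(StandardCharsets.UTF_8)));
flowFile = session.append(flowFile, out -> out.write("second chunk\n".getBytes(StandardCharsets.UTF_8)));
session.transfer(flowFile, REL_SUCCESS);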
private void finalizeFlowFile(final ProcessSession session, final HBaseClientService hBaseClientService, FlowFile flowFile,
        final String tableName, Long rowsPulled, Exception e) {
    Relationship rel = REL_SUCCESS;
    flowFile = session.putAttribute(flowFile, HBASE_ROWS_COUNT_ATTR, rowsPulled.toString());

    final AtomicReference<IOException> ioe = new AtomicReference<>(null);
    flowFile = session.append(flowFile, (out) -> {
        try {
            out.write("]".getBytes());
        } catch (IOException ei) {
            ioe.set(ei);
        }
    });

    if (e != null || ioe.get() != null) {
        // Record whichever exception is non-null as the error attribute.
        flowFile = session.putAttribute(flowFile, "scanhbase.error", (e != null ? e : ioe.get()).toString());
        rel = REL_FAILURE;
    } else {
        session.getProvenanceReporter().receive(flowFile, hBaseClientService.toTransitUri(tableName, "{ids}"));
    }
    session.transfer(flowFile, rel);
}
flowFile = session.append(flowFile, out -> {
    boolean useDemarcator = demarcateFirstRecord;
    for (final ConsumerRecord<byte[], byte[]> record : recordList) {
        // ...
flowFile = session.append(flowFile, (out) -> {
    if (rowsPulledHolder.get() > 0) {
        out.write(JSON_ARRAY_DELIM);
        // ...
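// Sketch of the surrounding pattern (an assumption, not the original processor code): the
// FlowFile content is built up as a JSON array across several append calls, with a delimiter
// written before every row except the first, and finalizeFlowFile(...) above closing the
// array with "]". rowJsonStrings and rowsPulledHolder are hypothetical names here.
flowFile = session.append(flowFile, out -> out.write("[".getBytes(StandardCharsets.UTF_8)));
for (final String rowJson : rowJsonStrings) {
    final boolean first = rowsPulledHolder.get() == 0;
    flowFile = session.append(flowFile, out -> {
        if (!first) {
            out.write(',');  // corresponds to JSON_ARRAY_DELIM in the excerpt above
        }
        out.write(rowJson.getBytes(StandardCharsets.UTF_8));
    });
    rowsPulledHolder.incrementAndGet();
}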
/**
 * Will concatenate the contents of the provided array of {@link FlowFile}s
 * into a single {@link FlowFile}. While this operation is as general as
 * described in the previous sentence, in the context of this processor
 * there can only be two {@link FlowFile}s, with the first {@link FlowFile}
 * representing the header content of the split and the second
 * {@link FlowFile} representing the split itself.
 */
private FlowFile concatenateContents(FlowFile sourceFlowFile, ProcessSession session, FlowFile... flowFiles) {
    FlowFile mergedFlowFile = session.create(sourceFlowFile);
    for (FlowFile flowFile : flowFiles) {
        mergedFlowFile = session.append(mergedFlowFile, new OutputStreamCallback() {
            @Override
            public void process(OutputStream out) throws IOException {
                try (InputStream is = session.read(flowFile)) {
                    IOUtils.copy(is, out);
                }
            }
        });
    }
    session.remove(flowFiles[1]); // in current usage we always have 2 files
    return mergedFlowFile;
}
validFF.set(session.append(validFF.get(), new OutputStreamCallback() {
    @Override
    public void process(OutputStream out) throws IOException {
        // ...

invalidFF.set(session.append(invalidFF.get(), new OutputStreamCallback() {
    @Override
    public void process(OutputStream out) throws IOException {
        // ...
private byte[] initAvroWriter(ProcessSession session, String codec, DataFileStream<GenericRecord> reader,
        DataFileWriter<GenericRecord> writer, AtomicReference<FlowFile> flowFileRef) {

    writer.setCodec(CodecFactory.fromString(codec));
    // Transfer metadata (this is a subset of the incoming file)
    for (String metaKey : reader.getMetaKeys()) {
        if (!RESERVED_METADATA.contains(metaKey)) {
            writer.setMeta(metaKey, reader.getMeta(metaKey));
        }
    }

    final ByteArrayOutputStream avroHeader = new ByteArrayOutputStream();
    flowFileRef.set(session.append(flowFileRef.get(), (out) -> {
        // Create the writer so that records can be appended later.
        writer.create(reader.getSchema(), avroHeader);
        writer.close();
        final byte[] header = avroHeader.toByteArray();
        out.write(header);
    }));

    // Capture the Avro header byte array that was just written to the FlowFile.
    // This is needed when Avro records are appended to the same FlowFile.
    return avroHeader.toByteArray();
}
/**
 * Disposes of the message by converting it to a {@link FlowFile} and transferring
 * it to the REL_SUCCESS relationship.
 */
private void transfer(Message emailMessage, ProcessContext context, ProcessSession processSession) {
    long start = System.nanoTime();
    FlowFile flowFile = processSession.create();

    flowFile = processSession.append(flowFile, out -> {
        try {
            emailMessage.writeTo(out);
        } catch (MessagingException e) {
            throw new IOException(e);
        }
    });

    long executionDuration = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);

    String fromAddressesString = "";
    try {
        Address[] fromAddresses = emailMessage.getFrom();
        if (fromAddresses != null) {
            fromAddressesString = Arrays.asList(fromAddresses).toString();
        }
    } catch (MessagingException e) {
        this.logger.warn("Failed to retrieve 'From' attribute from Message.");
    }

    processSession.getProvenanceReporter().receive(flowFile, this.displayUrl, "Received message from " + fromAddressesString, executionDuration);
    this.getLogger().info("Successfully received {} from {} in {} millis", new Object[]{flowFile, fromAddressesString, executionDuration});
    processSession.transfer(flowFile, REL_SUCCESS);
}
FlowFile flowFile = processSession.create();
flowFile = processSession.append(flowFile, new OutputStreamCallback() {
    @Override
    public void process(final OutputStream out) throws IOException {
        // ...
private void appendAvroRecords(ProcessSession session, byte[] avroHeader, DataFileWriter<GenericRecord> writer,
        AtomicReference<FlowFile> flowFileRef, List<HiveStreamingRecord> hRecords) {

    flowFileRef.set(session.append(flowFileRef.get(), (out) -> {
        if (hRecords != null) {
            // Initialize the writer again in append mode, so that the Avro header is written only once.
            writer.appendTo(new SeekableByteArrayInput(avroHeader), out);
            try {
                for (HiveStreamingRecord hRecord : hRecords) {
                    writer.append(hRecord.getRecord());
                }
            } catch (IOException ioe) {
                // The records were put to Hive Streaming successfully, but there was an error while writing the
                // Avro records to the flow file. Log as an error and move on.
                logger.error("Error writing Avro records (which were sent successfully to Hive Streaming) to the flow file, " + ioe, ioe);
            }
        }
        writer.close();
    }));
}
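// Hypothetical driver sketch (names and loop are assumptions, not the original source): the
// header captured by initAvroWriter(...) is written to the FlowFile once, and each subsequent
// batch of records is appended to the same FlowFile through appendAvroRecords(...).
final byte[] avroHeader = initAvroWriter(session, codec, reader, writer, flowFileRef);
for (List<HiveStreamingRecord> batch : recordBatches) {
    appendAvroRecords(session, avroHeader, writer, flowFileRef, batch);
}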
try {
    final byte[] rawMessage = event.getData();

    FlowFile appendedFlowFile = session.append(batch.getFlowFile(), new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            // ...
split = session.append(split, new OutputStreamCallback() {
    @Override
    public void process(OutputStream out) throws IOException {
        // ...
flowFile = session.append(flowFile, new OutputStreamCallback() {
    @Override
    public void process(final OutputStream out) throws IOException {
        // ...
attributes.put(ATTACHMENT_ORIGINAL_UUID, parentUuid);
attributes.put(ATTACHMENT_ORIGINAL_FILENAME, originalFlowFileName);

split = session.append(split, new OutputStreamCallback() {
    @Override
    public void process(OutputStream out) throws IOException {
        // ...