/**
 * Return the highest sequence ID found across all storefiles in the given list.
 */
public static OptionalLong getMaxSequenceIdInList(Collection<HStoreFile> sfs) {
  return sfs.stream().mapToLong(HStoreFile::getMaxSequenceId).max();
}
public static long max(long... nums) {
    if (nums.length == 0) {
        return Long.MIN_VALUE;
    }
    // Safe: the stream is guaranteed non-empty at this point.
    return Arrays.stream(nums).max().getAsLong();
}
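// A quick sketch of the sentinel behavior of max(long...) above; the calls and values
// are illustrative, not from the original source.
long a = max(3L, 9L, 4L); // 9
long b = max();           // Long.MIN_VALUE: the "no values" sentinel, which callers must
                          // not confuse with a genuine maximum of Long.MIN_VALUE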
@Override
public long getLastSequenceId() {
    // Return the highest sequence id across all partitions. This will be correct,
    // since there is a single id generator across all partitions for the same producer.
    return producers.stream().map(Producer::getLastSequenceId).mapToLong(Long::longValue).max().orElse(-1);
}
/**
 * Return the largest memstoreTS found across all storefiles in the given list. Store files that
 * were created by a mapreduce bulk load are ignored, as they do not correspond to any specific
 * put operation, and thus do not have a memstoreTS associated with them.
 */
public static OptionalLong getMaxMemStoreTSInList(Collection<HStoreFile> sfs) {
  return sfs.stream().filter(sf -> !sf.isBulkLoadResult()).mapToLong(HStoreFile::getMaxMemStoreTS)
    .max();
}
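// Both helpers above return OptionalLong, which is empty for an empty list (or, for the
// memstoreTS variant, when every file is a bulk-load result), so callers choose their own
// default. A hypothetical call site; getStorefiles() and the -1 fallback are illustrative
// assumptions, not part of the original source.
Collection<HStoreFile> storeFiles = getStorefiles();
long maxSeqId = getMaxSequenceIdInList(storeFiles).orElse(-1L);
OptionalLong maxMemStoreTS = getMaxMemStoreTSInList(storeFiles);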
@Override
public OptionalLong getMaxStoreFileAge() {
    return getStoreFileAgeStream().max();
}
@Override
public OptionalLong execute() {
    try (final LongStream stream = buildPrevious()) {
        return stream.max();
    }
}
default OptionalLong max(LongPipeline pipeline) {
    requireNonNull(pipeline);
    return optimize(pipeline).getAsLongStream().max();
}
public Bucket build() {
    ArrayList<DataSegment> segmentsList = new ArrayList<>(segments.size());
    double[] leftSum = new double[segments.size()];
    double[] rightSum = new double[segments.size()];
    int i = 0;
    for (SegmentAndSum segmentAndSum : segments) {
        segmentsList.add(segmentAndSum.dataSegment);
        leftSum[i] = segmentAndSum.leftSum;
        rightSum[i] = segmentAndSum.rightSum;
        ++i;
    }
    long bucketEndMillis = segmentsList
        .stream()
        .mapToLong(s -> s.getInterval().getEndMillis())
        .max()
        .orElseGet(interval::getEndMillis);
    return new Bucket(Intervals.utc(interval.getStartMillis(), bucketEndMillis), segmentsList, leftSum, rightSum);
}
@Override
public long getMaxEventId() {
    return getPartitions().stream()
        .mapToLong(part -> part.getMaxEventId())
        .max()
        .orElse(-1L);
}
@Override
public long getNextPrivateWorkerKeyVersion(WorkerTokenServiceType type, String topologyId) {
    String path = ClusterUtils.secretKeysPath(type, topologyId);
    try {
        List<String> versions = stateStorage.get_children(path, false);
        return versions.stream().mapToLong(Long::valueOf).max().orElse(0);
    } catch (RuntimeException e) {
        if (Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) {
            // If the node does not exist, then the version must be 0.
            return 0;
        }
        throw e;
    }
}
/**
 * Check whether a topology is in throttle-on status or not: if the backpressure/storm-id dir is not empty, this topology has
 * throttle-on, otherwise throttle-off. But if the backpressure/storm-id dir is not empty and has not been updated for more than
 * timeoutMs, we treat it as throttle-off. This prevents the spouts from getting stuck indefinitely if something goes wrong.
 *
 * @param stormId The topology Id
 * @param timeoutMs How long until the backpressure znode is invalid.
 * @param callback The callback function
 * @return True if the backpressure/storm-id dir is not empty and at least one of the backpressure znodes has not timed out; false otherwise.
 */
@Override
public boolean topologyBackpressure(String stormId, long timeoutMs, Runnable callback) {
    if (callback != null) {
        backPressureCallback.put(stormId, callback);
    }
    String path = ClusterUtils.backpressureStormRoot(stormId);
    long mostRecentTimestamp = 0;
    if (stateStorage.node_exists(path, false)) {
        List<String> children = stateStorage.get_children(path, callback != null);
        mostRecentTimestamp = children.stream()
            .map(childPath -> stateStorage.get_data(ClusterUtils.backpressurePath(stormId, childPath), false))
            .filter(data -> data != null)
            .mapToLong(data -> ByteBuffer.wrap(data).getLong())
            .max()
            .orElse(0);
    }
    boolean ret = ((System.currentTimeMillis() - mostRecentTimestamp) < timeoutMs);
    LOG.debug("topology backpressure is {}", ret ? "on" : "off");
    return ret;
}
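// The reader above interprets each child znode's payload as a single big-endian long
// timestamp (ByteBuffer.wrap(data).getLong()). A hedged sketch of the matching writer
// side: only the 8-byte layout is implied by the code above; the variable name is
// hypothetical.
byte[] payload = ByteBuffer.allocate(Long.BYTES)
    .putLong(System.currentTimeMillis())
    .array();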
    .flatMap(source -> source.getSplits().stream())
    .mapToLong(ScheduledSplit::getSequenceId)
    .max()
    .orElse(maxAcknowledgedSplit);
return updatedUnpartitionedSources;
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                .mapToLong(record -> record.offset())
                .max()
                .getAsLong();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));

            // write records to content repository and session
            if (demarcatorBytes == null) {
                totalFlowFiles += messages.size();
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            } else {
                writeData(getProcessSession(), messages, partition);
            }
        }
    });
}
@Override
public void executeBatch(Integer group, Batch<MockRequest<RES>, RES> batch) {
    long maxLatency = batch.keys().stream().mapToLong(MockRequest::getLatency).max().getAsLong();
    _scheduler.schedule(() -> {
        try {
            batch.foreach((req, promise) -> {
                try {
                    promise.done(req.getResult());
                } catch (Exception e) {
                    promise.fail(e);
                }
            });
        } catch (Exception e) {
            batch.failAll(e);
        }
    }, maxLatency, TimeUnit.MILLISECONDS);
}
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {
    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            // update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                .mapToLong(record -> record.offset())
                .max()
                .getAsLong();

            // write records to content repository and session
            if (demarcatorBytes != null) {
                writeDemarcatedData(getProcessSession(), messages, partition);
            } else if (readerFactory != null && writerFactory != null) {
                writeRecordData(getProcessSession(), messages, partition);
            } else {
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            }

            totalMessages += messages.size();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));
        }
    });
}
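// Note the maxOffset + 1L above: Kafka's committed offset is the position of the *next*
// record to consume, not the last one consumed, so a restart resumes after the last record
// written out. An illustrative commit of the accumulated map; "consumer" is a hypothetical
// KafkaConsumer reference, not part of the original snippet.
consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(maxOffset + 1L)));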
long max = initialData.stream()
    .mapToLong(keyFunction::apply)
    .max()
    .getAsLong();
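// The bare getAsLong() above throws NoSuchElementException when initialData is empty.
// A defensive variant, keeping the snippet's own names (the exception message is ours):
long safeMax = initialData.stream()
    .mapToLong(keyFunction::apply)
    .max()
    .orElseThrow(() -> new IllegalStateException("initialData must not be empty"));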
private void testAvgMinMaxCounter(AvgMinMaxCounter metric, int size) {
    final long[] values = generateRandomValues(size);
    for (long value : values) {
        metric.add(value);
    }
    long expectedMin = Arrays.stream(values).min().orElse(0);
    long expectedMax = Arrays.stream(values).max().orElse(0);
    long expectedSum = Arrays.stream(values).sum();
    long expectedCnt = values.length;
    // Cast to double before dividing so the expected average keeps its fractional part
    // (the original long division truncated it).
    double expectedAvg = (double) expectedSum / Math.max(1, expectedCnt);
    Assert.assertEquals(expectedAvg, metric.getAvg(), (double) 200);
    Assert.assertEquals(expectedMin, metric.getMin());
    Assert.assertEquals(expectedMax, metric.getMax());
    Assert.assertEquals(expectedCnt, metric.getCount());
    Assert.assertEquals(expectedSum, metric.getTotal());

    final Map<String, Object> results = metric.values();
    Assert.assertEquals(expectedMax, (long) results.get("max_test"));
    Assert.assertEquals(expectedMin, (long) results.get("min_test"));
    Assert.assertEquals(expectedCnt, (long) results.get("cnt_test"));
    Assert.assertEquals(expectedAvg, (double) results.get("avg_test"), (double) 200);

    metric.reset();
}