@Override
public long checkFrequencyMillis()
{
    // Use the most frequent (smallest) check interval among all composed
    // thresholds, so no individual threshold's schedule is missed; fall back
    // to the default frequency when there are no thresholds at all.
    return Stream.of( thresholds )
            .mapToLong( CheckPointThreshold::checkFrequencyMillis )
            .min().orElse( DEFAULT_CHECKING_FREQUENCY_MILLIS );
}
}; // NOTE(review): closes an enclosing anonymous class that begins outside this chunk
private synchronized boolean allAssignmentsHavePropagated(Iterable<QueryExecution> queries) { if (nodes.isEmpty()) { // Assignments can't have propagated, if there are no visible nodes. return false; } long newestAssignment = ImmutableList.copyOf(queries).stream() .map(QueryExecution::getMemoryPool) .mapToLong(VersionedMemoryPoolId::getVersion) .min() .orElse(-1); long mostOutOfDateNode = nodes.values().stream() .mapToLong(RemoteNodeMemory::getCurrentAssignmentVersion) .min() .orElse(Long.MAX_VALUE); return newestAssignment <= mostOutOfDateNode; }
@Override public long getLastSequenceId() { // Return the highest sequence id across all partitions. This will be correct, // since there is a single id generator across all partitions for the same producer return producers.stream().map(Producer::getLastSequenceId).mapToLong(Long::longValue).max().orElse(-1); }
@Override
public Long apply(HStoreFile sf) {
    // Bulk-load timestamp of the store file; files without one yield
    // Long.MAX_VALUE. NOTE(review): presumably so such files sort after
    // timestamped ones — confirm against the comparator that uses this.
    return sf.getBulkLoadTimestamp().orElse(Long.MAX_VALUE);
}
} // closes an enclosing (anonymous) class declared outside this chunk
@Override
public Long apply(HStoreFile sf) {
    // Maximum cell timestamp of the store file; files without one yield
    // Long.MAX_VALUE. NOTE(review): presumably so such files sort after
    // timestamped ones — confirm against the comparator that uses this.
    return sf.getMaximumTimestamp().orElse(Long.MAX_VALUE);
}
} // closes an enclosing (anonymous) class declared outside this chunk
/**
 * Asserts that every current subscriber has requested at least {@code n}
 * elements; throws {@link AssertionError} otherwise.
 */
@Override
public ColdTestPublisher<T> assertMinRequested(long n) {
    // Snapshot the subscriber array before inspecting it.
    ColdTestPublisherSubscription<T>[] currentSubscribers = subscribers;
    long smallestRequest = Stream.of(currentSubscribers)
            .mapToLong(subscription -> subscription.requested)
            .min()
            .orElse(0);
    if (smallestRequest < n) {
        throw new AssertionError("Expected minimum request of " + n + "; got " + smallestRequest);
    }
    return this;
}
/**
 * Asserts that every current subscriber has requested at least {@code n}
 * elements; throws {@link AssertionError} otherwise.
 */
@Override
public DefaultTestPublisher<T> assertMinRequested(long n) {
    // Snapshot the subscriber array before inspecting it.
    TestPublisherSubscription<T>[] currentSubscribers = subscribers;
    long smallestRequest = Stream.of(currentSubscribers)
            .mapToLong(subscription -> subscription.requested)
            .min()
            .orElse(0);
    if (smallestRequest < n) {
        throw new AssertionError("Expected minimum request of " + n + "; got " + smallestRequest);
    }
    return this;
}
private long computeCost() {
    // Cheapest cost among the required (MUST / FILTER) clauses: the conjunction
    // can never match more docs than its most selective required clause.
    OptionalLong minRequiredCost = Stream.concat(
            subs.get(Occur.MUST).stream(),
            subs.get(Occur.FILTER).stream())
        .mapToLong(ScorerSupplier::cost)
        .min();
    if (minRequiredCost.isPresent() && minShouldMatch == 0) {
        // With minShouldMatch == 0 the optional clauses do not constrain the
        // match set, so the cheapest required clause bounds the overall cost.
        return minRequiredCost.getAsLong();
    } else {
        // Cost of matching at least minShouldMatch of the optional (SHOULD) clauses.
        final Collection<ScorerSupplier> optionalScorers = subs.get(Occur.SHOULD);
        final long shouldCost = MinShouldMatchSumScorer.cost(
            optionalScorers.stream().mapToLong(ScorerSupplier::cost),
            optionalScorers.size(), minShouldMatch);
        // Overall cost is bounded by whichever side is more selective; when
        // there are no required clauses, minRequiredCost is absent and the
        // optional cost alone decides.
        return Math.min(minRequiredCost.orElse(Long.MAX_VALUE), shouldCost);
    }
}
default long getLastMajorCompactionTimestamp(TableName table) {
    // Walk every region of the given table across all live region servers and
    // return the minimum last-major-compaction timestamp, i.e. the point in
    // time by which ALL of the table's regions had been major-compacted.
    // Returns 0 when the table has no regions or no metrics are available.
    return getLiveServerMetrics().values().stream()
        .flatMap(s -> s.getRegionMetrics().values().stream())
        .filter(r -> RegionInfo.getTable(r.getRegionName()).equals(table))
        .mapToLong(RegionMetrics::getLastMajorCompactionTimestamp).min().orElse(0);
}
/**
 * Returns the value of the {@code Content-Length} header, or {@code -1L}
 * when no such header is present.
 *
 * @return The value of the Content-Length header or -1L if none specified
 */
default long getContentLength() {
    return getHeaders().contentLength().orElse(-1L);
}
@Override public long getNextPrivateWorkerKeyVersion(WorkerTokenServiceType type, String topologyId) { String path = ClusterUtils.secretKeysPath(type, topologyId); try { List<String> versions = stateStorage.get_children(path, false); return versions.stream().mapToLong(Long::valueOf).max().orElse(0); } catch (RuntimeException e) { if (Utils.exceptionCauseIsInstanceOf(KeeperException.NoNodeException.class, e)) { //If the node does not exist, then the version must be 0 return 0; } throw e; } }
/**
 * Returns the largest event id reported by any partition, or -1L when
 * there are no partitions.
 */
@Override
public long getMaxEventId() {
    return getPartitions().stream()
            .mapToLong(partition -> partition.getMaxEventId())
            .max()
            .orElse(-1L);
}
/**
 * Return a list of boundaries for multiple compaction output in ascending order.
 */
private List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact, long now) {
    // Earliest cell timestamp among all input files; files without a minimum
    // timestamp are treated as Long.MAX_VALUE so they never widen the range.
    long minTimestamp = filesToCompact.stream().mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min()
        .orElse(Long.MAX_VALUE);
    List<Long> boundaries = new ArrayList<>();
    // Add startMillis of all windows between now and min timestamp
    for (CompactionWindow window = getIncomingWindow(now); window
        .compareToTimestamp(minTimestamp) > 0; window = window.nextEarlierWindow()) {
        boundaries.add(window.startMillis());
    }
    // Long.MIN_VALUE acts as a catch-all lower bound for anything older than
    // the earliest window visited above.
    boundaries.add(Long.MIN_VALUE);
    // Windows were visited newest-first; reverse to return ascending order.
    Collections.reverse(boundaries);
    return boundaries;
}
/**
 * Check whether a topology is in throttle-on status or not: if the backpressure/storm-id dir is not empty, this topology has
 * throttle-on, otherwise throttle-off. But if the backpressure/storm-id dir is not empty and has not been updated for more than
 * timeoutMs, we treat it as throttle-off. This will prevent the spouts from getting stuck indefinitely if something wrong happens.
 *
 * @param stormId The topology Id
 * @param timeoutMs How long until the backpressure znode is invalid.
 * @param callback The callback function
 * @return True if the backpressure/storm-id dir is not empty and at least one of the backpressure znodes has not timed out; false otherwise.
 */
@Override
public boolean topologyBackpressure(String stormId, long timeoutMs, Runnable callback) {
    if (callback != null) {
        backPressureCallback.put(stormId, callback);
    }
    String path = ClusterUtils.backpressureStormRoot(stormId);
    long mostRecentTimestamp = 0;
    if (stateStorage.node_exists(path, false)) {
        // Only set a watch (second arg) when the caller supplied a callback.
        List<String> children = stateStorage.get_children(path, callback != null);
        // Each child znode's payload is a big-endian long timestamp; keep the newest.
        mostRecentTimestamp = children.stream()
            .map(childPath -> stateStorage.get_data(ClusterUtils.backpressurePath(stormId, childPath), false))
            .filter(data -> data != null)
            .mapToLong(data -> ByteBuffer.wrap(data).getLong())
            .max()
            .orElse(0);
    }
    // Throttle-on only while at least one znode's timestamp is fresher than timeoutMs.
    boolean ret = ((System.currentTimeMillis() - mostRecentTimestamp) < timeoutMs);
    LOG.debug("topology backpressure is {}", ret ? "on" : "off");
    return ret;
}
.mapToLong(ScheduledSplit::getSequenceId) .max() .orElse(maxAcknowledgedSplit); return updatedUnpartitionedSources;
/**
 * Adapts the underlying Caffeine cache statistics to the Druid
 * {@code CacheStats} shape. The size-in-bytes figure is only available for
 * weight-based eviction policies; otherwise -1 is reported.
 */
@Override
public org.apache.druid.client.cache.CacheStats getStats() {
    final CacheStats stats = cache.stats();
    // filter() drops non-weighted eviction policies up front, so the nested
    // Optional<OptionalLong> unwraps to -1 in every "not weighted" case.
    final long size = cache
        .policy().eviction()
        .filter(eviction -> eviction.isWeighted())
        .map(eviction -> eviction.weightedSize())
        .orElse(OptionalLong.empty())
        .orElse(-1);
    return new org.apache.druid.client.cache.CacheStats(
        stats.hitCount(),
        stats.missCount(),
        cache.estimatedSize(),
        size,
        stats.evictionCount(),
        0,
        stats.loadFailureCount()
    );
}
/**
 * Feeds {@code size} random values into the counter and checks min, max,
 * count, total and average against values computed independently here.
 */
private void testAvgMinMaxCounter(AvgMinMaxCounter metric, int size) {
    final long[] values = generateRandomValues(size);
    for (long value : values) {
        metric.add(value);
    }
    long expectedMin = Arrays.stream(values).min().orElse(0);
    long expectedMax = Arrays.stream(values).max().orElse(0);
    long expectedSum = Arrays.stream(values).sum();
    long expectedCnt = values.length;
    // Cast before dividing: expectedSum / expectedCnt would be long integer
    // division and silently truncate the fractional part of the average.
    // Math.max guards against division by zero when size == 0.
    double expectedAvg = (double) expectedSum / Math.max(1, expectedCnt);
    Assert.assertEquals(expectedAvg, metric.getAvg(), (double)200);
    Assert.assertEquals(expectedMin, metric.getMin());
    Assert.assertEquals(expectedMax, metric.getMax());
    Assert.assertEquals(expectedCnt, metric.getCount());
    Assert.assertEquals(expectedSum, metric.getTotal());
    // The values() map exposes the same figures under suffixed keys.
    final Map<String, Object> results = metric.values();
    Assert.assertEquals(expectedMax, (long)results.get("max_test"));
    Assert.assertEquals(expectedMin, (long)results.get("min_test"));
    Assert.assertEquals(expectedCnt, (long)results.get("cnt_test"));
    Assert.assertEquals(expectedAvg, (double)results.get("avg_test"), (double)200);
    metric.reset();
}
/**
 * Converts engine-side binary column statistics into the Thrift metastore
 * representation.
 */
private static ColumnStatisticsObj createBinaryStatistics(String columnName, HiveType columnType, HiveColumnStatistics statistics, OptionalLong rowCount)
{
    BinaryColumnStatsData binaryData = new BinaryColumnStatsData();
    statistics.getNullsCount().ifPresent(binaryData::setNumNulls);
    binaryData.setMaxColLen(statistics.getMaxValueSizeInBytes().orElse(0));
    // Average length is derived from total size, row count and null count.
    binaryData.setAvgColLen(
            getAverageColumnLength(statistics.getTotalSizeInBytes(), rowCount, statistics.getNullsCount())
                    .orElse(0));
    return new ColumnStatisticsObj(columnName, columnType.toString(), binaryStats(binaryData));
}
/**
 * Converts engine-side string column statistics into the Thrift metastore
 * representation.
 */
private static ColumnStatisticsObj createStringStatistics(String columnName, HiveType columnType, HiveColumnStatistics statistics, OptionalLong rowCount)
{
    StringColumnStatsData stringData = new StringColumnStatsData();
    statistics.getNullsCount().ifPresent(stringData::setNumNulls);
    // Distinct-values count must be adjusted for nulls before it matches the
    // metastore's definition.
    toMetastoreDistinctValuesCount(statistics.getDistinctValuesCount(), statistics.getNullsCount())
            .ifPresent(stringData::setNumDVs);
    stringData.setMaxColLen(statistics.getMaxValueSizeInBytes().orElse(0));
    // Average length is derived from total size, row count and null count.
    stringData.setAvgColLen(
            getAverageColumnLength(statistics.getTotalSizeInBytes(), rowCount, statistics.getNullsCount())
                    .orElse(0));
    return new ColumnStatisticsObj(columnName, columnType.toString(), stringStats(stringData));
}
private void addStoreFile() throws IOException {
    // Test helper: writes a brand-new store file into the same directory as the
    // store's existing files, stamped with a sequence id one higher than the
    // store's current maximum. NOTE(review): presumably so a subsequent refresh
    // picks it up as the newest file — confirm against the calling test.
    HStoreFile f = this.store.getStorefiles().iterator().next();
    Path storedir = f.getPath().getParent();
    // 0L fallback covers a store with no sequence id yet.
    long seqid = this.store.getMaxSequenceId().orElse(0L);
    Configuration c = TEST_UTIL.getConfiguration();
    FileSystem fs = FileSystem.get(c);
    HFileContext fileContext = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
    StoreFileWriter w = new StoreFileWriter.Builder(c, new CacheConfig(c), fs)
        .withOutputDir(storedir)
        .withFileContext(fileContext)
        .build();
    // Write metadata only — the file carries no cells; majorCompaction=false.
    w.appendMetadata(seqid + 1, false);
    w.close();
    LOG.info("Added store file:" + w.getPath());
}