@Override
public OptionalLong execute() {
    // Terminal operation: materialize the upstream pipeline, take its first
    // element, and guarantee the stream is closed via try-with-resources.
    try (final LongStream source = buildPrevious()) {
        return source.findFirst();
    }
} }
/**
 * Optimizes the given pipeline and returns the first element of the
 * resulting long stream, if any.
 *
 * @param pipeline the pipeline to run; must not be {@code null}
 * @return the first element, or an empty {@code OptionalLong}
 */
default OptionalLong findFirst(LongPipeline pipeline) {
    // requireNonNull returns its argument, so the null check and the
    // optimize call fuse into one expression with identical semantics.
    return optimize(requireNonNull(pipeline)).getAsLongStream().findFirst();
}
@Override
public OptionalLong contentLength() {
    // The Content-Length header yields at most one value; parse it as a long
    // when present, otherwise the resulting OptionalLong is empty.
    return OptionalHelper.from(first(Http.Header.CONTENT_LENGTH))
            .stream()
            .mapToLong(value -> Long.parseLong(value))
            .findFirst();
}
/**
 * Reacts to a single file-system watch event: filters out paths excluded by
 * the include/exclude matchers, then restarts the application only when a
 * weak content hash of the changed file actually differs from the last one.
 */
private void onChange(final Kind<?> kind, final Path path) { try { debug("OnChange: %s(%s)", path, kind); Path candidate = relativePath(path); if (candidate == null || !includes.matches(candidate) || excludes.matches(candidate)) { debug("Ignoring change: %s", path); return; } // Weak hash check: avoids reacting to changes on conf/* that Maven propagates to target/classes. File f = candidate.toFile(); // length() and lastModified() report 0 on external paths; as a workaround, fall back to "now" in millis. long l = LongStream.of(f.length(), f.lastModified(), System.currentTimeMillis()) .filter(it -> it > 0) .findFirst() .getAsLong(); // Hash is name + first non-zero of (length, mtime, now); cheap but collision-prone by design. String h = f.getName() + ":" + l; debug("hash %s > new hash %s", hash.get(), h); // getAndSet makes the compare-and-update atomic on the shared hash holder. if (!hash.getAndSet(h).equals(h)) { debug("File change detected: %s", path); // Hash changed: reload the application. startApp(args); } else { debug("Ignoring change: %s", path); } } catch (Exception ex) { // NOTE(review): printStackTrace() bypasses the debug() logging used everywhere
// else in this method — consider routing failures through the same channel. ex.printStackTrace(); } }
.filter(p -> p instanceof CreateTableProcedure).map(p -> (CreateTableProcedure) p) .filter(p -> p.getTableName().equals(RSGROUP_TABLE_NAME)).mapToLong(Procedure::getProcId) .findFirst(); long procId; if (optProcId.isPresent()) {
long getFragmentHost() { return wiring.entrySet().stream() // .filter(e -> e.getKey().startsWith(HostNamespace.HOST_NAMESPACE)) // .map(Map.Entry::getValue) // .mapToLong(this::getBundleId) // .findFirst() // .orElse(-1); }
@Override
public long nextLong(long bound) {
    // Draw exactly one value in [0, bound) from the delegate generator.
    final OptionalLong drawn = random.longs(1, 0, bound).findFirst();
    return drawn.getAsLong();
}
@Override public OptionalLong findFirst() { // This is a terminal operation return evalAndclose(() -> stream.findFirst()); }
@Override
public long nextLong(long bound) {
    // One-element stream over [0, bound); getAsLong is safe since the
    // stream always yields exactly one value.
    return random.longs(1, 0, bound)
            .findFirst()
            .getAsLong();
}
/**
 * Returns the max-logical-page-size stat of the first table manager found,
 * or 0 when there are no tables.
 */
// NOTE(review): despite the name, this is NOT the maximum across all tables —
// findFirst() takes one value in unspecified map iteration order. Presumably
// every table shares the same configured page size; confirm with callers
// before relying on this as a true maximum.
@Override public long getMaxLogicalPageSize() { return tables.values() .stream() .map(AbstractTableManager::getStats) .mapToLong(TableManagerStats::getMaxLogicalPageSize) .findFirst() .orElse(0); }
@Override
public long nextLong(long lower, long upper) {
    // Single draw from [lower, upper); the one-element stream always has a
    // first value, so getAsLong cannot throw here.
    final OptionalLong value = random.longs(1, lower, upper).findFirst();
    return value.getAsLong();
}
/**
 * Picks one stored quote uniformly at random.
 *
 * @return a random quote, or empty when the repository holds none
 */
public Optional<QuoteOfTheDay> random() {
    final long total = repository.count();
    if (total == 0) {
        return Optional.empty();
    }
    final Random rng = new Random();
    final long index;
    if (total <= Integer.MAX_VALUE) {
        // Common case: the count fits an int, so nextInt suffices.
        index = rng.nextInt((int) total);
    } else {
        // More than Integer.MAX_VALUE rows: draw a single long in [0, total).
        index = rng.longs(1, 0, total).findFirst().orElseThrow(AssertionError::new);
    }
    // Skip to the chosen row; findFirst covers the (unlikely) race where the
    // table shrank between count() and findAll().
    return StreamSupport.stream(repository.findAll().spliterator(), false)
            .skip(index)
            .findFirst();
}
/**
 * Returns the largest transaction timestamp among all entries.
 *
 * @return the latest timestamp
 * @throws java.util.NoSuchElementException if there are no entries
 */
public long lastTransactionTimestamp() {
    // max() is exactly "first element after a descending sort", but avoids
    // boxing every timestamp and the O(n log n) sort the original paid for
    // sorted(reverseOrder()).limit(1).
    return entries.stream()
            .mapToLong(e -> Long.parseLong(e[TRANSACTION_TIMESTAMP.getColumn()]))
            .max()
            .getAsLong();
}
/**
 * Returns the creation timestamp of the oldest checkpoint in the store.
 *
 * @return the smallest "created" timestamp, 0 for checkpoints lacking the
 *         metadata entry, or 0 when there are no checkpoints at all
 */
@Override
public long getOldestCheckpointCreationTimestamp() {
    // min() yields the same value as sorted().findFirst() without the
    // O(n log n) sort the original performed just to pick the smallest.
    return StreamSupport.stream(store.checkpoints().spliterator(), false)
            .map(store::allCheckpointInfo)
            // Missing metadata maps to 0, which then also acts as the floor.
            .map(i -> i.get(CHECKPOINT_METADATA + "created"))
            .mapToLong(l -> l == null ? 0 : Long.valueOf(l))
            .min()
            .orElse(0);
}
/**
 * Returns the smallest transaction timestamp among all entries.
 *
 * @return the earliest timestamp
 * @throws java.util.NoSuchElementException if there are no entries
 */
public long firstTransactionTimestamp() {
    // min() is equivalent to sorted().limit(1).findFirst() but skips the
    // full O(n log n) sort — a single O(n) pass suffices for the minimum.
    return entries.stream()
            .mapToLong(e -> Long.parseLong(e[TRANSACTION_TIMESTAMP.getColumn()]))
            .min()
            .getAsLong();
}
/**
 * Returns a uniformly random long in {@code [startInclusive, endInclusive]}.
 *
 * <p>NOTE: {@code endInclusive == Long.MAX_VALUE} makes {@code addExact}
 * throw {@link ArithmeticException}, since the exclusive bound would overflow.
 */
public static long randomLongBetween(long startInclusive, long endInclusive) {
    // longs(origin, bound) treats the bound as exclusive, so bump end by one.
    final long exclusiveBound = addExact(endInclusive, 1);
    return RANDOM.longs(startInclusive, exclusiveBound)
            .findFirst()
            .getAsLong();
}
/**
 * Runs the optimized form of the pipeline and returns its first element.
 *
 * @param pipeline the pipeline to execute; must not be {@code null}
 * @return the first element, or an empty {@code OptionalLong}
 */
default OptionalLong findFirst(LongPipeline pipeline) {
    requireNonNull(pipeline);
    // Optimize first, then execute the terminal findFirst on the long stream.
    final OptionalLong first = optimize(pipeline).getAsLongStream().findFirst();
    return first;
}
@Override
public OptionalLong findFirst() {
    // Terminal operation. Without a custom ForkJoinPool, evaluate directly;
    // otherwise route the evaluation through the configured pool.
    if (context.fjp == null) {
        return stream().findFirst();
    }
    return context.terminate(stream()::findFirst);
}
/**
 * Builds the placement for a pooled cluster. The per-cluster time offset is
 * seeded with the cluster number, so each cluster gets a stable, deterministic
 * offset within the pool's max age.
 */
static ClusterPlacement createClusterPlacement(
    Supplier<Long> timeSource, int clusterNumber, SpydraArgument.Pooling pooling) {
  final long nowSeconds = timeSource.get() / 1000;
  final long maxAgeSeconds = pooling.getMaxAge().getSeconds();
  // Deterministic offset in [0, maxAgeSeconds) derived from the cluster number.
  // NOTE(review): longs(1, 0, bound) requires bound > 0 — presumably maxAge is
  // always positive; confirm upstream validation.
  final long offset =
      new Random(clusterNumber).longs(1, 0, maxAgeSeconds).findFirst().getAsLong();
  final long generation = computeGeneration(nowSeconds + offset, maxAgeSeconds);
  return new ClusterPlacementBuilder()
      .clusterNumber(clusterNumber)
      .clusterGeneration(generation)
      .build();
}
/**
 * Looks up the id of the most recent audit log entry for the given
 * repository and event ids.
 *
 * @return the latest matching log id, or 0 when nothing matches
 */
@Override
public long getLatestLogId(String repositoryId, String... eventIds) {
    // Order by id descending with a limit of one, so the single returned
    // row (if any) is the most recent entry.
    final QueryBuilder builder = new AuditQueryBuilder()
            .predicate(Predicates.eq(LOG_REPOSITORY_ID, repositoryId))
            .and(Predicates.in(LOG_EVENT_ID, eventIds))
            .order(OrderByExprs.desc(LOG_ID))
            .limit(1);
    return queryLogs(builder).stream()
            .mapToLong(entry -> entry.getId())
            .findFirst()
            .orElse(0L);
}