@Override
protected boolean begin() {
	// Only the first caller wins the BUILDING -> STARTED transition;
	// every later (or concurrent) call is a no-op returning false.
	if (!closed.compareAndSet(BUILDING, STARTED)) {
		return false;
	}
	beginning();
	previous.begin();
	return true;
}
void check(AtomicLong lastTime, long timeout) { long now = System.nanoTime(); long elapsed = now - lastTime.get(); if (elapsed < timeout) { promiseFactory.scheduledExecutor().schedule( () -> check(lastTime, timeout), timeout - elapsed, NANOSECONDS); } else { PushEvent<T> error = PushEvent.error(new TimeoutException()); close(error); // Upstream close is needed as we have no direct backpressure upstreamClose(error); } }
@Override public <R> PushStream<R> coalesce(int count, Function<Collection<T>,R> f) { if (count <= 0) throw new IllegalArgumentException( "A coalesce operation must collect a positive number of events"); // This could be optimised to only use a single collection queue. // It would save some GC, but is it worth it? return coalesce(() -> count, f); }
@Override
public void close() {
	// Propagate a normal close; only notify upstream when this call
	// actually performed the close (close(...) returned true).
	PushEvent<T> closeEvent = PushEvent.close();
	if (close(closeEvent, true)) {
		upstreamClose(closeEvent);
	}
}
@Override
public PushStream<T> limit(Duration maxTime) {
	// The close timer is armed lazily, from beginning(), so the clock only
	// starts when the stream actually begins processing events.
	Runnable armTimer = () -> promiseFactory.scheduledExecutor()
			.schedule(() -> close(), maxTime.toNanos(), NANOSECONDS);
	AbstractPushStreamImpl<T> eventStream = new IntermediatePushStreamImpl<T>(
			psp, promiseFactory, this) {
		@Override
		protected void beginning() {
			armTimer.run();
		}
	};
	updateNext(event -> {
		try {
			return eventStream.handleEvent(event);
		} catch (Exception e) {
			close(PushEvent.error(e));
			return ABORT;
		}
	});
	return eventStream;
}
@Override
public Promise<Void> forEach(Consumer< ? super T> action) {
	Deferred<Void> resolution = promiseFactory.deferred();
	updateNext(event -> {
		try {
			switch (event.getType()) {
				case DATA :
					// Normal path: deliver the datum and keep the stream open.
					action.accept(event.getData());
					return CONTINUE;
				case CLOSE :
					resolution.resolve(null);
					break;
				case ERROR :
					resolution.fail(event.getFailure());
					break;
			}
			// Terminal event: propagate the close downstream and abort.
			close(event.nodata());
			return ABORT;
		} catch (Exception e) {
			close(PushEvent.error(e));
			return ABORT;
		}
	});
	begin();
	return resolution.getPromise();
}
previousWindowSize.set(windowSize); promiseFactory.scheduledExecutor().schedule( getWindowTask(p, f, time, maxEvents, lock, count, queueRef, timestamp, counter, previousWindowSize, ex), queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt())); }; updateNext((event) -> { try { if (eventStream.closed.get() == CLOSED) { aggregateAndForward(f, eventStream, event, queue, ex, elapsed); getQueueForInternalBuffering(maxEvents.getAsInt())); long nextWindow = time.get().toNanos(); long backpressure = previousWindowSize.getAndSet(nextWindow) - elapsed; promiseFactory.scheduledExecutor().schedule( getWindowTask(eventStream, f, time, maxEvents, lock, newCount, queueRef, timestamp, counter, previousWindowSize, ex), try { eventStream .handleEvent(PushEvent.data(f.apply( Long.valueOf(NANOSECONDS
executor.execute(() -> { try { eventStream.handleEvent(PushEvent.data(f.apply( Long.valueOf(NANOSECONDS.toMillis(elapsed)), collected))); } catch (Exception e) { PushEvent<T> error = PushEvent.error(e); close(error); upstreamClose(error); queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt())); promiseFactory.scheduledExecutor().schedule( getWindowTask(eventStream, f, time, maxEvents, lock, expectedCounter + 1, queueRef, timestamp, counter, previousWindowSize, executor),
.set(getQueueForInternalBuffering(count.getAsInt())); updateNext((event) -> { try { Queue<T> queue; getQueueForInternalBuffering(count.getAsInt())); return aggregateAndForward(f, eventStream, event, queue); } else { eventStream.handleEvent( PushEvent.data(f.apply(queue))); return eventStream.handleEvent(event.nodata()); } catch (Exception e) { close(PushEvent.error(e)); return ABORT;
try { if (!e.isTerminal()) { toReturn = downstream.handleEvent(e); } else if (count.decrementAndGet() == 0) { downstream.handleEvent(e); toReturn = ABORT; } else { downstream.handleEvent(PushEvent.error(ex)); } catch (Exception ex2) { /* Just ignore this */} toReturn = ABORT; close(); } catch (Exception ex2) { /* Just ignore this */} try { forEachEvent(consumer); source.forEachEvent(consumer); }; return eventStream.onClose(() -> { try { close(); } catch (Exception e) {
@Override
public PushStream<T> distinct() {
	// Concurrent set of values seen so far. Set#add returns false for a
	// duplicate, so filtering on add() passes each distinct value once.
	Set<T> seen = Collections.<T> newSetFromMap(new ConcurrentHashMap<>());
	return filter(seen::add);
}
/**
 * Adds the event's datum to the window queue and forwards the aggregated
 * window downstream on the supplied executor.
 */
private <R> void aggregateAndForward(BiFunction<Long,Collection<T>,R> f,
		AbstractPushStreamImpl<R> eventStream,
		PushEvent< ? extends T> event, Queue<T> queue, Executor executor,
		long elapsed) {
	executor.execute(() -> {
		try {
			// A full buffer rejects the offer; force the final datum in so
			// the closing window still contains it.
			if (!queue.offer(event.getData())) {
				((ArrayQueue<T>) queue).forcePush(event.getData());
			}
			Long windowMillis = Long.valueOf(NANOSECONDS.toMillis(elapsed));
			long status = eventStream.handleEvent(
					PushEvent.data(f.apply(windowMillis, queue)));
			// Negative back-pressure means the downstream wants no more.
			if (status < 0) {
				close();
			}
		} catch (Exception e) {
			close(PushEvent.error(e));
		}
	});
}
@Override
public PushStream<T> buffer() {
	// Create a fresh buffered stream fed by this one; when connected it
	// subscribes the new consumer and hands back this stream to close.
	return psp.createStream(subscriber -> {
		forEachEvent(subscriber);
		return this;
	});
}
@Override
public <A extends T> Promise<A[]> toArray(IntFunction<A[]> generator) {
	// Gather everything into a list first, then copy into an array
	// allocated by the caller-supplied generator at the exact size.
	return collect(Collectors.toList())
			.map(list -> list.toArray(generator.apply(list.size())));
}
/**
 * Closes the stream with the given terminal event.
 * NOTE(review): the {@code true} flag presumably requests downstream
 * delivery of the event - confirm against {@code close(PushEvent, boolean)}.
 */
protected boolean close(PushEvent<T> event) {
	return close(event, true);
}
@Override
public Promise<Optional<T>> findAny() {
	// Any element satisfies findAny, so the first one is as good as any.
	return findFirst();
}
@Override
public <U> Promise<U> reduce(U identity, BiFunction<U, ? super T,U> accumulator,
		BinaryOperator<U> combiner) {
	Deferred<U> resolution = promiseFactory.deferred();
	// Events arrive serially here, so the combiner is never needed;
	// the running value is folded atomically as each datum arrives.
	AtomicReference<U> running = new AtomicReference<>(identity);
	updateNext(event -> {
		try {
			switch (event.getType()) {
				case DATA :
					running.updateAndGet(
							current -> accumulator.apply(current, event.getData()));
					return CONTINUE;
				case CLOSE :
					resolution.resolve(running.get());
					break;
				case ERROR :
					resolution.fail(event.getFailure());
					break;
			}
			close(event.nodata());
			return ABORT;
		} catch (Exception e) {
			close(PushEvent.error(e));
			return ABORT;
		}
	});
	begin();
	return resolution.getPromise();
}
previousWindowSize.set(windowSize); promiseFactory.scheduledExecutor().schedule( getWindowTask(p, f, time, maxEvents, lock, count, queueRef, timestamp, counter, previousWindowSize, ex), queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt())); }; updateNext((event) -> { try { if (eventStream.closed.get() == CLOSED) { aggregateAndForward(f, eventStream, event, queue, ex, elapsed); getQueueForInternalBuffering(maxEvents.getAsInt())); long nextWindow = time.get().toNanos(); long backpressure = previousWindowSize.getAndSet(nextWindow) - elapsed; promiseFactory.scheduledExecutor().schedule( getWindowTask(eventStream, f, time, maxEvents, lock, newCount, queueRef, timestamp, counter, previousWindowSize, ex), try { eventStream .handleEvent(PushEvent.data(f.apply( Long.valueOf(NANOSECONDS
@Override
public PushStream<T> limit(Duration maxTime) {
	// Schedule the automatic close only when the stream begins, so idle
	// (unstarted) streams are not timed out prematurely.
	Runnable scheduleClose = () -> promiseFactory.scheduledExecutor()
			.schedule(() -> close(), maxTime.toNanos(), NANOSECONDS);
	AbstractPushStreamImpl<T> limited = new IntermediatePushStreamImpl<T>(
			psp, promiseFactory, this) {
		@Override
		protected void beginning() {
			scheduleClose.run();
		}
	};
	updateNext(event -> {
		try {
			return limited.handleEvent(event);
		} catch (Exception e) {
			close(PushEvent.error(e));
			return ABORT;
		}
	});
	return limited;
}
executor.execute(() -> { try { eventStream.handleEvent(PushEvent.data(f.apply( Long.valueOf(NANOSECONDS.toMillis(elapsed)), collected))); } catch (Exception e) { PushEvent<T> error = PushEvent.error(e); close(error); upstreamClose(error); queueRef.set(getQueueForInternalBuffering(maxEvents.getAsInt())); promiseFactory.scheduledExecutor().schedule( getWindowTask(eventStream, f, time, maxEvents, lock, expectedCounter + 1, queueRef, timestamp, counter, previousWindowSize, executor),