@Override public Mono<Long> unionAndStore(K key, Collection<K> otherKeys, K destKey) { Assert.notNull(key, "Key must not be null!"); Assert.notNull(otherKeys, "Other keys must not be null!"); Assert.notNull(destKey, "Destination key must not be null!"); return createMono(connection -> Flux.fromIterable(getKeys(key, otherKeys)) // .map(this::rawKey) // .collectList() // .flatMap(serialized -> connection.zUnionStore(rawKey(destKey), serialized))); }
/**
 * Write the given stream of {@link DataBuffer DataBuffers} to the given
 * {@code WritableByteChannel}. Does <strong>not</strong> close the channel
 * when the flux is terminated, and does <strong>not</strong>
 * {@linkplain #release(DataBuffer) release} the data buffers in the source.
 * If releasing is required, then subscribe to the returned {@code Flux}
 * with a {@link #releaseConsumer()}.
 * <p>Note that the writing process does not start until the returned
 * {@code Flux} is subscribed to.
 * @param source the stream of data buffers to be written
 * @param channel the channel to write to
 * @return a flux containing the same buffers as in {@code source}, that starts the writing
 * process when subscribed to, and that publishes any writing errors and the completion signal
 */
public static Flux<DataBuffer> write(Publisher<DataBuffer> source, WritableByteChannel channel) {
	Assert.notNull(source, "'source' must not be null");
	Assert.notNull(channel, "'channel' must not be null");

	Flux<DataBuffer> bufferFlux = Flux.from(source);
	return Flux.create(sink -> {
		// The subscriber drives the writes; disposing the sink cancels it.
		WritableByteChannelSubscriber subscriber = new WritableByteChannelSubscriber(sink, channel);
		sink.onDispose(subscriber);
		bufferFlux.subscribe(subscriber);
	});
}
/**
 * Set the request body to the given UTF-8 encoded string, releasing any
 * previously configured body buffers first.
 * @param body the body content; must not be {@literal null}
 * @return this builder
 */
@Override
public ServerRequest.Builder body(String body) {
	Assert.notNull(body, "Body must not be null");
	releaseBody();
	DataBufferFactory dataBufferFactory = new DefaultDataBufferFactory();
	// Fix: the lambda previously ignored its parameter and re-captured the
	// outer "body" variable; use the element it is given instead.
	this.body = Flux.just(body)
			.map(s -> dataBufferFactory.wrap(s.getBytes(StandardCharsets.UTF_8)));
	return this;
}
/**
 * Resolve the given resource path against the candidate locations, in order,
 * returning the first match found.
 * @param resourcePath the path to resolve
 * @param locations the candidate locations to check sequentially
 * @return a {@link Mono} emitting the first resolved {@link Resource}, or empty if none match
 */
private Mono<Resource> getResource(String resourcePath, List<? extends Resource> locations) {
	// concatMap preserves location order; next() short-circuits on the first hit.
	Flux<? extends Resource> candidates = Flux.fromIterable(locations);
	return candidates
			.concatMap(candidate -> getResource(resourcePath, candidate))
			.next();
}
/**
 * Execute {@code SCARD} for each incoming {@link KeyCommand}, emitting the
 * set cardinality paired with the originating command.
 * @param commands the stream of key commands; each key must not be {@literal null}
 * @return responses carrying the cardinality per command
 */
@Override
public Flux<NumericResponse<KeyCommand, Long>> sCard(Publisher<KeyCommand> commands) {

	return connection.execute(cmd -> Flux.from(commands).concatMap(keyCommand -> {

		Assert.notNull(keyCommand.getKey(), "Key must not be null!");

		return cmd.scard(keyCommand.getKey())
				.map(count -> new NumericResponse<>(keyCommand, count));
	}));
}
/**
 * Get elements in {@literal range} from sorted set in reverse {@literal score} ordering.
 *
 * @param key must not be {@literal null}.
 * @param range must not be {@literal null}.
 * @return the values within {@literal range}, highest score first.
 * @see <a href="http://redis.io/commands/zrevrange">Redis Documentation: ZREVRANGE</a>
 */
default Flux<ByteBuffer> zRevRange(ByteBuffer key, Range<Long> range) {

	Assert.notNull(key, "Key must not be null!");
	// Fix: range was documented as non-null but never validated, unlike the
	// sibling range-based commands; fail fast with a consistent message.
	Assert.notNull(range, "Range must not be null!");

	return zRange(Mono.just(ZRangeCommand.reverseValuesWithin(range).from(key))).flatMap(CommandResponse::getOutput)
			.map(tuple -> ByteBuffer.wrap(tuple.getValue()));
}
Assert.notNull(publisher, "Publisher must not be null"); Assert.isTrue(maxByteCount >= 0, "'maxByteCount' must be a positive number"); return Flux.defer(() -> { AtomicLong countDown = new AtomicLong(maxByteCount); return Flux.from(publisher) .skipUntil(buffer -> { long remainder = countDown.addAndGet(-buffer.readableByteCount()); return remainder < 0; }) .map(buffer -> { long remainder = countDown.get(); if (remainder < 0) { }).doOnDiscard(PooledDataBuffer.class, DataBufferUtils::release);
/** * Relay buffers from the given {@link Publisher} until the total * {@linkplain DataBuffer#readableByteCount() byte count} reaches * the given maximum byte count, or until the publisher is complete. * @param publisher the publisher to filter * @param maxByteCount the maximum byte count * @return a flux whose maximum byte count is {@code maxByteCount} */ public static Flux<DataBuffer> takeUntilByteCount(Publisher<DataBuffer> publisher, long maxByteCount) { Assert.notNull(publisher, "Publisher must not be null"); Assert.isTrue(maxByteCount >= 0, "'maxByteCount' must be a positive number"); return Flux.defer(() -> { AtomicLong countDown = new AtomicLong(maxByteCount); return Flux.from(publisher) .map(buffer -> { long remainder = countDown.addAndGet(-buffer.readableByteCount()); if (remainder < 0) { int length = buffer.readableByteCount() + (int) remainder; return buffer.slice(0, length); } else { return buffer; } }) .takeUntil(buffer -> countDown.get() <= 0); }); // no doOnDiscard necessary, as this method does not drop buffers }
@Override @SafeVarargs public final Mono<Long> remove(K key, V... members) { Assert.notNull(key, "Key must not be null!"); Assert.notEmpty(members, "Members must not be null or empty!"); Assert.noNullElements(members, "Members must not contain null elements!"); return template.createMono(connection -> Flux.fromArray(members) // .map(this::rawValue) // .collectList() // .flatMap(serialized -> connection.zSetCommands().zRem(rawKey(key), serialized))); }
ResolvableType elementType, @Nullable MimeType mimeType, @Nullable Map<String, Object> hints) { Assert.notNull(inputStream, "'inputStream' must not be null"); Assert.notNull(bufferFactory, "'bufferFactory' must not be null"); Assert.notNull(elementType, "'elementType' must not be null"); return Mono.from(inputStream).map(value -> encodeValue(value, mimeType, bufferFactory, elementType, hints, encoding)).flux(); byte[] separator = STREAM_SEPARATORS.getOrDefault(mediaType, NEWLINE_SEPARATOR); return Flux.from(inputStream).map(value -> { DataBuffer buffer = encodeValue(value, mimeType, bufferFactory, elementType, hints, ResolvableType listType = ResolvableType.forClassWithGenerics(List.class, elementType); return Flux.from(inputStream).collectList().map(list -> encodeValue(list, mimeType, bufferFactory, listType, hints, encoding)).flux();
/**
 * Return a new {@code DataBuffer} composed from joining together the given
 * {@code dataBuffers} elements. Depending on the {@link DataBuffer} type,
 * the returned buffer may be a single buffer containing all data of the
 * provided buffers, or it may be a zero-copy, composite with references to
 * the given buffers.
 * <p>If {@code dataBuffers} produces an error or if there is a cancel
 * signal, then all accumulated buffers will be
 * {@linkplain #release(DataBuffer) released}.
 * <p>Note that the given data buffers do <strong>not</strong> have to be
 * released. They will be released as part of the returned composite.
 * @param dataBuffers the data buffers that are to be composed
 * @return a buffer that is composed from the {@code dataBuffers} argument
 * @since 5.0.3
 */
public static Mono<DataBuffer> join(Publisher<DataBuffer> dataBuffers) {
	Assert.notNull(dataBuffers, "'dataBuffers' must not be null");

	return Flux.from(dataBuffers)
			.collectList()
			.filter(buffers -> !buffers.isEmpty())
			// Delegate the actual join to the factory of the first buffer.
			.map(buffers -> buffers.get(0).factory().join(buffers))
			// Release any accumulated buffers on error or cancellation.
			.doOnDiscard(PooledDataBuffer.class, DataBufferUtils::release);
}
@Override public Mono<Long> addAll(K key, Collection<? extends TypedTuple<V>> tuples) { Assert.notNull(key, "Key must not be null!"); Assert.notNull(tuples, "Key must not be null!"); return createMono(connection -> Flux.fromIterable(tuples) // .map(t -> new DefaultTuple(ByteUtils.getBytes(rawValue(t.getValue())), t.getScore())) // .collectList() // .flatMap(serialized -> connection.zAdd(rawKey(key), serialized))); }
@Override @SuppressWarnings("unchecked") public Mono<Long> remove(K key, Object... values) { Assert.notNull(key, "Key must not be null!"); Assert.notNull(values, "Values must not be null!"); if (values.length == 1) { return createMono(connection -> connection.zRem(rawKey(key), rawValue((V) values[0]))); } return createMono(connection -> Flux.fromArray((V[]) values) // .map(this::rawValue) // .collectList() // .flatMap(serialized -> connection.zRem(rawKey(key), serialized))); }
// Opens a TCP connection for the given handler, reconnecting per the given
// strategy. The returned future completes when the FIRST connect attempt
// succeeds or fails; later reconnects are reported only through the handler.
@Override
public ListenableFuture<Void> connect(TcpConnectionHandler<P> handler, ReconnectStrategy strategy) {
	Assert.notNull(handler, "TcpConnectionHandler is required");
	Assert.notNull(strategy, "ReconnectStrategy is required");
	// Refuse new connections once shutdown has begun.
	if (this.stopping) {
		return handleShuttingDownConnectFailure(handler);
	}

	// Report first connect to the ListenableFuture
	MonoProcessor<Void> connectMono = MonoProcessor.create();

	this.tcpClient
			.handle(new ReactorNettyHandler(handler))
			.connect()
			// Complete/fail connectMono on the first outcome only.
			.doOnNext(updateConnectMono(connectMono))
			.doOnError(updateConnectMono(connectMono))
			.doOnError(handler::afterConnectFailure)    // report all connect failures to the handler
			.flatMap(Connection::onDispose)             // post-connect issues
			// retryWhen covers connect errors; repeatWhen covers clean disposals —
			// both delegate to the same reconnect strategy.
			.retryWhen(reconnectFunction(strategy))
			.repeatWhen(reconnectFunction(strategy))
			.subscribe();

	return new MonoToListenableFutureAdapter<>(connectMono);
}
/**
 * Execute {@code HVALS} for each incoming {@link KeyCommand}, emitting the
 * hash values as a nested {@link Flux} paired with the originating command.
 * @param commands the stream of key commands; each key must not be {@literal null}
 * @return responses carrying the value stream per command
 */
@Override
public Flux<CommandResponse<KeyCommand, Flux<ByteBuffer>>> hVals(Publisher<KeyCommand> commands) {

	return connection.execute(cmd -> Flux.from(commands).concatMap(keyCommand -> {

		Assert.notNull(keyCommand.getKey(), "Key must not be null!");

		Flux<ByteBuffer> values = cmd.hvals(keyCommand.getKey());
		return Mono.just(new CommandResponse<>(keyCommand, values));
	}));
}
/**
 * Get elements with their scores in the given score {@code range} from the
 * sorted set at {@code key}, ordered from highest to lowest score.
 * @param key must not be {@literal null}
 * @param range must not be {@literal null}
 * @return the matching value/score tuples
 */
@Override
public Flux<TypedTuple<V>> reverseRangeByScoreWithScores(K key, Range<Double> range) {

	Assert.notNull(key, "Key must not be null!");
	Assert.notNull(range, "Range must not be null!");

	return createFlux(connection -> connection.zRevRangeByScoreWithScores(rawKey(key), range)
			.map(this::readTypedTuple));
}
/**
 * Get elements in the given lexicographical {@code range} from the sorted
 * set at {@code key}.
 * @param key must not be {@literal null}
 * @param range must not be {@literal null}
 * @return the deserialized values within the range
 */
@Override
public Flux<V> rangeByLex(K key, Range<String> range) {

	Assert.notNull(key, "Key must not be null!");
	Assert.notNull(range, "Range must not be null!");

	return createFlux(connection -> connection.zRangeByLex(rawKey(key), range)
			.map(this::readValue));
}
/**
 * Get elements with their scores in the given index {@code range} from the
 * sorted set at {@code key}, ordered from highest to lowest score.
 * @param key must not be {@literal null}
 * @param range must not be {@literal null}
 * @return the matching value/score tuples
 */
@Override
public Flux<TypedTuple<V>> reverseRangeWithScores(K key, Range<Long> range) {

	Assert.notNull(key, "Key must not be null!");
	Assert.notNull(range, "Range must not be null!");

	return createFlux(connection -> connection.zRevRangeWithScores(rawKey(key), range)
			.map(this::readTypedTuple));
}