/**
 * Asynchronously forces an update of each procedure id on the dedicated executor.
 * Failures are logged per-procedure and do not abort the remaining submissions.
 */
@Override public void forceUpdate(long[] procIds) {
  Arrays.stream(procIds).forEach(procId -> forceUpdateExecutor.execute(() -> {
    try {
      forceUpdateProcedure(procId);
    } catch (IOException e) {
      // Pass the exception as the last argument so SLF4J logs the stack trace;
      // previously it was dropped and only the pid was recorded.
      LOG.warn("Failed to force update procedure with pid={}", procId, e);
    }
  }));
} });
/**
 * Drains the previously-built stream into the configured consumer,
 * closing the stream when done (even on failure).
 */
@Override
public Void execute() {
  try (LongStream previous = buildPrevious()) {
    previous.forEach(consumer);
  }
  return null;
} }
/**
 * Terminal operation: optimizes the given pipeline, materializes it as a
 * {@code LongStream}, and applies {@code action} to every element.
 *
 * NOTE(review): the type parameter {@code <T>} is unused in this signature —
 * presumably kept for symmetry with sibling overloads; confirm before removing,
 * since explicit type-witness call sites would break.
 *
 * @throws NullPointerException if {@code pipeline} or {@code action} is null
 */
default <T> void forEach(LongPipeline pipeline, LongConsumer action) { requireNonNull(pipeline); requireNonNull(action); optimize(pipeline).getAsLongStream().forEach(action); }
/**
 * Estimates the cost of a minimum-should-match disjunction as the sum of the
 * {@code numScorers - minShouldMatch + 1} least costly clauses (see derivation
 * in the comments below).
 *
 * @param costs          per-clause cost estimates
 * @param numScorers     total number of clauses
 * @param minShouldMatch minimum number of clauses that must match
 */
static long cost(LongStream costs, int numScorers, int minShouldMatch) {
  // the idea here is the following: a boolean query c1,c2,...cn with minShouldMatch=m
  // could be rewritten to:
  // (c1 AND (c2..cn|msm=m-1)) OR (!c1 AND (c2..cn|msm=m))
  // if we assume that clauses come in ascending cost, then
  // the cost of the first part is the cost of c1 (because the cost of a conjunction is
  // the cost of the least costly clause)
  // the cost of the second part is the cost of finding m matches among the c2...cn
  // remaining clauses
  // since it is a disjunction overall, the total cost is the sum of the costs of these
  // two parts
  // If we recurse infinitely, we find out that the cost of a msm query is the sum of the
  // costs of the num_scorers - minShouldMatch + 1 least costly scorers
  //
  // Bounded heap: lessThan is inverted (a > b), so insertWithOverflow evicts the
  // LARGEST cost, keeping only the numScorers - minShouldMatch + 1 smallest values.
  final PriorityQueue<Long> pq = new PriorityQueue<Long>(numScorers - minShouldMatch + 1) { @Override protected boolean lessThan(Long a, Long b) { return a > b; } };
  costs.forEach(pq::insertWithOverflow);
  // Sum the retained (cheapest) costs.
  return StreamSupport.stream(pq.spliterator(), false).mapToLong(Number::longValue).sum();
}
// Terminal operation: applies the action to every element, then closes the
// underlying stream via finallyClose even if the action throws.
@Override public void forEach(LongConsumer action) { finallyClose(() -> stream().forEach(action)); }
/** Inserts one WIDGET_PROPERTIES row per property id, all attached to the given widget. */
private void insertWidgetProperties(long widgetId, long... ids) {
  for (long propertyId : ids) {
    db.executeInsert(
      "widget_properties",
      "ID", valueOf(propertyId),
      "WIDGET_ID", valueOf(widgetId));
  }
}
AtomicBoolean advance=new AtomicBoolean(false); seqnos.forEach(seqno -> { T element=null; lock.lock();
/** Inserts one ACTIVE_DASHBOARDS row per id, all pointing at the given dashboard. */
private void insertActiveDashboards(long dashboardId, long... ids) {
  for (long activeDashboardId : ids) {
    db.executeInsert(
      "active_dashboards",
      "ID", valueOf(activeDashboardId),
      "DASHBOARD_ID", valueOf(dashboardId));
  }
}
/** Inserts one WIDGETS row per id (id doubles as the widget key) on the given dashboard. */
private void insertWidgets(long dashboardId, long... ids) {
  for (long widgetId : ids) {
    db.executeInsert(
      "widgets",
      "ID", valueOf(widgetId),
      "WIDGET_KEY", valueOf(widgetId),
      "DASHBOARD_ID", valueOf(dashboardId));
  }
}
/** Inserts a non-global (project-scoped) dashboard for each given id. */
private void insertProjectDashboards(long... ids) {
  for (long dashboardId : ids) {
    insertDashboard(dashboardId, false);
  }
}
/** Inserts a global dashboard for each given id. */
private void insertGlobalDashboards(long... ids) {
  for (long dashboardId : ids) {
    insertDashboard(dashboardId, true);
  }
}
/**
 * Builds the expected serialized quantile digest for the given values,
 * or null when there are no values.
 *
 * @param maxError maximum error tolerated by the digest
 * @param values   the long values to accumulate
 */
private Object getExpectedValueLongs(double maxError, long... values) {
  if (values.length == 0) {
    return null;
  }
  QuantileDigest digest = new QuantileDigest(maxError);
  for (long value : values) {
    digest.add(value);
  }
  return new SqlVarbinary(digest.serialize().getBytes());
}
// Verifies that non-negative timestamps are accepted verbatim, while a negative
// timestamp makes the Message constructor throw IllegalArgumentException.
//
// NOTE(review): the expected message "Text can't be less than 0" reads like a
// copy-paste from the text-validation check rather than a timestamp message —
// confirm against the Message constructor before changing either side.
@Test public void constructor_throws_IAE_if_timestamp_is_less_than_0() {
  // Happy path: 0 and a small positive timestamp round-trip through the getter.
  LongStream.of(0, 1 + new Random().nextInt(12)) .forEach(timestamp -> assertThat(new Message("foo", timestamp).getTimestamp()).isEqualTo(timestamp));
  // Any strictly negative timestamp must be rejected.
  long lessThanZero = -1 - new Random().nextInt(33);
  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage("Text can't be less than 0");
  new Message("bar", lessThanZero);
}
copiedRecordingLog.entries().stream().mapToLong(e -> e.recordingId).forEach(recordingIds::add); try (Stream<Path> segments = Files.list(archiveDataDir.toPath()) .filter((p) -> p.getFileName().toString().endsWith(".rec")))
// Basic put/get/remove consistency across two cache clients sharing one store (sor):
// client1 writes 10 random entries, a second cache manager's client reads them all
// back, then removes 3 and the shared store shrinks accordingly.
//
// NOTE(review): ThreadLocalRandom.longs(10) could in principle emit duplicates,
// which would make the size-10 assertion flaky — astronomically unlikely over
// the full long range, but worth knowing.
@Test public void testBasicOps() {
  client1 = cacheManager.createCache("basicops" + cacheConsistency.name(), configuration);
  assertThat(sor.isEmpty(), is(true));
  // Seed 10 random keys through client1.
  Set<Long> keys = new HashSet<>();
  ThreadLocalRandom.current().longs(10).forEach(x -> { keys.add(x); client1.put(x, Long.toString(x)); });
  assertThat(sor.size(), is(10));
  // A second, independent cache manager must observe all 10 entries.
  CacheManager anotherCacheManager = newCacheManager();
  Cache<Long, String> client2 = anotherCacheManager.createCache("basicops" + cacheConsistency.name(), getCacheConfig());
  Map<Long, String> all = client2.getAll(keys);
  assertThat(all.keySet(), containsInAnyOrder(keys.toArray()));
  // Removing 3 keys via client2 is reflected in the shared store.
  keys.stream().limit(3).forEach(client2::remove);
  assertThat(sor.size(), is(7));
}
// Failover visibility test: entries written through CACHE1 before the active
// server is terminated must not be MORE visible after failover than before —
// and anything CACHE1 still sees afterwards that CACHE2 saw before must remain
// readable through CACHE2.
@Test(timeout=180000) public void testCRUD() throws Exception {
  Random random = new Random();
  LongStream longStream = random.longs(1000);
  // Populate CACHE1 with 1000 random keys, remembering which were added.
  Set<Long> added = new HashSet<>();
  longStream.forEach(x -> { CACHE1.put(x, new BlobValue()); added.add(x); });
  // Snapshot which keys CACHE2 can see before the active server goes down.
  Set<Long> readKeysByCache2BeforeFailOver = new HashSet<>();
  added.forEach(x -> { if (CACHE2.get(x) != null) { readKeysByCache2BeforeFailOver.add(x); } });
  CLUSTER.getClusterControl().terminateActive();
  // Snapshot which keys CACHE1 can still see after failover.
  Set<Long> readKeysByCache1AfterFailOver = new HashSet<>();
  added.forEach(x -> { if (CACHE1.get(x) != null) { readKeysByCache1AfterFailOver.add(x); } });
  // Failover may lose entries but never invent them.
  assertThat(readKeysByCache2BeforeFailOver.size(), greaterThanOrEqualTo(readKeysByCache1AfterFailOver.size()));
  // Keys surviving in CACHE1 that CACHE2 had seen must still be readable via CACHE2.
  readKeysByCache1AfterFailOver.stream().filter(readKeysByCache2BeforeFailOver::contains).forEach(y -> assertThat(CACHE2.get(y), notNullValue()));
}
// Compare-and-swap semantics across two clients sharing one store:
// putIfAbsent must return the existing value, 2-arg replace swaps 5 values,
// conditional remove only succeeds where the original value survived, and the
// final 3-arg replace succeeds exactly on the 5 previously-replaced keys.
@Test public void testCASOps() {
  client1 = cacheManager.createCache("casops" + cacheConsistency.name(), configuration);
  assertThat(sor.isEmpty(), is(true));
  // Seed 10 random keys with value == Long.toString(key).
  Set<Long> keys = new HashSet<>();
  ThreadLocalRandom.current().longs(10).forEach(x -> { keys.add(x); client1.put(x, Long.toString(x)); });
  assertThat(sor.size(), is(10));
  CacheManager anotherCacheManager = newCacheManager();
  Cache<Long, String> client2 = anotherCacheManager.createCache("casops" + cacheConsistency.name(), getCacheConfig());
  // putIfAbsent on present keys is a no-op that returns the current value.
  keys.forEach(x -> assertThat(client2.putIfAbsent(x, "Again" + x), is(Long.toString(x))));
  assertThat(sor.size(), is(10));
  // Unconditional replace swaps 5 values to "Replaced<x>" without changing size.
  keys.stream().limit(5).forEach(x -> assertThat(client2.replace(x , "Replaced" + x), is(Long.toString(x))));
  assertThat(sor.size(), is(10));
  // Conditional remove matches the ORIGINAL value, so only the 5 un-replaced
  // keys are removed, leaving the 5 replaced ones.
  keys.forEach(x -> client1.remove(x, Long.toString(x)));
  assertThat(sor.size(), is(5));
  // 3-arg replace matches "Replaced<x>", so it succeeds for exactly those 5 keys.
  AtomicInteger success = new AtomicInteger(0);
  keys.forEach(x -> { if (client2.replace(x, "Replaced" + x, "Again")) { success.incrementAndGet(); } });
  assertThat(success.get(), is(5));
}
// clear() across failover: after CACHE1.clear() and termination of the active
// server, previously-visible keys must read as null — checked via CACHE2 under
// STRONG consistency, via CACHE1 otherwise.
@Test(timeout=180000) public void testClear() throws Exception {
  List<Cache<Long, BlobValue>> caches = new ArrayList<>();
  caches.add(CACHE1);
  caches.add(CACHE2);
  // Build 1000 random entries and push the same map into both caches.
  Map<Long, BlobValue> entriesMap = new HashMap<>();
  Random random = new Random();
  LongStream longStream = random.longs(1000);
  longStream.forEach(x -> entriesMap.put(x, new BlobValue()));
  caches.forEach(cache -> cache.putAll(entriesMap));
  Set<Long> keySet = entriesMap.keySet();
  // Snapshot which keys CACHE2 can see before clear + failover.
  Set<Long> readKeysByCache2BeforeFailOver = new HashSet<>();
  keySet.forEach(x -> { if (CACHE2.get(x) != null) { readKeysByCache2BeforeFailOver.add(x); } });
  CACHE1.clear();
  CLUSTER.getClusterControl().terminateActive();
  // Under STRONG consistency the clear must be visible through the OTHER client;
  // otherwise we only assert it through the client that issued the clear.
  if (cacheConsistency == Consistency.STRONG) {
    readKeysByCache2BeforeFailOver.forEach(x -> assertThat(CACHE2.get(x), nullValue()));
  } else {
    readKeysByCache2BeforeFailOver.forEach(x -> assertThat(CACHE1.get(x), nullValue()));
  }
}
// Verifies behavior while the client lease is expired: in-flight puts fail with
// ReconnectInProgressException (wrapped three levels deep), and gets eventually
// succeed once the client reconnects.
@Test public void cacheOpsDuringReconnection() throws Exception {
  try {
    Cache<Long, String> cache = cacheManager.createCache("clustered-cache", config);
    // Hammer the cache with puts on a background thread...
    CompletableFuture<Void> future = CompletableFuture.runAsync(() -> ThreadLocalRandom.current() .longs() .forEach(value -> cache.put(value, Long.toString(value))));
    // ...then expire the lease underneath it.
    expireLease();
    try {
      future.get(5000, TimeUnit.MILLISECONDS);
      fail();
    } catch (ExecutionException e) {
      // The put failure is wrapped: ExecutionException -> runtime wrapper ->
      // wrapper -> ReconnectInProgressException.
      assertThat(e.getCause().getCause().getCause(), instanceOf(ReconnectInProgressException.class));
    }
    // Spin on get() until reconnection completes; exceptions are deliberately
    // swallowed here — each failure just means "not reconnected yet, retry".
    CompletableFuture<Void> getSucceededFuture = CompletableFuture.runAsync(() -> { while (true) { try { cache.get(1L); break; } catch (RuntimeException e) { } } });
    getSucceededFuture.get(20000, TimeUnit.MILLISECONDS);
  } finally {
    cacheManager.destroyCache("clustered-cache");
  }
}