/**
 * Streams all edges pointing at the target node whose sources match the requested
 * id type, straight from storage, paged, then passed through the marked-edge filter.
 */
@Override
public Observable<MarkedEdge> loadEdgesToTargetByType( final SearchByIdType search ) {

    // Lazy storage-backed source; the label is used for tracing the iterator.
    final ObservableIterator<MarkedEdge> storageSource =
        new ObservableIterator<MarkedEdge>( "loadEdgesToTargetByType" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgesToTargetBySourceType( scope, search );
            }
        };

    // Page the raw stream so the delete-marker filter works on bounded buffers.
    final Observable<MarkedEdge> filteredEdges = Observable.create( storageSource )
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( search.filterMarked() ) );

    return ObservableTimer.time( filteredEdges, loadEdgesToTargetByTypeTimer );
}
/**
 * Streams every edge arriving at the target node for the given edge type,
 * read from storage, paged, and filtered for delete markers when requested.
 */
@Override
public Observable<MarkedEdge> loadEdgesToTarget( final SearchByEdgeType search ) {

    final ObservableIterator<MarkedEdge> storageSource =
        new ObservableIterator<MarkedEdge>( "loadEdgesToTarget" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgesToTarget( scope, search );
            }
        };

    // Buffer into scan-sized pages before applying the marked-edge filter.
    final Observable<MarkedEdge> filteredEdges = Observable.create( storageSource )
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( search.filterMarked() ) );

    return ObservableTimer.time( filteredEdges, loadEdgesToTargetTimer );
}
/**
 * Streams all edges leaving the source node whose targets match the requested
 * id type, read from storage, paged, and filtered for delete markers.
 */
@Override
public Observable<MarkedEdge> loadEdgesFromSourceByType( final SearchByIdType search ) {

    final ObservableIterator<MarkedEdge> storageSource =
        new ObservableIterator<MarkedEdge>( "loadEdgesFromSourceByType" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgesFromSourceByTargetType( scope, search );
            }
        };

    // Page the storage stream, then drop marked edges if the search asked us to.
    final Observable<MarkedEdge> filteredEdges = Observable.create( storageSource )
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( search.filterMarked() ) );

    return ObservableTimer.time( filteredEdges, loadEdgesFromSourceByTypeTimer );
}
/**
 * Streams every stored version of the single edge described by the search,
 * paged and filtered for delete markers when the search requests it.
 *
 * @param searchByEdge fully-specified edge (source, target, type) plus version bounds
 * @return a timed observable of the matching edge versions
 */
@Override
public Observable<MarkedEdge> loadEdgeVersions( final SearchByEdge searchByEdge ) {

    // FIX: the iterator trace label previously read "getEdgeTypesFromSource" — a
    // copy/paste error that mislabeled this operation in trace/diagnostic output.
    final Observable<MarkedEdge> edges =
        Observable.create( new ObservableIterator<MarkedEdge>( "loadEdgeVersions" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgeVersions( scope, searchByEdge );
            }
        } )
        // page reads so the marked-edge filter operates on bounded buffers
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( searchByEdge.filterMarked() ) );

    return ObservableTimer.time( edges, loadEdgesVersionsTimer );
}
/**
 * Streams every edge leaving the source node for the given edge type,
 * read from storage, paged, and filtered for delete markers when requested.
 */
@Override
public Observable<MarkedEdge> loadEdgesFromSource( final SearchByEdgeType search ) {

    final ObservableIterator<MarkedEdge> storageSource =
        new ObservableIterator<MarkedEdge>( "loadEdgesFromSource" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgesFromSource( scope, search );
            }
        };

    // Buffer into scan-sized pages before running the marked-edge filter.
    final Observable<MarkedEdge> filteredEdges = Observable.create( storageSource )
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( search.filterMarked() ) );

    return ObservableTimer.time( filteredEdges, loadEdgesFromSourceTimer );
}
/**
 * Loads full entities for pages of previously filtered ids.
 *
 * Ids are buffered up to the pipeline limit, loaded in a single batch call to the
 * collection manager, then verified against the graph (EntityVerifier) before the
 * surviving entities are re-emitted as individual filter results.
 *
 * @param filterResultObservable stream of id-level filter results to resolve
 * @return stream of entity-level filter results for ids that passed verification
 */
@Override
public Observable<FilterResult<Entity>> call( final Observable<FilterResult<Id>> filterResultObservable ) {

    final ApplicationScope applicationScope = pipelineContext.getApplicationScope();
    final EntityCollectionManager entityCollectionManager =
        entityCollectionManagerFactory.createCollectionManager( applicationScope );

    //it's more efficient to make 1 network hop to get everything, then drop our results if required
    final Observable<FilterResult<Entity>> entityObservable =
        filterResultObservable.buffer( pipelineContext.getLimit() ).flatMap( bufferedIds -> {

            if (logger.isTraceEnabled()) {
                logger.trace("Attempting to batch load ids {}", bufferedIds);
            }

            // unwrap the ids, then load the whole page in one call
            final Observable<EntitySet> entitySetObservable =
                Observable.from( bufferedIds ).map( filterResultId -> filterResultId.getValue() ).toList()
                    .flatMap( ids -> entityCollectionManager.load( ids ) );

            //now we have a collection, validate our candidate set is correct.
            GraphManager graphManager = graphManagerFactory.createEdgeManager(applicationScope);

            // merge() performs the verification/repair side effects before results are read
            return entitySetObservable
                .map( entitySet -> new EntityVerifier( applicationScope, graphManager, entitySet,
                    bufferedIds, readRepairFig ) )
                .doOnNext( entityCollector -> entityCollector.merge() )
                .flatMap( entityCollector -> Observable.from( entityCollector.getResults() ) );
        } );

    return entityObservable;
}
/**
 * Groups a stream of filter results into pages of at most {@code limit} elements,
 * each page carrying a response cursor derived from the last element's path.
 *
 * @param filterResultObservable stream of individual filter results
 * @return stream of {@link ResultsPage}s, one per buffered page
 */
@Override
public Observable<ResultsPage<T>> call( final Observable<FilterResult<T>> filterResultObservable ) {

    final int limit = pipelineContext.getLimit();

    return filterResultObservable
        .buffer( limit )
        // fold each buffered page into a collector that tracks results and the last cursor path
        .flatMap( buffer -> Observable
            .from( buffer )
            .collect(() -> new ResultsPageWithCursorCollector( limit ),
                ( collector, element ) -> collector.add( element ) ) )
        // NOTE(review): ResultsPage is constructed as a raw type here — presumably it
        // should be ResultsPage<T>; confirm against the class declaration before changing.
        .map( resultsPageCollector -> new ResultsPage( resultsPageCollector.results,
            new ResponseCursor( resultsPageCollector.lastPath ), pipelineContext.getLimit() ) );
}
.buffer( indexProcessorFig.getCollectionDeleteBufferSize()) .doOnNext( edgeScopes -> { logger.info("Sending batch of {} to be deleted.", edgeScopes.size());
return edgesFromSourceObservable.edgesFromSourceDescending( gm, graphNode.entryNode, true).buffer( 1000 ) .doOnNext( edges -> { final MutationBatch batch = keyspace.prepareMutationBatch();
.buffer(250, TimeUnit.MILLISECONDS, indexFig.getIndexBatchSize())
/**
 * Regression test: buffering two emissions and flat-mapping each buffer to an empty
 * observable must complete with no emissions, so collect() yields its initial
 * (empty) list.
 */
@Test()
public void testSequence2() {
    // Typed locals replace the raw ArrayList usage; Observable.<Integer>empty() keeps
    // the pipeline's element type explicit so the typed collector compiles.
    final List<Integer> listReturn = Observable.range( 0, 2 )
        .buffer( 2 )
        .flatMap( i -> Observable.<Integer>empty() )
        .collect( () -> new ArrayList<Integer>(), ( list, i ) -> list.add( i ) )
        .toBlocking()
        .lastOrDefault( null );

    // FIX: JUnit's assertEquals signature is (expected, actual); the original call had
    // the arguments reversed, which produces a misleading message on failure.
    Assert.assertEquals( new ArrayList<Integer>(), listReturn );
}
/**
 * Resolves candidate search results to verified ids.
 *
 * Candidates are buffered up to the pipeline limit, their latest entity versions are
 * fetched in one batch, and an EntityCollector reconciles the index candidates against
 * those versions (queuing index repairs via the batch/producer) before emitting the
 * surviving ids.
 *
 * @param filterResultObservable stream of candidate-level filter results
 * @return stream of id-level filter results that survived version validation
 */
@Override
public Observable<FilterResult<Id>> call( final Observable<FilterResult<Candidate>> filterResultObservable ) {

    /**
     * A bit kludgy from old 1.0 -> 2.0 apis. Refactor this as we clean up our lower levels and create new results
     * objects
     */

    final ApplicationScope applicationScope = pipelineContext.getApplicationScope();
    final EntityCollectionManager entityCollectionManager =
        entityCollectionManagerFactory.createCollectionManager( applicationScope );
    final EntityIndex applicationIndex =
        entityIndexFactory.createEntityIndex(indexLocationStrategyFactory.getIndexLocationStrategy(applicationScope));

    final Observable<FilterResult<Id>> searchIdSetObservable =
        filterResultObservable.buffer( pipelineContext.getLimit() ).flatMap( candidateResults -> {

            //flatten to a list of ids to load
            final Observable<List<Id>> candidateIds = Observable.from( candidateResults )
                .map( candidate -> candidate.getValue().getCandidateResult().getId() ).toList();

            //load the ids
            final Observable<VersionSet> versionSetObservable =
                candidateIds.flatMap( ids -> entityCollectionManager.getLatestVersion( ids ) );

            //now we have a collection, validate our candidate set is correct.
            // merge() performs the validation/repair side effects before results are collected
            return versionSetObservable
                .map( entitySet -> new EntityCollector( applicationIndex.createBatch(), entitySet,
                    candidateResults, indexProducer ) )
                .doOnNext( entityCollector -> entityCollector.merge() )
                .flatMap( entityCollector -> Observable.from( entityCollector.collectResults() ) );
        } );

    return searchIdSetObservable;
}
collectionIoEventObservable.buffer( serializationFig.getBufferSize() ).flatMap( buffer -> Observable.from( buffer ).collect( () -> keyspace.prepareMutationBatch(), ( ( mutationBatch, mvccLogEntryCollectionIoEvent ) -> {
/**
 * Builds the index operations required to index one entity in every scope that
 * references it.
 *
 * Walks all graph edges arriving at the entity, maps each edge to the index scope
 * derived from its source, batches those scopes, indexes the entity into each batch
 * (optionally restricted to a filtered field set), and emits one built
 * IndexOperationMessage per batch.
 *
 * @param applicationScope application the entity belongs to
 * @param entity           the entity to index
 * @return timed stream of built index operation batches
 */
@Override
public Observable<IndexOperationMessage> indexEntity( final ApplicationScope applicationScope,
                                                      final Entity entity ) {

    //bootstrap the lower modules from their caches
    final GraphManager gm = graphManagerFactory.createEdgeManager( applicationScope );
    final EntityIndex ei =
        entityIndexFactory.createEntityIndex(indexLocationStrategyFactory.getIndexLocationStrategy(applicationScope));

    final Id entityId = entity.getId();

    //we always index in the target scope
    final Observable<Edge> edgesToTarget = edgesObservable.edgesToTarget( gm, entityId, true);

    //we may have to index  we're indexing from source->target here
    final Observable<IndexEdge> sourceEdgesToIndex = edgesToTarget.map( edge -> generateScopeFromSource( edge ) );

    //do our observable for batching
    //try to send a whole batch if we can
    final Observable<IndexOperationMessage> batches = sourceEdgesToIndex
        .buffer(indexFig.getIndexBatchSize() )

        //map into batches based on our buffer size
        .flatMap( buffer -> Observable.from( buffer )

            //collect results into a single batch
            .collect( () -> ei.createBatch(), ( batch, indexEdge ) -> {
                if (logger.isDebugEnabled()) {
                    logger.debug("adding edge {} to batch for entity {}", indexEdge, entity);
                }
                // restrict to the configured field subset, if any, for this edge's scope
                final Optional<Set<String>> fieldsToIndex = getFilteredStringObjectMap( indexEdge );
                batch.index( indexEdge, entity ,fieldsToIndex);
            } )

            //return the future from the batch execution
            .map( batch -> batch.build() ) );

    return ObservableTimer.time( batches, indexTimer );
}
} ).buffer( 100 ).flatMap( entityIds -> { return ecm.load( entityIds ); } )
.buffer( serializationFig.getBufferSize() )
/**
 * Verifies that cache-served responses under semaphore isolation appear on the
 * command completion stream (all 3 completions in one buffer) while the thread-pool
 * stream stays silent, since no thread-pool execution occurred.
 */
@Test
public void testSemaphoreIsolatedResponseFromCache() throws Exception {
    CountDownLatch commandLatch = new CountDownLatch(1);
    CountDownLatch threadPoolLatch = new CountDownLatch(1);

    Subscriber<List<HystrixCommandCompletion>> commandListSubscriber = getLatchedSubscriber(commandLatch);
    // Collect everything emitted within a 500ms window; the one SUCCESS plus two
    // RESPONSE_FROM_CACHE completions are all expected in that single buffer.
    readCommandStream.observe().buffer(500, TimeUnit.MILLISECONDS).take(1)
            .doOnNext(new Action1<List<HystrixCommandCompletion>>() {
                @Override
                public void call(List<HystrixCommandCompletion> hystrixCommandCompletions) {
                    System.out.println("LIST : " + hystrixCommandCompletions);
                    // NOTE(review): this assert runs on the stream's thread — confirm the
                    // latched subscriber propagates an AssertionError back to the test.
                    assertEquals(3, hystrixCommandCompletions.size());
                }
            })
            .subscribe(commandListSubscriber);

    Subscriber<HystrixCommandCompletion> threadPoolSubscriber = getLatchedSubscriber(threadPoolLatch);
    readThreadPoolStream.observe().take(1).subscribe(threadPoolSubscriber);

    ExecutionResult result = ExecutionResult.from(HystrixEventType.SUCCESS);
    ExecutionResult cache1 = ExecutionResult.from(HystrixEventType.RESPONSE_FROM_CACHE);
    ExecutionResult cache2 = ExecutionResult.from(HystrixEventType.RESPONSE_FROM_CACHE);

    writeToStream.executionDone(result, commandKey, threadPoolKey);
    writeToStream.executionDone(cache1, commandKey, threadPoolKey);
    writeToStream.executionDone(cache2, commandKey, threadPoolKey);

    // Command stream must fire; the thread-pool latch must time out because
    // semaphore-isolated work never touches the thread pool.
    assertTrue(commandLatch.await(1000, TimeUnit.MILLISECONDS));
    assertFalse(threadPoolLatch.await(1000, TimeUnit.MILLISECONDS));
}
int returned = Observable.merge( input1, input2 ).buffer( 1000 ) .flatMap( new Func1<List<Integer>, Observable<Integer>>() { @Override
/**
 * Verifies that cache-served responses under thread isolation appear on the command
 * completion stream (all 3 completions in one buffer) AND that the original
 * thread-executed command is reported on the thread-pool stream.
 */
@Test
public void testThreadIsolatedResponseFromCache() throws Exception {
    CountDownLatch commandLatch = new CountDownLatch(1);
    CountDownLatch threadPoolLatch = new CountDownLatch(1);

    Subscriber<List<HystrixCommandCompletion>> commandListSubscriber = getLatchedSubscriber(commandLatch);
    // Collect everything emitted within a 500ms window; the one thread-executed SUCCESS
    // plus two RESPONSE_FROM_CACHE completions are all expected in that single buffer.
    readCommandStream.observe().buffer(500, TimeUnit.MILLISECONDS).take(1)
            .doOnNext(new Action1<List<HystrixCommandCompletion>>() {
                @Override
                public void call(List<HystrixCommandCompletion> hystrixCommandCompletions) {
                    System.out.println("LIST : " + hystrixCommandCompletions);
                    // NOTE(review): this assert runs on the stream's thread — confirm the
                    // latched subscriber propagates an AssertionError back to the test.
                    assertEquals(3, hystrixCommandCompletions.size());
                }
            })
            .subscribe(commandListSubscriber);

    Subscriber<HystrixCommandCompletion> threadPoolSubscriber = getLatchedSubscriber(threadPoolLatch);
    readThreadPoolStream.observe().take(1).subscribe(threadPoolSubscriber);

    // setExecutedInThread() marks the first command as thread-isolated, which is what
    // makes the thread-pool stream emit (unlike the semaphore-isolated variant).
    ExecutionResult result = ExecutionResult.from(HystrixEventType.SUCCESS).setExecutedInThread();
    ExecutionResult cache1 = ExecutionResult.from(HystrixEventType.RESPONSE_FROM_CACHE);
    ExecutionResult cache2 = ExecutionResult.from(HystrixEventType.RESPONSE_FROM_CACHE);

    writeToStream.executionDone(result, commandKey, threadPoolKey);
    writeToStream.executionDone(cache1, commandKey, threadPoolKey);
    writeToStream.executionDone(cache2, commandKey, threadPoolKey);

    // Both streams must fire: the command stream for all completions, the thread-pool
    // stream for the thread-executed command.
    assertTrue(commandLatch.await(1000, TimeUnit.MILLISECONDS));
    assertTrue(threadPoolLatch.await(1000, TimeUnit.MILLISECONDS));
}
generator.doSearch( manager ).take( readCount ).buffer( 1000 ).subscribe( new Subscriber<List<MarkedEdge>>() { @Override public void onCompleted() {