/**
 * Waits for all indexes visible via {@link #getIndexes()} to come online, spending at most
 * {@code duration} (in {@code unit}) across all of them combined.
 *
 * @param duration total time budget shared by all indexes.
 * @param unit     unit of {@code duration}.
 * @throws IllegalStateException if the time budget is exhausted before every index is online;
 *         the message lists which indexes made it online and which are not guaranteed to be.
 */
@Override
public void awaitIndexesOnline( long duration, TimeUnit unit )
{
    actions.assertInOpenTransaction();
    long millisLeft = TimeUnit.MILLISECONDS.convert( duration, unit );
    Collection<IndexDefinition> onlineIndexes = new ArrayList<>();

    for ( Iterator<IndexDefinition> iter = getIndexes().iterator(); iter.hasNext(); )
    {
        if ( millisLeft < 0 )
        {
            // Fix: a space was missing between the two sentences of this message,
            // which previously rendered as "...reasonable time.Indexes brought online...".
            throw new IllegalStateException( "Expected all indexes to come online within a reasonable time. "
                    + "Indexes brought online: " + onlineIndexes
                    + ". Indexes not guaranteed to be online: " + asCollection( iter ) );
        }

        IndexDefinition index = iter.next();
        // Charge the time spent waiting on this index against the shared budget.
        long millisBefore = System.currentTimeMillis();
        awaitIndexOnline( index, millisLeft, TimeUnit.MILLISECONDS );
        millisLeft -= System.currentTimeMillis() - millisBefore;

        onlineIndexes.add( index );
    }
}
/**
 * Matches when the given {@code IndexHits} contains exactly the expected items:
 * the sizes must agree and every expected item must be present. On mismatch a
 * diagnostic message is recorded for later reporting.
 */
@Override
public boolean matchesSafely( IndexHits<T> indexHits )
{
    Collection<T> found = Iterators.asCollection( indexHits.iterator() );

    // Size must match first — a subset/superset is not a match.
    if ( expectedItems.length != found.size() )
    {
        message = "IndexHits with a size of " + expectedItems.length + ", got one with " + found.size();
        message += found.toString();
        return false;
    }

    // Every expected item must be present among the hits.
    for ( T expected : expectedItems )
    {
        if ( !found.contains( expected ) )
        {
            message = "Item (" + expected + ") not found.";
            return false;
        }
    }
    return true;
}
@Test public void testReturnSourceFromApplyWithEmptyDiffSets() { // WHEN Iterator<Long> result = diffSets.apply( singletonList( 18L ).iterator() ); // THEN assertEquals( singletonList( 18L ), asCollection( result ) ); }
@Test public void testFilterRemovedFromSourceInApply() { // GIVEN diffSets.remove( 43L ); // WHEN Iterator<Long> result = diffSets.apply( asList( 42L, 43L, 44L ).iterator() ); // THEN assertEquals( asList( 42L, 44L ), asCollection( result ) ); }
@Test public void testAppendAddedToSourceInApply() { // GIVEN diffSets.add( 52L ); diffSets.remove( 43L ); // WHEN Iterator<Long> result = diffSets.apply( singletonList( 18L ).iterator() ); // THEN assertEquals( asList( 18L, 52L ), asCollection( result ) ); }
@Test public void testFilterAddedFromSourceInApply() { // GIVEN diffSets.add( 42L ); diffSets.add( 44L ); // WHEN Iterator<Long> result = diffSets.apply( asList( 42L, 43L ).iterator() ); // THEN Collection<Long> collectedResult = asCollection( result ); assertEquals( 3, collectedResult.size() ); assertThat( collectedResult, hasItems( 43L, 42L, 44L ) ); } }
// Verifies that snapshotIndexFiles() yields one snapshot per ONLINE index:
// two online indexes backed by the same mocked accessor produce the same file twice.
@Test
public void shouldSnapshotOnlineIndexes() throws Exception
{
    // GIVEN two index rules backed by a shared mocked accessor
    int indexId = 1;
    int indexId2 = 2;
    StoreIndexDescriptor rule1 = storeIndex( indexId, 2, 3, PROVIDER_DESCRIPTOR );
    StoreIndexDescriptor rule2 = storeIndex( indexId2, 4, 5, PROVIDER_DESCRIPTOR );
    IndexAccessor indexAccessor = mock( IndexAccessor.class );
    IndexingService indexing = newIndexingServiceWithMockedDependencies(
            mock( IndexPopulator.class ), indexAccessor,
            new DataUpdates( ), rule1, rule2 );
    File theFile = new File( "Blah" );

    // Each call to snapshotFiles() answers with a fresh iterator over the same file.
    when( indexAccessor.snapshotFiles()).thenAnswer( newResourceIterator( theFile ) );
    // Both indexes report ONLINE at startup, so both should be included in the snapshot.
    when( indexProvider.getInitialState( rule1 ) ).thenReturn( ONLINE );
    when( indexProvider.getInitialState( rule2 ) ).thenReturn( ONLINE );
    when( storeView.indexSample( anyLong(), any( DoubleLongRegister.class ) ) )
            .thenReturn( newDoubleLongRegister( 32L, 32L ) );
    life.start();

    // WHEN
    ResourceIterator<File> files = indexing.snapshotIndexFiles();

    // THEN
    // We get a snapshot per online index
    assertThat( asCollection( files ), equalTo( asCollection( iterator( theFile, theFile ) ) ) );
}
// Verifies that snapshotIndexFiles() skips indexes that are still populating:
// a latch keeps one index in the POPULATING state while the snapshot is taken,
// so only the ONLINE index contributes a file.
@Test
public void shouldNotSnapshotPopulatingIndexes() throws Exception
{
    // GIVEN one populating and one online index
    CountDownLatch populatorLatch = new CountDownLatch( 1 );
    IndexAccessor indexAccessor = mock(IndexAccessor.class);
    int indexId = 1;
    int indexId2 = 2;
    StoreIndexDescriptor rule1 = storeIndex( indexId, 2, 3, PROVIDER_DESCRIPTOR );
    StoreIndexDescriptor rule2 = storeIndex( indexId2, 4, 5, PROVIDER_DESCRIPTOR );
    IndexingService indexing = newIndexingServiceWithMockedDependencies(
            populator, indexAccessor, new DataUpdates(), rule1, rule2 );
    File theFile = new File( "Blah" );

    // Block the population job inside create() until the latch is released,
    // keeping rule1 in POPULATING state during the snapshot.
    doAnswer( waitForLatch( populatorLatch ) ).when( populator ).create();
    when( indexAccessor.snapshotFiles() ).thenAnswer( newResourceIterator( theFile ) );
    when( indexProvider.getInitialState( rule1 ) ).thenReturn( POPULATING );
    when( indexProvider.getInitialState( rule2 ) ).thenReturn( ONLINE );
    when( storeView.indexSample( anyLong(), any( DoubleLongRegister.class ) ) )
            .thenReturn( newDoubleLongRegister( 32L, 32L ) );
    life.start();

    // WHEN
    ResourceIterator<File> files = indexing.snapshotIndexFiles();
    populatorLatch.countDown(); // only now, after the snapshot, is the population job allowed to finish
    waitForIndexesToComeOnline( indexing, indexId, indexId2 );

    // THEN
    // We get a snapshot from the online index, but no snapshot from the populating one
    assertThat( asCollection( files ), equalTo( asCollection( iterator( theFile ) ) ) );
}
/**
 * Checks that timeline entries created in the current (uncommitted) transaction are
 * returned in sorted order, and that the ordering still holds after committing.
 */
private void makeSureUncommittedChangesAreSortedCorrectly( EntityCreator<PropertyContainer> creator,
        TimelineIndex<PropertyContainer> timeline )
{
    LinkedList<Pair<PropertyContainer, Long>> entries =
            createTimestamps( creator, timeline, 300000, 100000, 500000, 900000, 800000 );

    try ( Transaction tx = db.beginTx() )
    {
        // Add more entries inside this transaction, then verify ordering while still uncommitted.
        entries.addAll( createTimestamps( creator, timeline, 40000, 70000, 20000 ) );
        assertEquals( sortedEntities( entries, false ),
                asCollection( timeline.getBetween( null, null ).iterator() ) );
        tx.success();
    }

    // The same ordering must be observed once the changes are committed.
    try ( Transaction ignored = db.beginTx() )
    {
        assertEquals( sortedEntities( entries, false ),
                asCollection( timeline.getBetween( null, null ).iterator() ) );
    }
}
/**
 * Checks that timestamps below the epoch (negative values) can be stored and
 * queried back in reverse-sorted order via an upper-bounded getBetween.
 */
private void makeSureWeCanQueryLowerDefaultThan1970( EntityCreator<PropertyContainer> creator,
        TimelineIndex<PropertyContainer> timeline )
{
    LinkedList<Pair<PropertyContainer,Long>> entries =
            createTimestamps( creator, timeline, -10000, 0, 10000 );

    try ( Transaction tx = db.beginTx() )
    {
        assertEquals( sortedEntities( entries, true ),
                asCollection( timeline.getBetween( null, 10000L, true ).iterator() ) );
        tx.success();
    }
}
/**
 * Checks that an unbounded getBetween query with the reverse flag set
 * returns the timeline entries in descending timestamp order.
 */
private void makeSureRangesAreReturnedInCorrectReversedOrder( EntityCreator<PropertyContainer> creator,
        TimelineIndex<PropertyContainer> timeline )
{
    LinkedList<Pair<PropertyContainer, Long>> entries = createTimestamps( creator, timeline,
            300000, 200000, 199999, 400000, 100000, 500000, 600000, 900000, 800000 );

    try ( Transaction tx = db.beginTx() )
    {
        assertEquals( sortedEntities( entries, true ),
                asCollection( timeline.getBetween( null, null, true ).iterator() ) );
        tx.success();
    }
}
/**
 * Checks that an unbounded getBetween query returns the timeline entries
 * in ascending timestamp order regardless of insertion order.
 */
private void makeSureRangesAreReturnedInCorrectOrder( EntityCreator<PropertyContainer> creator,
        TimelineIndex<PropertyContainer> timeline )
{
    LinkedList<Pair<PropertyContainer, Long>> entries = createTimestamps( creator, timeline,
            300000, 200000, 400000, 100000, 500000, 600000, 900000, 800000 );

    try ( Transaction tx = db.beginTx() )
    {
        assertEquals( sortedEntities( entries, false ),
                asCollection( timeline.getBetween( null, null ).iterator() ) );
        tx.success();
    }
}
/**
 * Coerces an arbitrary value into a {@code Collection}: collections pass through
 * as-is, iterables and iterators are materialized, and any other value is wrapped
 * in a singleton. NOTE: the Collection check must come before the Iterable check,
 * since every Collection is also an Iterable.
 */
private static Collection asCollection( Object value )
{
    if ( value instanceof Collection )
    {
        return (Collection) value;
    }
    if ( value instanceof Iterable )
    {
        return Iterables.asCollection( (Iterable) value );
    }
    if ( value instanceof Iterator )
    {
        return Iterators.asCollection( (Iterator) value );
    }
    return Collections.singleton( value );
}
// Round-trip test: a mix of schema rules written to the store can all be read
// back via loadAllSchemaRules() and compare equal to what was written.
@Test
public void storeAndLoadAllRules()
{
    // GIVEN
    long indexId = store.nextId();
    long constraintId = store.nextId();
    // A mix of rule flavours: a unique index owned by a constraint, the owning
    // uniqueness constraint, two plain index rules, and an existence constraint.
    Collection<SchemaRule> rules = Arrays.asList(
            uniqueIndexRule( indexId, constraintId, PROVIDER_DESCRIPTOR, 2, 5, 3 ),
            constraintUniqueRule( constraintId, indexId, 2, 5, 3 ),
            indexRule( store.nextId(), PROVIDER_DESCRIPTOR, 0, 5 ),
            indexRule( store.nextId(), PROVIDER_DESCRIPTOR, 1, 6, 10, 99 ),
            constraintExistsRule( store.nextId(), 5, 1 ) );
    for ( SchemaRule rule : rules )
    {
        storeRule( rule );
    }

    // WHEN
    Collection<SchemaRule> readRules = asCollection( store.loadAllSchemaRules() );

    // THEN
    assertEquals( rules, readRules );
}
@Test public void shouldCreateDeferredSchemaIndexesInEmptyDatabase() throws Exception { // GIVEN BatchInserter inserter = newBatchInserter(); // WHEN IndexDefinition definition = inserter.createDeferredSchemaIndex( label( "Hacker" ) ).on( "handle" ).create(); // THEN assertEquals( "Hacker", single( definition.getLabels() ).name() ); assertEquals( asCollection( iterator( "handle" ) ), Iterables.asCollection( definition.getPropertyKeys() ) ); inserter.shutdown(); }
@Test public void shouldListAllLabels() throws Exception { // given Transaction transaction = newTransaction( AnonymousContext.writeToken() ); int label1Id = transaction.tokenWrite().labelGetOrCreateForName( "label1" ); int label2Id = transaction.tokenWrite().labelGetOrCreateForName( "label2" ); // when Iterator<NamedToken> labelIdsBeforeCommit = transaction.tokenRead().labelsGetAllTokens(); // then assertThat( asCollection( labelIdsBeforeCommit ), hasItems( new NamedToken( "label1", label1Id ), new NamedToken( "label2", label2Id ) ) ); // when commit(); transaction = newTransaction(); Iterator<NamedToken> labelIdsAfterCommit = transaction.tokenRead().labelsGetAllTokens(); // then assertThat( asCollection( labelIdsAfterCommit ), hasItems( new NamedToken( "label1", label1Id ), new NamedToken( "label2", label2Id ) ) ); commit(); } }
@Test public void shouldListAllPropertyKeys() throws Exception { // given dbWithNoCache(); Transaction transaction = newTransaction( AnonymousContext.writeToken() ); int prop1 = transaction.tokenWrite().propertyKeyGetOrCreateForName( "prop1" ); int prop2 = transaction.tokenWrite().propertyKeyGetOrCreateForName( "prop2" ); // when Iterator<NamedToken> propIdsBeforeCommit = transaction.tokenRead().propertyKeyGetAllTokens(); // then assertThat( asCollection( propIdsBeforeCommit ), hasItems( new NamedToken( "prop1", prop1 ), new NamedToken( "prop2", prop2 ) ) ); // when commit(); transaction = newTransaction(); Iterator<NamedToken> propIdsAfterCommit = transaction.tokenRead().propertyKeyGetAllTokens(); // then assertThat( asCollection( propIdsAfterCommit ), hasItems( new NamedToken( "prop1", prop1 ), new NamedToken( "prop2", prop2 ) ) ); commit(); }
/**
 * Free-text movie search: case-insensitive substring match on the movie title.
 *
 * @param query user-supplied search text; null or blank yields an empty result.
 * @return the matching movie rows, fully materialized.
 */
@SuppressWarnings("unchecked")
public Iterable<Map<String,Object>> search(String query) {
    // Treat null / blank input as "no results" rather than matching everything.
    if (query==null || query.trim().isEmpty()) return Collections.emptyList();
    // Fix: use locale-independent lower-casing. The bare toLowerCase() depends on the
    // JVM default locale and misbehaves e.g. under Turkish locales (dotless i),
    // which would break matching against Cypher's lower().
    return Iterators.asCollection(cypher.query(
            "MATCH (movie:Movie)\n" +
            " WHERE lower(movie.title) CONTAINS {part}\n" +
            " RETURN movie",
            map("part", query.toLowerCase(java.util.Locale.ROOT))));
}
/**
 * Waits for all indexes visible via {@link #getIndexes()} to come online, spending at most
 * {@code duration} (in {@code unit}) across all of them combined.
 *
 * @param duration total time budget shared by all indexes.
 * @param unit     unit of {@code duration}.
 * @throws IllegalStateException if the time budget is exhausted before every index is online;
 *         the message lists which indexes made it online and which are not guaranteed to be.
 */
@Override
public void awaitIndexesOnline( long duration, TimeUnit unit )
{
    actions.assertInOpenTransaction();
    long millisLeft = TimeUnit.MILLISECONDS.convert( duration, unit );
    Collection<IndexDefinition> onlineIndexes = new ArrayList<>();

    for ( Iterator<IndexDefinition> iter = getIndexes().iterator(); iter.hasNext(); )
    {
        if ( millisLeft < 0 )
        {
            // Fix: a space was missing between the two sentences of this message,
            // which previously rendered as "...reasonable time.Indexes brought online...".
            throw new IllegalStateException( "Expected all indexes to come online within a reasonable time. "
                    + "Indexes brought online: " + onlineIndexes
                    + ". Indexes not guaranteed to be online: " + asCollection( iter ) );
        }

        IndexDefinition index = iter.next();
        // Charge the time spent waiting on this index against the shared budget.
        long millisBefore = System.currentTimeMillis();
        awaitIndexOnline( index, millisLeft, TimeUnit.MILLISECONDS );
        millisLeft -= System.currentTimeMillis() - millisBefore;

        onlineIndexes.add( index );
    }
}
// Executes a Cypher query (optionally under PROFILE), eagerly materializes the rows,
// and packages columns, data, statistics, elapsed time and (when profiling) the
// execution plan into a CypherResult.
private CypherResult doExecuteQuery(String query, Map<String, Object> params, boolean canProfile)
{
    // Normalize a null parameter map to an empty one.
    params = params == null ? Collections.<String,Object>emptyMap() : params;
    long time=System.currentTimeMillis();
    Transaction tx = gdb.beginTx();
    KernelTransaction resumeTx;
    try {
        // NOTE(review): suspendTx/resumeTransaction appear to detach and re-attach a
        // kernel transaction around the execute call — confirm exact semantics at their definitions.
        resumeTx = suspendTx(query);
        Result result = canProfile ? gdb.execute("PROFILE "+query,params) : gdb.execute(query,params);
        // Materialize all rows eagerly so the data can outlive the Result/transaction.
        final Collection<Map<String, Object>> data = Iterators.asCollection(result);
        // Elapsed time is measured up to materialization, before building the CypherResult.
        time = System.currentTimeMillis() - time;
        resumeTransaction(resumeTx);
        CypherResult cypherResult = new CypherResult(result.columns(), data,
                result.getQueryStatistics(), time,
                canProfile ? result.getExecutionPlanDescription() : null, prettify(query));
        tx.success();
        return cypherResult;
    } finally {
        // Always close the transaction; then wait for any indexes the query created to come online.
        tx.close();
        awaitIndexOnline(query);
    }
}