/** * Caches a relationship group into this cache, it will be cached if the * {@link RelationshipGroupRecord#getOwningNode() owner} is within the {@link #prepare(long) prepared} range, * where {@code true} will be returned, otherwise {@code false}. * * @param groupRecord {@link RelationshipGroupRecord} to cache. * @return whether or not the group was cached, i.e. whether or not it was within the prepared range. */ public boolean put( RelationshipGroupRecord groupRecord ) { long nodeId = groupRecord.getOwningNode(); assert nodeId < highNodeId; if ( nodeId < fromNodeId || nodeId >= toNodeId ) { return false; } long baseIndex = offsets.get( rebase( nodeId ) ); // grouCount is extra validation, really int groupCount = groupCount( nodeId ); long index = scanForFreeFrom( baseIndex, groupCount, groupRecord.getType(), nodeId ); // Put the group at this index cache.setByte( index, 0, (byte) 1 ); cache.set3ByteInt( index, 1, groupRecord.getType() ); cache.set6ByteLong( index, 1 + 3, groupRecord.getFirstOut() ); cache.set6ByteLong( index, 1 + 3 + 6, groupRecord.getFirstIn() ); cache.set6ByteLong( index, 1 + 3 + 6 + 6, groupRecord.getFirstLoop() ); return true; }
// Registers `count` groups for `nodeId` by repeated single increments,
// mirroring how production code counts groups one at a time.
private void setCount( RelationshipGroupCache cache, int nodeId, int count )
{
    for ( int remaining = count; remaining > 0; remaining-- )
    {
        cache.incrementGroupCount( nodeId );
    }
}
}
@Override protected void process( RelationshipGroupRecord[] batch, BatchSender sender ) { // These records are read page-wise forwards, but should be cached in reverse // since the records exists in the store in reverse order. for ( int i = batch.length - 1; i >= 0; i-- ) { RelationshipGroupRecord record = batch[i]; if ( record.inUse() ) { cache.put( record ); } } } }
/** * Looks at max amount of configured memory (in constructor) and figures out for how many nodes their groups * can be cached. Before the first call to this method all {@link #incrementGroupCount(long)} calls * must have been made. After a call to this there should be a sequence of {@link #put(RelationshipGroupRecord)} * calls to cache the groups. If this call returns a node id which is lower than the highest node id in the * store then more rounds of caching should be performed after completing this round. * * @param fromNodeId inclusive * @return toNodeId exclusive */ public long prepare( long fromNodeId ) { cache.clear(); // this will have all the "first" bytes set to 0, which means !inUse this.fromNodeId = fromNodeId; // keep for use in put later on highCacheId = 0; for ( long nodeId = fromNodeId; nodeId < highNodeId; nodeId++ ) { int count = groupCount( nodeId ); if ( highCacheId + count > maxCacheLength ) { // Cannot include this one, so up until the previous is good return this.toNodeId = nodeId; } offsets.set( rebase( nodeId ), highCacheId ); highCacheId += count; } return this.toNodeId = highNodeId; }
@Test public void shouldHandleGroupCountBeyondSignedShortRange() { // GIVEN long nodeId = 0; int limit = Short.MAX_VALUE + 10; RelationshipGroupCache cache = new RelationshipGroupCache( HEAP, ByteUnit.kibiBytes( 100 ), nodeId + 1 ); // WHEN first counting all groups per node for ( int type = 0; type < limit; type++ ) { cache.incrementGroupCount( nodeId ); } // and WHEN later putting group records into the cache RelationshipGroupRecord group = new RelationshipGroupRecord( -1 ); group.setOwningNode( nodeId ); for ( int type = 0; type < limit; type++ ) { group.setId( type ); group.setFirstOut( type ); // just some relationship group.setType( type ); cache.put( group ); } long prepared = cache.prepare( nodeId ); // THEN that should work, because it used to fail inside prepare, but we can also ask // the groupCount method to be sure assertEquals( nodeId, prepared ); assertEquals( limit, cache.groupCount( nodeId ) ); }
@Test public void shouldNotFindSpaceToPutMoreGroupsThanSpecifiedForANode() { // GIVEN int nodeCount = 10; RelationshipGroupCache cache = new RelationshipGroupCache( HEAP, ByteUnit.kibiBytes( 4 ), nodeCount ); setCount( cache, 1, 7 ); assertEquals( nodeCount, cache.prepare( 0 ) ); // WHEN for ( int i = 0; i < 7; i++ ) { cache.put( new RelationshipGroupRecord( i + 1 ).initialize( true, i, -1, -1, -1, 1, -1 ) ); } try { cache.put( new RelationshipGroupRecord( 8 ).initialize( true, 8, -1, -1, -1, 1, -1 ) ); fail( "Should have failed" ); } catch ( IllegalStateException e ) { // Good } }
public void run( long memoryWeCanHoldForCertain, BatchingNeoStores neoStore, long highNodeId ) new RelationshipGroupCache( numberArrayFactory, memoryWeCanHoldForCertain, highNodeId ) ) toNodeId = groupCache.prepare( fromNodeId ); monitor.defragmentingNodeRange( fromNodeId, toNodeId ); ByteArray groupCountCache = groupCache.getGroupCountCache(); groupCountCache.clear(); Configuration nodeConfig = withBatchSize( config, neoStore.getNodeStore().getRecordsPerPage() );
/**
 * Registers one more relationship group for {@code nodeId}. Before caching any relationship groups
 * this method must be called once for every encountered group (keyed by its owning node id).
 *
 * @param nodeId node to increment group count for.
 * @throws IllegalStateException if the incremented count no longer fits in 16 bits.
 */
public void incrementGroupCount( long nodeId )
{
    int updatedCount = groupCount( nodeId ) + 1;
    // The count is stored as an unsigned short, so any bits above the low 16 mean overflow
    if ( (updatedCount & ~0xFFFF) != 0 )
    {
        throw new IllegalStateException(
                "Invalid number of relationship groups for node " + nodeId + " " + updatedCount );
    }
    groupCountCache.setShort( nodeId, 0, (short) updatedCount );
}
/**
 * Builds the pipeline which writes cached relationship groups out to the group record store:
 * read group entries from the cache, encode them into records, then update the store.
 */
public WriteGroupsStage( Configuration config, RelationshipGroupCache cache,
        RecordStore<RelationshipGroupRecord> store )
{
    super( NAME, null, config, 0 );
    // Producer: streams cached group entries (GROUP_ENTRY_SIZE bytes each) out of the cache
    add( new ReadGroupsFromCacheStep( control(), config, cache.iterator(), GROUP_ENTRY_SIZE ) );
    // Encodes the raw cache entries into store records
    add( new EncodeGroupsStep( control(), config, store ) );
    // Writes the records, acquiring ids through a StorePrepareIdSequence
    add( new UpdateRecordsStep<>( control(), config, store, new StorePrepareIdSequence() ) );
}
}
moveRight( desiredIndex, freeIndex ); return desiredIndex;
RelationshipGroupCache cache = new RelationshipGroupCache( HEAP, ByteUnit.kibiBytes( 4 ), nodeCount ); int[] counts = new int[nodeCount]; for ( int nodeId = 0; nodeId < counts.length; nodeId++ ) long toNodeId = cache.prepare( 0 ); assertTrue( toNodeId < nodeCount ); if ( cache.put( new RelationshipGroupRecord( nodeId ) .initialize( true, typeId, -1, -1, -1, nodeId, -1 ) ) )
public void run( long memoryWeCanHoldForCertain, BatchingNeoStores neoStore, long highNodeId ) new RelationshipGroupCache( numberArrayFactory, memoryWeCanHoldForCertain, highNodeId ) ) toNodeId = groupCache.prepare( fromNodeId ); monitor.defragmentingNodeRange( fromNodeId, toNodeId ); ByteArray groupCountCache = groupCache.getGroupCountCache(); groupCountCache.clear(); Configuration nodeConfig = withBatchSize( config, neoStore.getNodeStore().getRecordsPerPage() );
// Advances nodeId to the next node that still has uncollected groups, but only
// when the current node's groups have been exhausted.
private void findNextNodeWithGroupsIfNeeded()
{
    if ( countLeftForThisNode != 0 )
    {
        return; // current node still has groups left
    }
    // Move forward at least one node, skipping nodes without any groups
    while ( true )
    {
        nodeId++;
        countLeftForThisNode = nodeId >= groupCountCache.length() ? 0 : groupCount( nodeId );
        if ( countLeftForThisNode != 0 || nodeId >= groupCountCache.length() )
        {
            break;
        }
    }
}
};
/** * Looks at max amount of configured memory (in constructor) and figures out for how many nodes their groups * can be cached. Before the first call to this method all {@link #incrementGroupCount(long)} calls * must have been made. After a call to this there should be a sequence of {@link #put(RelationshipGroupRecord)} * calls to cache the groups. If this call returns a node id which is lower than the highest node id in the * store then more rounds of caching should be performed after completing this round. * * @param fromNodeId inclusive * @return toNodeId exclusive */ public long prepare( long fromNodeId ) { cache.clear(); // this will have all the "first" bytes set to 0, which means !inUse this.fromNodeId = fromNodeId; // keep for use in put later on highCacheId = 0; for ( long nodeId = fromNodeId; nodeId < highNodeId; nodeId++ ) { int count = groupCount( nodeId ); if ( highCacheId + count > maxCacheLength ) { // Cannot include this one, so up until the previous is good return this.toNodeId = nodeId; } offsets.set( rebase( nodeId ), highCacheId ); highCacheId += count; } return this.toNodeId = highNodeId; }
/**
 * Builds the pipeline which writes cached relationship groups out to the group record store:
 * read group entries from the cache, encode them into records, then update the store.
 */
public WriteGroupsStage( Configuration config, RelationshipGroupCache cache,
        RecordStore<RelationshipGroupRecord> store )
{
    super( NAME, null, config, 0 );
    // Producer: streams cached group entries (GROUP_ENTRY_SIZE bytes each) out of the cache
    add( new ReadGroupsFromCacheStep( control(), config, cache.iterator(), GROUP_ENTRY_SIZE ) );
    // Encodes the raw cache entries into store records
    add( new EncodeGroupsStep( control(), config, store ) );
    // Writes the records, acquiring ids through a StorePrepareIdSequence
    add( new UpdateRecordsStep<>( control(), config, store, new StorePrepareIdSequence() ) );
}
}
moveRight( desiredIndex, freeIndex ); return desiredIndex;
RelationshipGroupCache cache = new RelationshipGroupCache( HEAP, ByteUnit.kibiBytes( 40 ), nodeCount ); int[] counts = new int[nodeCount]; int groupCount = 0; groupCount += counts[nodeId]; assertEquals( nodeCount, cache.prepare( 0 ) ); boolean thereAreMoreGroups = true; int cachedCount = 0; if ( cache.put( new RelationshipGroupRecord( nodeId ) .initialize( true, typeId, -1, -1, -1, nodeId, -1 ) ) )
/** * Caches a relationship group into this cache, it will be cached if the * {@link RelationshipGroupRecord#getOwningNode() owner} is within the {@link #prepare(long) prepared} range, * where {@code true} will be returned, otherwise {@code false}. * * @param groupRecord {@link RelationshipGroupRecord} to cache. * @return whether or not the group was cached, i.e. whether or not it was within the prepared range. */ public boolean put( RelationshipGroupRecord groupRecord ) { long nodeId = groupRecord.getOwningNode(); assert nodeId < highNodeId; if ( nodeId < fromNodeId || nodeId >= toNodeId ) { return false; } long baseIndex = offsets.get( rebase( nodeId ) ); // grouCount is extra validation, really int groupCount = groupCount( nodeId ); long index = scanForFreeFrom( baseIndex, groupCount, groupRecord.getType(), nodeId ); // Put the group at this index cache.setByte( index, 0, (byte) 1 ); cache.set3ByteInt( index, 1, groupRecord.getType() ); cache.set6ByteLong( index, 1 + 3, groupRecord.getFirstOut() ); cache.set6ByteLong( index, 1 + 3 + 6, groupRecord.getFirstIn() ); cache.set6ByteLong( index, 1 + 3 + 6 + 6, groupRecord.getFirstLoop() ); return true; }
private int countLeftForThisNode = groupCount( nodeId );
@Override
protected void process( RelationshipGroupRecord[] batch, BatchSender sender )
{
    // Counting pass: bump the owning node's group count for every record still in use
    for ( int i = 0; i < batch.length; i++ )
    {
        RelationshipGroupRecord groupRecord = batch[i];
        if ( groupRecord.inUse() )
        {
            cache.incrementGroupCount( groupRecord.getOwningNode() );
        }
    }
}
}