/**
 * Allocates a scratch buffer sized to hold four consecutive schema store records.
 */
private byte[] newRecordBuffer()
{
    int bufferSize = schemaStore.getRecordSize() * 4;
    return new byte[bufferSize];
}
}
/**
 * Returns the size in bytes of a single record, as reported by the wrapped store.
 */
@Override
public int getRecordSize()
{
    final int delegateRecordSize = actual.getRecordSize();
    return delegateRecordSize;
}
/**
 * Computes the current end-of-store byte position: the highest allocated
 * record id times the fixed record size. Performed in long arithmetic since
 * {@code getHighId()} returns a long, so no int overflow occurs.
 */
@Override
protected long position()
{
    final long highId = store.getHighId();
    return highId * store.getRecordSize();
}
}
/**
 * Creates a step that writes updated records back into {@code store}.
 *
 * @param control stage control for coordinating this step with its stage.
 * @param config importer configuration; {@code parallelRecordWrites()} decides
 *               whether writes may run with unbounded parallelism (0) or must
 *               be serialized onto a single processor (1).
 * @param store the record store being written to.
 * @param prepareIdSequence supplies id sequences used when preparing records for write.
 */
public UpdateRecordsStep( StageControl control, Configuration config, RecordStore<RECORD> store,
        PrepareIdSequence prepareIdSequence )
{
    super( control, "v", config, config.parallelRecordWrites() ? 0 : 1 );
    this.store = store;
    this.recordSize = store.getRecordSize();
    this.prepareIdSequence = prepareIdSequence;
}
/**
 * Creates a mocked dynamic record store whose reported record size is the
 * given data block size plus the dynamic record header.
 *
 * @param blockSize size in bytes of the data portion of each dynamic record.
 * @return a Mockito mock stubbed for {@code getRecordSize()} and {@code getRecordDataSize()}.
 */
public static RecordStore<DynamicRecord> configureDynamicStore( int blockSize )
{
    @SuppressWarnings( "unchecked" )
    RecordStore<DynamicRecord> mock = mock( RecordStore.class );
    int totalRecordSize = blockSize + DynamicRecordFormat.RECORD_HEADER_SIZE;
    when( mock.getRecordSize() ).thenReturn( totalRecordSize );
    when( mock.getRecordDataSize() ).thenReturn( blockSize );
    return mock;
}
}
/**
 * Lists metadata (file plus record size) for every store file backing this
 * storage engine. Count stores are handled specially via
 * {@link #addCountStoreFiles(List)} since they are not plain record stores.
 *
 * @return metadata for all storage files, one entry per store (plus count store files).
 */
@Override
public Collection<StoreFileMetadata> listStorageFiles()
{
    List<StoreFileMetadata> files = new ArrayList<>();
    for ( StoreType type : StoreType.values() )
    {
        // FIX: compare enum constants with == rather than equals() — it is
        // null-safe and compile-time type-checked, and behaves identically here.
        if ( type == StoreType.COUNTS )
        {
            addCountStoreFiles( files );
        }
        else
        {
            final RecordStore<AbstractBaseRecord> recordStore = neoStores.getRecordStore( type );
            StoreFileMetadata metadata =
                    new StoreFileMetadata( recordStore.getStorageFile(), recordStore.getRecordSize() );
            files.add( metadata );
        }
    }
    return files;
}
@Test public void ioThroughputStatDoesNotOverflow() { // store with huge record size to force overflow and not create huge batch of records RecordStore<NodeRecord> store = mock( RecordStore.class ); when( store.getRecordSize() ).thenReturn( Integer.MAX_VALUE / 2 ); Configuration configuration = mock( Configuration.class ); StageControl stageControl = mock( StageControl.class ); UpdateRecordsStep<NodeRecord> step = new UpdateRecordsStep<>( stageControl, configuration, store, new StorePrepareIdSequence() ); NodeRecord record = new NodeRecord( 1 ); record.setInUse( true ); NodeRecord[] batch = new NodeRecord[11]; Arrays.fill( batch, record ); step.process( batch, mock( BatchSender.class ) ); Stat stat = step.stat( Keys.io_throughput ); assertThat( stat.asLong(), greaterThan( 0L ) ); }
public CountGroupsStage( Configuration config, RecordStore<RelationshipGroupRecord> store, RelationshipGroupCache groupCache, StatsProvider... additionalStatsProviders ) { super( NAME, null, config, RECYCLE_BATCHES ); add( new BatchFeedStep( control(), config, allIn( store, config ), store.getRecordSize() ) ); add( new ReadRecordsStep<>( control(), config, false, store ) ); add( new CountGroupsStep( control(), config, groupCache, additionalStatsProviders ) ); } }
public ScanAndCacheGroupsStage( Configuration config, RecordStore<RelationshipGroupRecord> store, RelationshipGroupCache cache, StatsProvider... additionalStatsProviders ) { super( NAME, null, config, RECYCLE_BATCHES ); add( new BatchFeedStep( control(), config, allInReversed( store, config ), store.getRecordSize() ) ); add( new ReadRecordsStep<>( control(), config, false, store ) ); add( new CacheGroupsStep( control(), config, cache, additionalStatsProviders ) ); } }
public NodeFirstGroupStage( Configuration config, RecordStore<RelationshipGroupRecord> groupStore, NodeStore nodeStore, ByteArray cache ) { super( NAME, null, config, 0 ); add( new BatchFeedStep( control(), config, allIn( groupStore, config ), groupStore.getRecordSize() ) ); add( new ReadRecordsStep<>( control(), config, true, groupStore ) ); add( new NodeSetFirstGroupStep( control(), config, nodeStore, cache ) ); add( new UpdateRecordsStep<>( control(), config, nodeStore, new StorePrepareIdSequence() ) ); } }
/**
 * Returns a fresh byte buffer able to hold four schema store records.
 */
private byte[] newRecordBuffer()
{
    final int fourRecords = 4 * schemaStore.getRecordSize();
    return new byte[fourRecords];
}
}
/**
 * Delegates to the underlying store for the per-record size in bytes.
 */
@Override
public int getRecordSize()
{
    final int size = actual.getRecordSize();
    return size;
}
/**
 * End-of-store byte offset: record count (high id) times record size.
 * The multiplication happens in long arithmetic because the high id is a long.
 */
@Override
protected long position()
{
    final long recordCount = store.getHighId();
    final long bytesPerRecord = store.getRecordSize();
    return recordCount * bytesPerRecord;
}
}
/**
 * Constructs a record-writing step for {@code store}.
 * Parallelism is unbounded (0) when the configuration permits parallel record
 * writes, otherwise capped at a single processor (1) so writes stay ordered.
 *
 * @param control stage control coordinating this step.
 * @param config importer configuration.
 * @param store destination record store.
 * @param prepareIdSequence source of id sequences for preparing records.
 */
public UpdateRecordsStep( StageControl control, Configuration config, RecordStore<RECORD> store,
        PrepareIdSequence prepareIdSequence )
{
    super( control, "v", config, config.parallelRecordWrites() ? 0 : 1 );
    this.prepareIdSequence = prepareIdSequence;
    this.store = store;
    this.recordSize = store.getRecordSize();
}
// Reads record #1 from the "StoryDataBase1" RMS record store (created if absent)
// and interprets its bytes as a String.
byte[] recData = null;
int len;
RecordStore rs = RecordStore.openRecordStore( "StoryDataBase1", true );
try
{
    if ( rs.getNumRecords() > 0 )
    {
        recData = new byte[rs.getRecordSize( 1 )];
        len = rs.getRecord( 1, recData, 0 );
        String value = new String( recData, 0, len );
        // FIX: the original tested 'value == null', which is dead code —
        // 'new String(...)' never returns null. Test for emptiness instead.
        if ( value.length() == 0 )
        {
            // ... handle an empty/blank record ...
        }
        else
        {
            // ... handle the stored value ...
        }
    }
}
finally
{
    // FIX: the original never closed the record store, leaking the open handle.
    rs.closeRecordStore();
}
/**
 * Builds a mocked dynamic record store for the given block size: the record
 * size is the data block plus the dynamic block header, and the header size
 * is stubbed separately.
 *
 * @param blockSize data-block size in bytes for each dynamic record.
 * @return a Mockito mock stubbed for {@code getRecordSize()} and {@code getRecordHeaderSize()}.
 */
public static RecordStore<DynamicRecord> configureDynamicStore( int blockSize )
{
    @SuppressWarnings( "unchecked" )
    RecordStore<DynamicRecord> mock = mock( RecordStore.class );
    int headerSize = AbstractDynamicStore.BLOCK_HEADER_SIZE;
    when( mock.getRecordSize() ).thenReturn( blockSize + headerSize );
    when( mock.getRecordHeaderSize() ).thenReturn( headerSize );
    return mock;
}
}
/**
 * Builds a mocked {@code StoreAccess} whose record store (as resolved through
 * {@code multiPassStore()}) reports the given high id and record size.
 *
 * @param highId highest record id the mocked store should report.
 * @param recordSize per-record size in bytes the mocked store should report.
 * @return the mocked store access handle.
 */
@SuppressWarnings("unchecked")
private StoreAccess storeAccess( long highId, int recordSize )
{
    StoreAccess access = mock( StoreAccess.class );
    RecordStore mockStore = mock( RecordStore.class );
    when( multiPassStore().getRecordStore( access ) ).thenReturn( mockStore );
    when( mockStore.getHighId() ).thenReturn( highId );
    when( mockStore.getRecordSize() ).thenReturn( recordSize );
    return access;
}
/**
 * Stage scanning relationship group records (in reverse store order) into the
 * supplied cache: feed reversed batches -> read records -> cache groups.
 */
public ScanAndCacheGroupsStage( Configuration config, RecordStore<RelationshipGroupRecord> store,
        RelationshipGroupCache cache, StatsProvider... additionalStatsProviders )
{
    super( NAME, null, config, RECYCLE_BATCHES );
    add( new BatchFeedStep( control(), config, allInReversed( store, config ),
            store.getRecordSize() ) );
    add( new ReadRecordsStep<>( control(), config, false, store ) );
    add( new CacheGroupsStep( control(), config, cache, additionalStatsProviders ) );
}
}
/**
 * Stage counting all relationship group records into {@code groupCache}:
 * feed batches -> read group records -> tally into the cache.
 */
public CountGroupsStage( Configuration config, RecordStore<RelationshipGroupRecord> store,
        RelationshipGroupCache groupCache, StatsProvider... additionalStatsProviders )
{
    super( NAME, null, config, RECYCLE_BATCHES );
    add( new BatchFeedStep( control(), config, allIn( store, config ),
            store.getRecordSize() ) );
    add( new ReadRecordsStep<>( control(), config, false, store ) );
    add( new CountGroupsStep( control(), config, groupCache, additionalStatsProviders ) );
}
}
/**
 * Stage wiring each node to its first relationship group: feed all group
 * records -> read them -> stamp first-group ids onto node records via the
 * byte-array cache -> write updated nodes back to the node store.
 */
public NodeFirstGroupStage( Configuration config, RecordStore<RelationshipGroupRecord> groupStore,
        NodeStore nodeStore, ByteArray cache )
{
    super( NAME, null, config, 0 );
    add( new BatchFeedStep( control(), config, allIn( groupStore, config ),
            groupStore.getRecordSize() ) );
    add( new ReadRecordsStep<>( control(), config, true, groupStore ) );
    add( new NodeSetFirstGroupStep( control(), config, nodeStore, cache ) );
    add( new UpdateRecordsStep<>( control(), config, nodeStore, new StorePrepareIdSequence() ) );
}
}