@Override public Observable<Id> markNode( final Id node, final long timestamp ) { final Observable<Id> idObservable = Observable.just( node ).map( id -> { //mark the node as deleted final MutationBatch nodeMutation = nodeSerialization.mark( scope, id, timestamp ); if (logger.isTraceEnabled()) { logger.trace("Marking node {} as deleted to node mark", node); } try { nodeMutation.execute(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to execute mutation", e ); } return id; } ); return ObservableTimer.time( idObservable, markNodeTimer ); }
public Observable<MarkedEdge> repair( final ApplicationScope scope, final MarkedEdge edge, final UUID timestamp ) { //merge source and target then deal with the distinct values return Observable.just( edge ).filter( markedEdge-> markedEdge.isDeleted() ) .doOnNext( markedEdge -> { //it's still in the same state as it was when we queued it. Remove it if(logger.isDebugEnabled()){ logger.debug( "Removing edge {} ", markedEdge ); } //remove from the commit log //remove from storage try { storageSerialization.deleteEdge( scope, markedEdge, timestamp ).execute(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to connect to casandra", e ); } } ); }
/**
 * Executes the legacy (Astyanax) mutation batch and then the CQL unique-value
 * batch, reporting progress or failure to the observer.
 */
protected void executeBatch( final int targetVersion, final MutationBatch batch, final ProgressObserver po,
                             final AtomicLong count, com.datastax.driver.core.BatchStatement uniqueBatch ) {
    try {
        // legacy mutations first, then the CQL unique-value batch
        batch.execute();
        session.execute( uniqueBatch );

        po.update( targetVersion, "Finished copying " + count + " entities to the new format" );
    }
    catch ( ConnectionException e ) {
        // NOTE(review): only the Astyanax batch throws ConnectionException; a failure
        // from session.execute() (DriverException) bypasses po.failed() — confirm intended.
        po.failed( targetVersion, "Failed to execute mutation in cassandra" );
        throw new DataMigrationException( "Unable to migrate batches ", e );
    }
}
/**
 * Marks the given edge as deleted by persisting a deleted-marker copy of it,
 * emitting the marked edge once the write has executed. Timed via markEdgeTimer.
 */
@Override
public Observable<MarkedEdge> markEdge( final Edge edge ) {

    GraphValidation.validateEdge( edge );

    // wrap the edge with a deleted marker before persisting
    final MarkedEdge deleteMarker = new SimpleMarkedEdge( edge, true );

    final Observable<MarkedEdge> marked = Observable.just( deleteMarker ).map( toWrite -> {

        // fresh time-based UUID acts as the write version
        final UUID writeVersion = UUIDGenerator.newTimeUUID();

        final MutationBatch mutation = storageEdgeSerialization.writeEdge( scope, toWrite, writeVersion );

        if ( logger.isTraceEnabled() ) {
            logger.trace( "Marking edge {} as deleted to commit log", toWrite );
        }

        try {
            mutation.execute();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to execute mutation", e );
        }

        return toWrite;
    } );

    return ObservableTimer.time( marked, markEdgeTimer );
}
// record the modification time and the serialized descriptor for this row
.putColumn("lastmodified", System.currentTimeMillis(), null) // null TTL: persist indefinitely
.putColumn("descriptor", json, null);

// flush the batch to Cassandra
mutation.execute();

// NOTE(review): "%s" assumes a printf-style logger; if this is SLF4J the
// placeholder should be "{}" — confirm against the logger's type.
log.info("Wrote index to C* in [%s] ms", System.currentTimeMillis() - start);
/**
 * Writes the given edge (not deleted) to both the edge-metadata and storage
 * serializations in a single merged batch, emitting the written edge.
 * Timed via writeEdgeTimer.
 */
@Override
public Observable<MarkedEdge> writeEdge( final Edge edge ) {

    GraphValidation.validateEdge( edge );

    // a freshly written edge carries a "not deleted" marker
    final MarkedEdge toWrite = new SimpleMarkedEdge( edge, false );

    final Observable<MarkedEdge> written = Observable.just( toWrite ).map( current -> {

        // fresh time-based UUID acts as the write version
        final UUID writeVersion = UUIDGenerator.newTimeUUID();

        // merge the metadata write and the storage write into one batch
        final MutationBatch combined = edgeMetadataSerialization.writeEdge( scope, current );
        combined.mergeShallow( storageEdgeSerialization.writeEdge( scope, current, writeVersion ) );

        try {
            combined.execute();
        }
        catch ( ConnectionException e ) {
            throw new RuntimeException( "Unable to execute mutation", e );
        }

        return current;
    } );

    return ObservableTimer.time( written, writeEdgeTimer );
}
private void confirmUniqueFields( MvccEntity mvccEntity, UUID version, ApplicationScope scope, MutationBatch logMutation) { final Entity entity = mvccEntity.getEntity().get(); // re-write the unique values but this time with no TTL final BatchStatement uniqueBatch = new BatchStatement(); for ( Field field : EntityUtils.getUniqueFields(mvccEntity.getEntity().get()) ) { UniqueValue written = new UniqueValueImpl( field, entity.getId(), version); uniqueBatch.add(uniqueValueStrat.writeCQL(scope, written, -1 )); logger.debug("Finalizing {} unique value {}", field.getName(), field.getValue().toString()); } try { logMutation.execute(); session.execute(uniqueBatch); } catch ( ConnectionException e ) { logger.error( "Failed to execute write asynchronously ", e ); throw new WriteCommitException( mvccEntity, scope, "Failed to execute write asynchronously ", e ); } }
@Test public void testNoFields() throws ConnectionException { final ApplicationScope collectionScope = mock( ApplicationScope.class ); final Keyspace keyspace = mock(Keyspace.class); final MutationBatch batch = mock(MutationBatch.class); when(keyspace.prepareMutationBatch()).thenReturn(batch); // set up the mock to return the entity from the start phase final Entity entity = generateEntity(); final MvccEntity mvccEntity = fromEntity( entity ); // run the stage WriteUniqueVerify newStage = new WriteUniqueVerify( uvstrat, fig, keyspace, cassandraConfig, null, null, null, session ); newStage.call( new CollectionIoEvent<>( collectionScope, mvccEntity ) ) ; // if we get here, it's a success. We want to test no exceptions are thrown verify(batch, never()).execute(); }
/**
 * Happy path: mark a node, read the mark back, delete it, then verify it is gone.
 */
@Test
public void writeReadDelete() throws ConnectionException {

    final Id node = IdGenerator.createId( "test" );
    final long markTimestamp = System.currentTimeMillis();

    // write the deletion mark
    serialization.mark( scope, node, markTimestamp ).execute();

    // the mark should be readable with the same version
    Optional<Long> maxVersion = serialization.getMaxVersion( scope, node );
    assertEquals( markTimestamp, maxVersion.get().longValue() );

    // delete the mark
    serialization.delete( scope, node, maxVersion.get() ).execute();

    maxVersion = serialization.getMaxVersion( scope, node );

    // verifies that it is deleted
    assertFalse( maxVersion.isPresent() );
}
// flush the prepared mutation batch to the datastore
write.execute();
/**
 * Verifies getMaxVersions returns mark versions only for marked nodes:
 * nodes 1 and 2 are marked and must map to the written version; node 3 was
 * never marked and must be absent from the result.
 * (Previous comment described a "latent write discarded" scenario that this
 * test does not exercise.)
 */
@Test
public void multiGet() throws ConnectionException {

    final Id nodeId1 = IdGenerator.createId( "test" );
    final Id nodeId2 = IdGenerator.createId( "test" );
    final Id nodeId3 = IdGenerator.createId( "test" );

    final long version = System.currentTimeMillis();

    // mark only the first two nodes
    serialization.mark( scope, nodeId1, version ).execute();
    serialization.mark( scope, nodeId2, version ).execute();

    // query marks via edges touching all three nodes
    Map<Id, Long> marks = serialization.getMaxVersions( scope,
        Arrays.asList( createEdge( nodeId1, "test", nodeId2 ), createEdge( nodeId2, "test", nodeId3 ) ) );

    assertEquals( version, marks.get( nodeId1 ).longValue() );
    assertEquals( version, marks.get( nodeId2 ).longValue() );

    // node 3 was never marked
    assertFalse( marks.containsKey( nodeId3 ) );
}
}
// flush the accumulated mutations
m.execute();
} catch (ConnectionException e) {
    // surface connectivity problems as a retriable backend exception
    throw new TemporaryBackendException(e);
// persist the edge, using a fresh time-based UUID as the write version
serialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute();
/** * Test write and read edge types from source -> target */ @Test public void readTargetEdgeTypes() throws ConnectionException { final Edge edge1 = createEdge( "source", "edge", "target" ); final Id sourceId = edge1.getSourceNode(); final Edge edge2 = createEdge( sourceId, "edge", IdGenerator.createId( "target2" ) ); final Edge edge3 = createEdge( sourceId, "edge2", IdGenerator.createId( "target3" ) ); //set writing the edge serialization.writeEdge( scope, edge1 ).execute(); serialization.writeEdge( scope, edge2 ).execute(); serialization.writeEdge( scope, edge3 ).execute(); //now check we get both types back Iterator<String> edges = serialization.getEdgeTypesFromSource( scope, createSearchEdge( sourceId, null ) ); assertEquals( "edge", edges.next() ); assertEquals( "edge2", edges.next() ); assertFalse( edges.hasNext() ); //now check we can resume correctly with a "last" edges = serialization.getEdgeTypesFromSource( scope, createSearchEdge( sourceId, "edge" ) ); assertEquals( "edge2", edges.next() ); assertFalse( edges.hasNext() ); }
/**
 * Test write and read edge types pointing TO a shared target: edges from
 * several sources converge on one target and the distinct types are read back
 * via getEdgeTypesToTarget, including resuming from a "last" value.
 * (Previous comment said "from source -> target", copied from the sibling test.)
 */
@Test
public void readSourceEdgeTypes() throws ConnectionException {

    final Edge edge1 = createEdge( "source", "edge", "target" );

    final Id targetId = edge1.getTargetNode();

    final Edge edge2 = createEdge( IdGenerator.createId( "source" ), "edge", targetId );

    final Edge edge3 = createEdge( IdGenerator.createId( "source2" ), "edge2", targetId );

    //set writing the edge
    serialization.writeEdge( scope, edge1 ).execute();
    serialization.writeEdge( scope, edge2 ).execute();
    serialization.writeEdge( scope, edge3 ).execute();

    //now check we get both types back
    Iterator<String> edges = serialization.getEdgeTypesToTarget( scope, createSearchEdge( targetId, null ) );

    assertEquals( "edge", edges.next() );
    assertEquals( "edge2", edges.next() );
    assertFalse( edges.hasNext() );

    //now check we can resume correctly with a "last"
    edges = serialization.getEdgeTypesToTarget( scope, createSearchEdge( targetId, "edge" ) );

    assertEquals( "edge2", edges.next() );
    assertFalse( edges.hasNext() );
}
/**
 * Writes three shard metadata entries, then removes the newest one and
 * verifies it is no longer returned.
 */
@Test
public void testShardDelete() throws ConnectionException {

    final Id now = IdGenerator.createId( "test" );

    final long timestamp = 2000L;

    // shard1 and shard2 share a shard index; shard3 has a distinct index and the latest timestamp
    final Shard shard1 = new Shard( 1000L, timestamp, false );
    final Shard shard2 = new Shard( shard1.getShardIndex(), timestamp * 2, true );
    final Shard shard3 = new Shard( shard2.getShardIndex() * 2, timestamp * 3, true );

    final DirectedEdgeMeta sourceEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( now, "edgeType", "subType" );

    // write all three shard metadata entries in one merged batch
    MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, shard1, sourceEdgeMeta );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard2, sourceEdgeMeta ) );

    batch.mergeShallow( edgeShardSerialization.writeShardMeta( scope, shard3, sourceEdgeMeta ) );

    batch.execute();

    Iterator<Shard> results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // Latest timestamp comes first
    assertEquals( shard3, results.next() );

    // Remove shard3's metadata; the assertion below shows it is no longer returned
    // (the old comment "should now not remove anything" contradicted this)
    edgeShardSerialization.removeShardMeta( scope, shard3, sourceEdgeMeta ).execute();

    // Get iterator again
    results = edgeShardSerialization.getShardMetaData( scope, Optional.<Shard>absent(), sourceEdgeMeta );

    // We should still have shard2 stored
    assertEquals( shard2, results.next() );
}
@Test public void cleanTargetSingleEdge() throws ConnectionException { MarkedEdge edge = createEdge( "source", "test", "target" ); storageEdgeSerialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute(); edgeMetadataSerialization.writeEdge( scope, edge ).execute(); int value = edgeMetaRepair.repairTargets( scope, edge.getTargetNode(), edge.getType(), edge.getTimestamp() ) .toBlocking().single(); assertEquals( "No subtypes removed, edge exists", 1, value ); //now delete the edge storageEdgeSerialization.deleteEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute(); value = edgeMetaRepair.repairTargets( scope, edge.getTargetNode(), edge.getType(), edge.getTimestamp() ) .toBlocking().single(); assertEquals( "Single subtype should be removed", 0, value ); //now verify they're gone Iterator<String> edgeTypes = edgeMetadataSerialization .getEdgeTypesToTarget( scope, new SimpleSearchEdgeType( edge.getTargetNode(), null, null ) ); assertFalse( "No edge types exist", edgeTypes.hasNext() ); Iterator<String> sourceTypes = edgeMetadataSerialization .getIdTypesToTarget( scope, new SimpleSearchIdType( edge.getTargetNode(), edge.getType(), null, null ) ); assertFalse( "No edge types exist", sourceTypes.hasNext() ); }
@Test public void cleanSourceSingleEdge() throws ConnectionException { MarkedEdge edge = createEdge( "source", "test", "target" ); storageEdgeSerialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute(); edgeMetadataSerialization.writeEdge( scope, edge ).execute(); int value = edgeMetaRepair.repairSources( scope, edge.getSourceNode(), edge.getType(), edge.getTimestamp() ) .toBlocking().single(); assertEquals( "No subtypes removed, edge exists", 1, value ); //now delete the edge storageEdgeSerialization.deleteEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute(); value = edgeMetaRepair.repairSources( scope, edge.getSourceNode(), edge.getType(), edge.getTimestamp() ) .toBlocking().single(); assertEquals( "Single subtype should be removed", 0, value ); //now verify they're gone Iterator<String> edgeTypes = edgeMetadataSerialization .getEdgeTypesFromSource( scope, new SimpleSearchEdgeType( edge.getSourceNode(), null, null ) ); assertFalse( "No edge types exist", edgeTypes.hasNext() ); Iterator<String> sourceTypes = edgeMetadataSerialization .getIdTypesFromSource( scope, new SimpleSearchIdType( edge.getSourceNode(), edge.getType(),null, null ) ); assertFalse( "No edge types exist", sourceTypes.hasNext() ); }
/**
 * Writes a log entry at every stage and verifies each written entry is read
 * back as the max version.
 * NOTE(review): despite the name, this test performs no delete — confirm
 * whether the delete half was lost or is covered by another test.
 */
@Test
public void createAndDelete() throws ConnectionException {

    final Id applicationId = new SimpleId( "application" );
    ApplicationScope context = new ApplicationScopeImpl( applicationId );

    final Id id = new SimpleId( "test" );
    final UUID version = UUIDGenerator.newTimeUUID();

    for ( Stage stage : Stage.values() ) {

        // write an entry in this stage, marked COMPLETE
        MvccLogEntry saved = new MvccLogEntryImpl( id, version, stage, MvccLogEntry.State.COMPLETE );
        logEntryStrategy.write( context, saved ).execute();

        //Read it back
        MvccLogEntry returned = logEntryStrategy.load( context, Collections.singleton( id ), version ).getMaxVersion( id );

        assertNotNull( "Returned value should not be null", returned );

        assertEquals( "Returned should equal the saved", saved, returned );
    }
}
/**
 * Verifies that writing an entity one byte larger than the configured maximum
 * entity size is rejected with EntityTooLargeException.
 * (Previous comment claimed the entity "is successfully stored and retrieved",
 * contradicting the expected exception.)
 */
@Test( expected = EntityTooLargeException.class )
public void entityLargerThanAllowedWrite() throws ConnectionException {

    // one byte over the configured limit
    final int setSize = serializationFig.getMaxEntitySize() + 1;

    final Entity entity = EntityHelper.generateEntity( setSize );

    // build the scope and identity needed for the write
    final Id applicationId = new SimpleId( "application" );
    ApplicationScope context = new ApplicationScopeImpl( applicationId );

    final Id id = entity.getId();
    ValidationUtils.verifyIdentity( id );

    final UUID version = UUIDGenerator.newTimeUUID();
    final MvccEntity.Status status = MvccEntity.Status.COMPLETE;

    final MvccEntity mvccEntity = new MvccEntityImpl( id, version, status, entity );

    // the write itself is expected to throw
    getMvccEntitySerializationStrategy().write( context, mvccEntity ).execute();
}