private void addBoundStatement(BoundStatement boundStatement) {
    Iterator<Host> hosts = m_loadBalancingPolicy.newQueryPlan(m_clusterConnection.getKeyspace(), boundStatement);

    if (hosts.hasNext()) {
        // Route the statement to an unlogged batch keyed by the first host in the query plan
        Host hostKey = hosts.next();

        BatchStatement batchStatement = m_batchMap.get(hostKey);
        if (batchStatement == null) {
            batchStatement = new BatchStatement(BatchStatement.Type.UNLOGGED);
            m_batchMap.put(hostKey, batchStatement);
        }
        batchStatement.add(boundStatement);
    } else {
        // No host available from the query plan; fall back to the shared data point batch
        dataPointBatch.add(boundStatement);
    }
}
protected void executeBatch(Statement... statements) {
    LOG.debug("Execute cassandra list of statements");
    if (LOG.isDebugEnabled()) {
        LOG.debug("Execute cassandra statements {} ", Arrays.toString(statements));
    }

    BatchStatement batchStatement = new BatchStatement(getBatchType());
    for (Statement statement : statements) {
        batchStatement.add(statement);
    }
    executeBatch(batchStatement);
}
@Override
public PairBatchStatementTuples apply(List<PairStatementTuple> l) {
    final List<Tuple> inputs = new LinkedList<>();
    final BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);

    for (PairStatementTuple pair : l) {
        batch.add(pair.getStatement());
        inputs.add(pair.getTuple());
    }

    return new PairBatchStatementTuples(inputs, batch);
} });
public void addMetricName(String metricName) {
    m_newMetrics.add(metricName);

    BoundStatement bs = new BoundStatement(m_clusterConnection.psStringIndexInsert);
    bs.setBytesUnsafe(0, ByteBuffer.wrap(ROW_KEY_METRIC_NAMES.getBytes(UTF_8)));
    bs.setString(1, metricName);
    bs.setConsistencyLevel(m_consistencyLevel);
    metricNamesBatch.add(bs);
}
@Override
public BatchStatement deleteCQL(final ApplicationScope applicationScope, final UniqueValue uniqueValue) {
    final MigrationRelationship<UniqueValueSerializationStrategy> migration = getMigrationRelationShip();

    if (migration.needsMigration()) {
        final BatchStatement batch = new BatchStatement();
        batch.add(migration.from.deleteCQL(applicationScope, uniqueValue));
        batch.add(migration.to.deleteCQL(applicationScope, uniqueValue));
        return batch;
    }

    return migration.to.deleteCQL(applicationScope, uniqueValue);
}
@Override
public void deleteTokens(final List<UUID> tokenUUIDs, final ByteBuffer principalKeyBuffer) {
    Preconditions.checkNotNull(tokenUUIDs, "token UUID list is required");
    Preconditions.checkNotNull(principalKeyBuffer, "principalKeyBuffer is required");

    logger.trace("deleteTokens, token UUIDs: {}", tokenUUIDs);

    final BatchStatement batchStatement = new BatchStatement();

    tokenUUIDs.forEach(tokenUUID ->
        batchStatement.add(QueryBuilder.delete()
            .from(TOKENS_TABLE)
            .where(QueryBuilder
                .eq("key", DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED)))));

    batchStatement.add(QueryBuilder.delete()
        .from(PRINCIPAL_TOKENS_TABLE)
        .where(QueryBuilder
            .eq("key", principalKeyBuffer)));

    session.execute(batchStatement);
}
private void confirmUniqueFields(MvccEntity mvccEntity, UUID version, ApplicationScope scope, MutationBatch logMutation) {
    final Entity entity = mvccEntity.getEntity().get();

    // re-write the unique values but this time with no TTL
    final BatchStatement uniqueBatch = new BatchStatement();

    for (Field field : EntityUtils.getUniqueFields(mvccEntity.getEntity().get())) {
        UniqueValue written = new UniqueValueImpl(field, entity.getId(), version);
        uniqueBatch.add(uniqueValueStrat.writeCQL(scope, written, -1));
        logger.debug("Finalizing {} unique value {}", field.getName(), field.getValue().toString());
    }

    try {
        logMutation.execute();
        session.execute(uniqueBatch);
    } catch (ConnectionException e) {
        logger.error("Failed to execute write asynchronously ", e);
        throw new WriteCommitException(mvccEntity, scope, "Failed to execute write asynchronously ", e);
    }
}
@Override
public void deleteAllShards(String queueName, String region) {
    BatchStatement batch = new BatchStatement();

    Shard.Type[] shardTypes = new Shard.Type[]{Shard.Type.DEFAULT, Shard.Type.INFLIGHT};

    for (Shard.Type shardType : shardTypes) {
        Statement delete = QueryBuilder.delete().from(getTableName(shardType))
            .where(QueryBuilder.eq(COLUMN_QUEUE_NAME, queueName))
            .and(QueryBuilder.eq(COLUMN_REGION, region));

        logger.trace("Removing shards for queue {} region {} shardType {} query {}",
            queueName, region, shardType, batch.toString());

        batch.add(delete);
    }

    cassandraClient.getQueueMessageSession().execute(batch);
}
tasks.forEach(task -> {
    String taskPayload = toJson(task);
    batchStatement.add(insertTaskStatement.bind(UUID.fromString(workflowId), DEFAULT_SHARD_ID, task.getTaskId(), taskPayload));
    recordCassandraDaoRequests("createTask", task.getTaskType(), task.getWorkflowType());
    recordCassandraDaoPayloadSize("createTask", taskPayload.length(), task.getTaskType(), task.getWorkflowType());
});
batchStatement.add(updateTotalTasksStatement.bind(totalTasks, UUID.fromString(workflowId), DEFAULT_SHARD_ID));
session.execute(batchStatement);
private boolean removeTask(Task task) {
    // TODO: calculate shard number based on seq and maxTasksPerShard
    try {
        // get total tasks for this workflow
        WorkflowMetadata workflowMetadata = getWorkflowMetadata(task.getWorkflowInstanceId());
        int totalTasks = workflowMetadata.getTotalTasks();

        // remove from task_lookup table
        removeTaskLookup(task);

        recordCassandraDaoRequests("removeTask", task.getTaskType(), task.getWorkflowType());

        // delete task from workflows table and decrement total tasks by 1
        BatchStatement batchStatement = new BatchStatement();
        batchStatement.add(deleteTaskStatement.bind(UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID, task.getTaskId()));
        batchStatement.add(updateTotalTasksStatement.bind(totalTasks - 1, UUID.fromString(task.getWorkflowInstanceId()), DEFAULT_SHARD_ID));
        ResultSet resultSet = session.execute(batchStatement);
        return resultSet.wasApplied();
    } catch (Exception e) {
        Monitors.error(CLASS_NAME, "removeTask");
        String errorMsg = String.format("Failed to remove task: %s", task.getTaskId());
        LOGGER.error(errorMsg, e);
        throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, errorMsg);
    }
}
@Override
public void revokeToken(final UUID tokenUUID, final ByteBuffer principalKeyBuffer) {
    Preconditions.checkNotNull(tokenUUID, "token UUID is required");

    logger.trace("revokeToken, token UUID: {}", tokenUUID);

    final BatchStatement batchStatement = new BatchStatement();

    batchStatement.add(QueryBuilder.delete()
        .from(TOKENS_TABLE)
        .where(QueryBuilder
            .eq("key", DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED))));

    if (principalKeyBuffer != null) {
        batchStatement.add(QueryBuilder.delete()
            .from(PRINCIPAL_TOKENS_TABLE)
            .where(QueryBuilder
                .eq("key", principalKeyBuffer))
            .and(QueryBuilder
                .eq("column1", DataType.uuid().serialize(tokenUUID, ProtocolVersion.NEWEST_SUPPORTED))));
    }

    session.execute(batchStatement);
}
public void call(final Throwable t) {
    if (t instanceof CollectionRuntimeException) {
        CollectionRuntimeException cre = (CollectionRuntimeException) t;
        final MvccEntity mvccEntity = cre.getEntity();
        final ApplicationScope scope = cre.getApplicationScope();

        // one batch to handle rollback
        MutationBatch rollbackMb = null;
        final BatchStatement uniqueDeleteBatch = new BatchStatement();
        final Optional<Entity> entity = mvccEntity.getEntity();

        if (entity.isPresent()) {
            for (final Field field : entity.get().getFields()) {
                // if it's unique, add its deletion to the rollback batch
                if (field.isUnique()) {
                    UniqueValue toDelete = new UniqueValueImpl(field, entity.get().getId(), mvccEntity.getVersion());
                    uniqueDeleteBatch.add(uniqueValueStrat.deleteCQL(scope, toDelete));
                }
            }

            // execute the batch statements for deleting unique field entries
            session.execute(uniqueDeleteBatch);

            logEntryStrat.delete(scope, entity.get().getId(), mvccEntity.getVersion());
        }
    }
}
public void addRowKey(String metricName, DataPointsRowKey rowKey, int rowKeyTtl) {
    m_newRowKeys.add(rowKey);

    ByteBuffer bb = ByteBuffer.allocate(8);
    bb.putLong(0, rowKey.getTimestamp());

    Statement bs = m_clusterConnection.psRowKeyTimeInsert.bind()
            .setString(0, metricName)
            .setTimestamp(1, new Date(rowKey.getTimestamp()))
            //.setBytesUnsafe(1, bb) //Setting timestamp in a more optimal way
            .setInt(2, rowKeyTtl)
            .setIdempotent(true);
    bs.setConsistencyLevel(m_consistencyLevel);
    rowKeyBatch.add(bs);

    bs = m_clusterConnection.psRowKeyInsert.bind()
            .setString(0, metricName)
            .setTimestamp(1, new Date(rowKey.getTimestamp()))
            //.setBytesUnsafe(1, bb) //Setting timestamp in a more optimal way
            .setString(2, rowKey.getDataType())
            .setMap(3, rowKey.getTags())
            .setInt(4, rowKeyTtl)
            .setIdempotent(true);
    bs.setConsistencyLevel(m_consistencyLevel);
    rowKeyBatch.add(bs);
}
@Override
public void putUuid(final MapScope scope, final String key, final UUID putUuid) {
    Preconditions.checkNotNull(scope, "mapscope is required");
    Preconditions.checkNotNull(key, "key is required");
    Preconditions.checkNotNull(putUuid, "value is required");

    final BatchStatement batchStatement = new BatchStatement();

    batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
        .value("key", getMapEntryPartitionKey(scope, key))
        .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.uuid().serialize(putUuid, ProtocolVersion.NEWEST_SUPPORTED)));

    final int bucket = BUCKET_LOCATOR.getCurrentBucket(scope.getName());

    batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
        .value("key", getMapKeyPartitionKey(scope, bucket))
        .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.serializeValue(null, ProtocolVersion.NEWEST_SUPPORTED)));

    session.execute(batchStatement);
}
Shard shard = defaultShardIterator.next();
Statement deleteAll = createDeleteAllMessagesStatement(shard);
deleteAllBatch.add(deleteAll);
// fragment (TTL branch): truncated .value(...) chains completed to mirror putUuid; the String 'value' parameter is assumed
Using timeToLive = QueryBuilder.ttl(ttl);

batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
    .using(timeToLive)
    .value("key", getMapEntryPartitionKey(scope, key))
    .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
    .value("value", DataType.text().serialize(value, ProtocolVersion.NEWEST_SUPPORTED)));

batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
    .using(timeToLive)
    .value("key", getMapKeyPartitionKey(scope, bucket))
    .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
    .value("value", DataType.serializeValue(null, ProtocolVersion.NEWEST_SUPPORTED)));
} else {
    batchStatement.add(QueryBuilder.insertInto(MAP_ENTRIES_TABLE)
        .value("key", getMapEntryPartitionKey(scope, key))
        .value("column1", DataType.cboolean().serialize(true, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.text().serialize(value, ProtocolVersion.NEWEST_SUPPORTED)));

    batchStatement.add(QueryBuilder.insertInto(MAP_KEYS_TABLE)
        .value("key", getMapKeyPartitionKey(scope, bucket))
        .value("column1", DataType.text().serialize(key, ProtocolVersion.NEWEST_SUPPORTED))
        .value("value", DataType.serializeValue(null, ProtocolVersion.NEWEST_SUPPORTED)));
}
final Clause uniqueEqColumn = QueryBuilder.eq("column1", columnValue);
Statement uniqueDelete = QueryBuilder.delete().from(TABLE_UNIQUE_VALUES).where(uniqueEqKey).and(uniqueEqColumn);
batch.add(uniqueDelete);

// declaration restored to match batch.add(uniqueLogDelete); uniqueLogEqKey and uniqueLogEqColumn are defined earlier in the enclosing method
Statement uniqueLogDelete = QueryBuilder.delete()
    .from(TABLE_UNIQUE_VALUES_LOG).where(uniqueLogEqKey).and(uniqueLogEqColumn);
batch.add(uniqueLogDelete);