public AggregateStorage(Connection connection, Datastore datastore, Duration retryBaseDelay) {
  this(new BigtableStorage(connection, retryBaseDelay),
       new DatastoreStorage(new CheckedDatastore(datastore), retryBaseDelay));
}
/**
 * @see Datastore#allocateId(IncompleteKey)
 * @throws IOException if the underlying client throws {@link DatastoreException}
 */
Key allocateId(IncompleteKey newKey) throws IOException {
  return call(() -> datastore.allocateId(newKey));
}
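// Illustrative usage of allocateId() above: have Datastore assign a concrete id
// for a new entity key. "ExampleKind" is a placeholder kind name, not one used
// by this storage; the helper itself is a sketch, not production code.
Key allocateExampleId() throws IOException {
  final IncompleteKey incomplete = datastore.newKeyFactory().setKind("ExampleKind").newKey();
  return allocateId(incomplete);
}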
void delete(WorkflowId workflowId) throws IOException {
  storeWithRetries(() -> {
    datastore.delete(workflowKey(datastore.newKeyFactory(), workflowId));
    return null;
  });
}
Optional<Resource> getResource(String id) throws IOException {
  final Entity entity = datastore.get(datastore.newKeyFactory().setKind(KIND_COUNTER_LIMIT).newKey(id));
  if (entity == null) {
    return Optional.empty();
  }
  return Optional.of(entityToResource(entity));
}
private Key activeWorkflowInstanceKey(WorkflowInstance workflowInstance) {
  return activeWorkflowInstanceKey(datastore.newKeyFactory(), workflowInstance);
}
/**
 * Strongly consistently read all active states.
 */
Map<WorkflowInstance, RunState> readActiveStates() throws IOException {
  // Strongly read active state keys from index shards in parallel
  final List<Key> keys = gatherIO(activeWorkflowInstanceIndexShardKeys(datastore.newKeyFactory()).stream()
      .map(key -> asyncIO(() -> datastore.query(Query.newEntityQueryBuilder()
          .setFilter(PropertyFilter.hasAncestor(key))
          .setKind(KIND_ACTIVE_WORKFLOW_INSTANCE_INDEX_SHARD_ENTRY)
          .build())))
      .collect(toList()), 30, TimeUnit.SECONDS)
      .stream()
      .flatMap(Collection::stream)
      .map(entity -> entity.getKey().getName())
      .map(name -> activeWorkflowInstanceKey(datastore.newKeyFactory(), name))
      .collect(toList());

  // Strongly consistently read values for the above keys in parallel
  return gatherIO(Lists.partition(keys, MAX_NUMBER_OF_ENTITIES_IN_ONE_BATCH_READ).stream()
      .map(batch -> asyncIO(() -> readRunStateBatch(batch)))
      .collect(toList()), 30, TimeUnit.SECONDS)
      .stream()
      .flatMap(Collection::stream)
      .collect(toMap(RunState::workflowInstance, Function.identity()));
}
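// A minimal sketch of the fan-out helpers used by readActiveStates(), assuming
// they are built on CompletableFuture (java.util.concurrent imports assumed at
// the top of the file). The real asyncIO/gatherIO live elsewhere in this
// codebase; the IOOperation interface and the per-future timeout below are
// illustrative simplifications, not the production implementation.

@FunctionalInterface
interface IOOperation<T> {
  T run() throws IOException;
}

static <T> CompletionStage<T> asyncIO(IOOperation<T> operation) {
  // Run the blocking Datastore call off-thread, re-throwing IOException
  // as a CompletionException so it can cross the async boundary.
  return CompletableFuture.supplyAsync(() -> {
    try {
      return operation.run();
    } catch (IOException e) {
      throw new CompletionException(e);
    }
  }, ForkJoinPool.commonPool());
}

static <T> List<T> gatherIO(List<CompletionStage<T>> stages, long timeout, TimeUnit unit)
    throws IOException {
  // For simplicity each future gets the full timeout; a production helper
  // might enforce a single overall deadline instead.
  final List<T> results = new ArrayList<>();
  for (CompletionStage<T> stage : stages) {
    try {
      results.add(stage.toCompletableFuture().get(timeout, unit));
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
      throw new IOException(e);
    }
  }
  return results;
}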
private List<Backfill> backfillsForQuery(EntityQuery query) throws IOException {
  final List<Backfill> backfills = Lists.newArrayList();
  datastore.query(query, entity -> backfills.add(entityToBackfill(entity)));
  return backfills;
}
private void deleteShardsForCounter(String counterId) throws IOException {
  final List<Key> shards = new ArrayList<>();
  datastore.query(EntityQuery.newEntityQueryBuilder()
      .setKind(KIND_COUNTER_SHARD)
      .setFilter(PropertyFilter.eq(PROPERTY_COUNTER_ID, counterId))
      .build(), entity -> shards.add(entity.getKey()));

  // This is a safeguard against exceeding the maximum number of entities in one
  // batch write; in practice the number of shards is much smaller.
  for (List<Key> batch : Lists.partition(shards, MAX_NUMBER_OF_ENTITIES_IN_ONE_BATCH_WRITE)) {
    storeWithRetries(() -> {
      datastore.delete(batch.toArray(new Key[0]));
      return null;
    });
  }
}
/**
 * Strongly consistently read a batch of {@link RunState}s.
 *
 * <p>Lookups by key are strongly consistent in Datastore, unlike non-ancestor queries.
 */
private List<RunState> readRunStateBatch(List<Key> keys) throws IOException {
  assert keys.size() <= MAX_NUMBER_OF_ENTITIES_IN_ONE_BATCH_READ;
  final List<RunState> runStates = new ArrayList<>();
  datastore.get(keys, entity ->
      runStates.add(entityToRunState(entity, parseWorkflowInstance(entity))));
  return runStates;
}
private StorageTransaction newTransaction() throws TransactionException {
  final CheckedDatastoreTransaction transaction;
  try {
    transaction = datastore.newTransaction();
  } catch (DatastoreIOException e) {
    throw new TransactionException(e.getCause());
  }
  return storageTransactionFactory.apply(transaction);
}
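// A hedged sketch of how newTransaction() might be consumed: apply a unit of
// work, commit on success, and roll back anything still active. The
// TransactionFunction interface here is illustrative, and the sketch assumes
// StorageTransaction exposes commit(), rollback() and isActive(), mirroring
// the underlying Datastore transaction. Retry handling for aborted
// transactions is deliberately omitted.

@FunctionalInterface
interface TransactionFunction<T> {
  T apply(StorageTransaction tx) throws IOException;
}

<T> T runInTransaction(TransactionFunction<T> f) throws IOException {
  final StorageTransaction tx = newTransaction();
  try {
    final T result = f.apply(tx);
    tx.commit();
    return result;
  } finally {
    // Roll back if the work or the commit failed and left the transaction open.
    if (tx.isActive()) {
      tx.rollback();
    }
  }
}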
Optional<Workflow> workflow(WorkflowId workflowId) throws IOException {
  final Optional<Entity> entityOptional =
      getOpt(datastore, workflowKey(datastore.newKeyFactory(), workflowId))
          .filter(e -> e.contains(PROPERTY_WORKFLOW_JSON));
  if (entityOptional.isPresent()) {
    return Optional.of(parseWorkflowJson(entityOptional.get(), workflowId));
  } else {
    return Optional.empty();
  }
}
private List<Workflow> getBatchOfWorkflows(final List<WorkflowId> batch) throws IOException {
  final List<Key> keys = batch.stream()
      .map(workflowId -> workflowKey(datastore.newKeyFactory(), workflowId))
      .collect(toList());
  final List<Workflow> workflows = new ArrayList<>();
  datastore.get(keys, entity -> {
    try {
      workflows.add(OBJECT_MAPPER.readValue(entity.getString(PROPERTY_WORKFLOW_JSON), Workflow.class));
    } catch (IOException e) {
      LOG.warn("Failed to read workflow {}.", entity.getKey(), e);
    }
  });
  return workflows;
}
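// A hedged sketch of a caller-facing wrapper around getBatchOfWorkflows():
// partition an arbitrarily large id list so that no single lookup exceeds the
// Datastore batch-read limit. The wrapper signature is illustrative.
List<Workflow> workflows(Collection<WorkflowId> workflowIds) throws IOException {
  final List<Workflow> workflows = new ArrayList<>();
  for (List<WorkflowId> batch :
      Lists.partition(new ArrayList<>(workflowIds), MAX_NUMBER_OF_ENTITIES_IN_ONE_BATCH_READ)) {
    workflows.addAll(getBatchOfWorkflows(batch));
  }
  return workflows;
}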
public List<Workflow> workflows(String componentId) throws IOException {
  final Key componentKey = componentKey(datastore.newKeyFactory(), componentId);

  final List<Workflow> workflows = Lists.newArrayList();
  final EntityQuery query = Query.newEntityQueryBuilder()
      .setKind(KIND_WORKFLOW)
      .setFilter(PropertyFilter.hasAncestor(componentKey))
      .build();
  datastore.query(query, entity -> {
    final Workflow workflow;
    if (entity.contains(PROPERTY_WORKFLOW_JSON)) {
      try {
        workflow = OBJECT_MAPPER.readValue(entity.getString(PROPERTY_WORKFLOW_JSON), Workflow.class);
      } catch (IOException e) {
        LOG.warn("Failed to read workflow {}.", entity.getKey(), e);
        return;
      }
      workflows.add(workflow);
    }
  });

  return workflows;
}
private Map<WorkflowInstance, RunState> queryActiveStates(EntityQuery activeStatesQuery)
    throws IOException {
  final ImmutableMap.Builder<WorkflowInstance, RunState> mapBuilder = ImmutableMap.builder();
  datastore.query(activeStatesQuery, entity -> {
    final WorkflowInstance instance = parseWorkflowInstance(entity);
    mapBuilder.put(instance, entityToRunState(entity, instance));
  });
  return mapBuilder.build();
}
Optional<RunState> readActiveState(WorkflowInstance instance) throws IOException {
  final Entity entity = datastore.get(activeWorkflowInstanceKey(instance));
  if (entity == null) {
    return Optional.empty();
  } else {
    return Optional.of(entityToRunState(entity, instance));
  }
}
boolean enabled(WorkflowId workflowId) throws IOException {
  final Key workflowKey = workflowKey(datastore.newKeyFactory(), workflowId);
  return getOpt(datastore, workflowKey)
      .filter(w -> w.contains(PROPERTY_WORKFLOW_ENABLED))
      .map(workflow -> workflow.getBoolean(PROPERTY_WORKFLOW_ENABLED))
      .orElse(DEFAULT_WORKFLOW_ENABLED);
}
Map<Integer, Long> shardsForCounter(String counterId) throws IOException {
  final List<Key> shardKeys = IntStream.range(0, NUM_SHARDS)
      .mapToObj(index -> datastore.newKeyFactory()
          .setKind(KIND_COUNTER_SHARD)
          .newKey(String.format("%s-%d", counterId, index)))
      .collect(toList());

  final Map<Integer, Long> fetchedShards = new HashMap<>();
  datastore.get(shardKeys, shard -> fetchedShards.put(
      (int) shard.getLong(PROPERTY_SHARD_INDEX), shard.getLong(PROPERTY_SHARD_VALUE)));
  return fetchedShards;
}
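// shardsForCounter() above reads all shards of a sharded counter: one logical
// counter is split across NUM_SHARDS entities so concurrent updates contend on
// different entity groups rather than a single hot entity. The logical value
// is the sum over the shards. A minimal reader sketch (the getCounter name is
// illustrative, not necessarily the production method):
long getCounter(String counterId) throws IOException {
  return shardsForCounter(counterId).values().stream()
      .mapToLong(Long::longValue)
      .sum();
}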
/**
 * Delete resource by id. Deletes both the counter shards and the counter limit, if it exists.
 *
 * <p>Due to Datastore limitations (a transaction may modify at most 25 entity groups),
 * we cannot do everything in one transaction.
 */
void deleteResource(String id) throws IOException {
  storeWithRetries(() -> {
    datastore.delete(datastore.newKeyFactory().setKind(KIND_COUNTER_LIMIT).newKey(id));
    return null;
  });
  deleteShardsForCounter(id);
}
List<Resource> getResources() throws IOException {
  final EntityQuery query = Query.newEntityQueryBuilder().setKind(KIND_COUNTER_LIMIT).build();
  final List<Resource> resources = Lists.newArrayList();
  datastore.query(query, entity -> resources.add(entityToResource(entity)));
  return resources;
}
@Override
public Optional<Workflow> workflow(WorkflowId workflowId) throws IOException {
  final Optional<Entity> entityOptional = DatastoreStorage.getOpt(tx,
      DatastoreStorage.workflowKey(tx.getDatastore().newKeyFactory(), workflowId));
  if (entityOptional.isPresent()) {
    return Optional.of(DatastoreStorage.parseWorkflowJson(entityOptional.get(), workflowId));
  } else {
    return Optional.empty();
  }
}