@Override
public void updateAll(Iterable<Update> updates) {
    _local.get().updateAll(updates);
}
    _dataStore.updateAllForFacade(updates, tags);
} else {
    _dataStore.updateAll(updates, tags);
@Override
public void updateAll(Iterable<Update> updates, Set<String> tags) {
    _local.get().updateAll(updates, tags);
}
private SuccessResponse doSimpleUpdateStream(final Optional<String> tableParam, final TimeUUIDParam changeIdParam,
                                             AuditParam auditParam, WriteConsistencyParam consistencyParam,
                                             Reader in, BooleanParam facade, Subject subject) {
    final Audit audit = getRequired(auditParam, "audit");
    final WriteConsistency consistency = consistencyParam.get();

    Iterator<Update> updates = Iterators.transform(Deltas.fromStream(in), new Function<Delta, Update>() {
        @Override
        public Update apply(Delta delta) {
            String table = tableParam.isPresent() ? tableParam.get() : extractKey(delta, Intrinsic.TABLE, String.class);
            checkArgument(table != null, "JSON object is missing field required by streaming update: %s", Intrinsic.TABLE);
            String key = extractKey(delta, Intrinsic.ID, String.class);
            checkArgument(key != null, "JSON object is missing field required by streaming update: %s", Intrinsic.ID);
            UUID changeId = (changeIdParam != null) ? changeIdParam.get() : TimeUUIDs.newUUID();  // optional, defaults to new uuid
            return new Update(table, key, changeId, delta, audit, consistency);
        }
    });

    if (facade != null && facade.get()) {
        _dataStore.updateAllForFacade(asSubjectSafeUpdateIterable(updates, subject, true));
    } else {
        // Parse and iterate through the deltas such that we never hold all the deltas in memory at once.
        _dataStore.updateAll(asSubjectSafeUpdateIterable(updates, subject, false));
    }
    return SuccessResponse.instance();
}
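Each object coming through the Reader is parsed as a delta and, when no table query parameter is supplied, must name its own table and key through intrinsic fields. A minimal sketch of one such object follows, assuming EmoDB's usual "~table"/"~id" intrinsic spellings and the Deltas.fromString parser; the table name, key, and content are hypothetical.

// Hypothetical streamed object; "~table" and "~id" stand in for Intrinsic.TABLE and Intrinsic.ID.
String streamedObject = "{\"~table\":\"review:testcustomer\", \"~id\":\"demo-key-1\", \"rating\":5}";
Delta delta = Deltas.fromString(streamedObject);
// The transform above extracts the table and key from the delta, falls back to a fresh time UUID when no
// changeId parameter was given, and wraps the result in an Update carrying the shared audit and consistency.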
/**
 * Creates, updates or deletes zero or more pieces of content in the data store.
 * @param tags - Set of Strings or "tags" that would be attached to databus events generated for the updates
 */
public static void updateAll(DataStore dataStore, Iterator<Update> updateIter, Set<String> tags) {
    // Invoke dataStore.updateAll() with smallish batches so if one batch fails due to server failure, we will
    // fail over to another server using the Ostrich retry/failover infrastructure and continue from the point
    // of the failure.
    // Use time-based partitioning that adjusts batch sizes dynamically in an effort to settle on batches that
    // take 500 milliseconds to execute.  This should make the TimeLimitedIterator used by the DataStoreClient
    // unnecessary, but the TimeLimitedIterator is still relevant for clients that don't use DataStoreStreaming.
    // For now, hard-code initial/min/max/goal values.
    Iterator<List<Update>> batchIter =
            new TimePartitioningIterator<>(updateIter, 50, 1, 2500, Duration.ofMillis(500L));
    while (batchIter.hasNext()) {
        // Ostrich will retry each batch as necessary
        dataStore.updateAll(batchIter.next(), tags);
    }
}
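The comment above names DataStoreStreaming as the intended entry point, so a rough, hedged illustration of driving this helper might look like the following; the table name, keys, document content, consistency level, and tag are hypothetical, and only the updateAll(dataStore, updateIter, tags) signature comes from the snippet above.

// A minimal sketch, assuming a constructed DataStore client named `dataStore` and the static helper
// above living in DataStoreStreaming (as its comment suggests).
Audit audit = new AuditBuilder().setLocalHost().setComment("bulk import").build();
Iterator<Update> updateIter = IntStream.range(0, 100_000)
        .mapToObj(i -> new Update(
                "review:testcustomer",                                        // hypothetical table
                "key-" + i,                                                   // hypothetical document key
                TimeUUIDs.newUUID(),
                Deltas.literal(ImmutableMap.<String, Object>of("index", i)),  // full-document literal delta
                audit,
                WriteConsistency.STRONG))
        .iterator();

// The helper slices the iterator into time-tuned batches and issues one dataStore.updateAll(batch, tags)
// call per batch, so Ostrich can retry or fail over a single batch without restarting the whole stream.
DataStoreStreaming.updateAll(dataStore, updateIter, ImmutableSet.of("import"));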
@Override
public void updateAll(Iterable<Update> updates, Set<String> tags) {
    _writerPhaser.register();
    try {
        Iterator<Update> updateIterator = updates.iterator();
        _delegate.updateAll(closeableIterator(updateIterator), tags);
        // If the delegate stopped consuming before the iterator was exhausted, the remaining updates were
        // never written; count the write as rejected and report the service as unavailable to the caller.
        if (updateIterator.hasNext()) {
            _writesRejectedCounter.inc();
            throw new ServiceUnavailableException();
        }
    } finally {
        _writerPhaser.arriveAndDeregister();
    }
}
/**
 * Imports an arbitrary size stream of an array of JSON {@link Update} objects.  In contrast to the "simpleUpdate"
 * APIs below, this method allows individual updates to have varying table, changeId, audit, and consistency
 * parameters.  This makes it a good generic batch wrapper for multiple update calls, but it's awkward to use
 * directly from simple clients like curl that just want to post a bunch of JSON objects into the SoR.
 */
@POST
@Path("_stream")
@Consumes(MediaType.APPLICATION_JSON)
@Timed(name = "bv.emodb.sor.DataStoreResource1.updateAll", absolute = true)
@ApiOperation(value = "Does multiple update calls",
        notes = " Imports an arbitrary size stream of an array of JSON {@link Update} objects. In contrast to the \"simpleUpdate\"\n" +
                " APIs below, this method allows individual updates to have varying table, changeId, audit, and consistency\n" +
                " parameters. This makes it a good generic batch wrapper for multiple update calls, but it's awkward to use\n" +
                " directly from simple clients like curl that just want to post a bunch of JSON objects into the SoR.",
        response = SuccessResponse.class)
@ApiImplicitParams({@ApiImplicitParam(name = "APIKey", required = true)})
public SuccessResponse updateAll(InputStream in, @QueryParam("tag") List<String> tags, @Authenticated Subject subject) {
    Set<String> tagsSet = (tags == null) ? ImmutableSet.<String>of() : Sets.newHashSet(tags);
    Iterable<Update> updates = asSubjectSafeUpdateIterable(new JsonStreamingArrayParser<>(in, Update.class), subject, false);
    _dataStore.updateAll(updates, tagsSet);
    return SuccessResponse.instance();
}
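Since the endpoint above deserializes its body with JsonStreamingArrayParser<>(in, Update.class), a client can serialize a list of Update objects as a JSON array and POST it to the _stream path. A hedged sketch follows; the base URL, API-key header name, table, key, and content are placeholders, and a real client would normally go through EmoDB's client library and its configured Jackson mapper rather than a raw ObjectMapper and HttpClient.

// A minimal sketch, not taken from the source; names marked as placeholders are assumptions.
static void postUpdateBatch(String apiKey) throws Exception {
    List<Update> batch = ImmutableList.of(
            new Update("review:testcustomer", "demo-key-1", TimeUUIDs.newUUID(),
                    Deltas.literal(ImmutableMap.<String, Object>of("rating", 5)),
                    new AuditBuilder().setLocalHost().setComment("example").build(),
                    WriteConsistency.STRONG));

    String body = new ObjectMapper().writeValueAsString(batch);  // assumes Update round-trips through Jackson

    HttpResponse<String> response = HttpClient.newHttpClient().send(
            HttpRequest.newBuilder()
                    .uri(URI.create("http://localhost:8080/sor/1/_stream?tag=example"))  // placeholder base URL
                    .header("Content-Type", "application/json")
                    .header("X-BV-API-Key", apiKey)                                      // placeholder header name
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build(),
            HttpResponse.BodyHandlers.ofString());
    // On success the body is the SuccessResponse JSON returned by the resource method above.
}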
@Override
public String createIdentity(String authenticationId, AuthIdentityModification<T> modification)
        throws IdentityExistsException {
    checkNotNull(authenticationId, "authenticationId");
    checkNotNull(modification, "modification");
    validateTables();

    // Check whether the authentication ID conflicts with an existing identity.  Note that we can't protect from a
    // race condition here; we rely on this method being run inside a global synchronization lock.
    if (getIdentityByAuthenticationId(authenticationId) != null) {
        throw new IdentityExistsException();
    }

    String id = _uniqueIdSupplier.get();
    String hashedAuthenticationId = hash(authenticationId);
    UUID changeId = TimeUUIDs.newUUID();
    Audit audit = new AuditBuilder().setLocalHost().setComment("create identity").build();

    T identity = modification.buildNew(id);
    // Ignore whatever masked ID was set; mask it now
    identity.setMaskedId(mask(authenticationId));
    identity.setIssued(new Date());

    Map<String, Object> map = convertIdentityToDataStoreEntry(identity);
    Update identityUpdate = new Update(_identityTableName, hashedAuthenticationId, changeId, Deltas.literal(map),
            audit, WriteConsistency.GLOBAL);

    map = ImmutableMap.<String, Object>of(HASHED_ID, hashedAuthenticationId);
    Update idUpdate = new Update(_idIndexTableName, id, changeId, Deltas.literal(map),
            audit, WriteConsistency.GLOBAL);

    // Update the identity and ID index in a single update
    _dataStore.updateAll(ImmutableList.of(identityUpdate, idUpdate));

    return id;
}
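Because both rows above are written with the same changeId in one updateAll call, the identity record (keyed by the hashed authentication ID) and its index record (keyed by the public ID) land together. A hedged sketch of the read path that index presumably enables, using the standard DataStore.get(table, key) call; the variable names and deleted-row check here are illustrative, not from the source.

// Hypothetical lookup of an identity by its public ID via the index table written above.
Map<String, Object> indexEntry = _dataStore.get(_idIndexTableName, id);
if (!Intrinsic.isDeleted(indexEntry)) {
    String hashedAuthenticationId = (String) indexEntry.get(HASHED_ID);
    Map<String, Object> identityEntry = _dataStore.get(_identityTableName, hashedAuthenticationId);
    // ... convert identityEntry back into the AuthIdentity subclass T ...
}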
        audit, WriteConsistency.GLOBAL);

_dataStore.updateAll(ImmutableList.of(newIdentityCreate, oldIdentityDelete, idUpdate));
@Override
public void deleteRole(RoleIdentifier id) {
    // First, verify the role exists
    Role role = getRole(id);
    if (role == null) {
        // Role doesn't exist.  Don't raise an exception, just return now since there is no work to be done.
        return;
    }

    // Start by revoking all permissions.  Even if the subsequent steps fail, any users with this role won't have
    // any permissions from it once this step completes.
    _permissionManager.revokePermissions(PermissionIDs.forRole(id));

    // As the inverse of creating roles, the role is deleted before the group.
    UUID changeId = TimeUUIDs.newUUID();
    String groupKey = checkGroup(role.getGroup());

    // Remove the role's ID from the group's set of role IDs, deleting the set attribute and then the group
    // record itself if either becomes empty.
    Delta groupTableDelta = Deltas.mapBuilder()
            .update(IDS_ATTR, Deltas.setBuilder()
                    .remove(role.getId())
                    .deleteIfEmpty()
                    .build())
            .deleteIfEmpty()
            .build();

    // Delete the role record outright.
    Delta roleTableDelta = Deltas.delete();

    Audit audit = new AuditBuilder().setLocalHost().setComment("Delete role " + id).build();

    _dataStore.updateAll(ImmutableList.of(
            new Update(_groupTableName, groupKey, changeId, groupTableDelta, audit, WriteConsistency.GLOBAL),
            new Update(_roleTableName, id.toString(), changeId, roleTableDelta, audit, WriteConsistency.GLOBAL)));
}