/**
 * Records an audit entry against the named table without changing its definition.
 *
 * @param name  the table to audit (must not be null)
 * @param op    operation label stamped into the audit as "_op"
 * @param audit caller-supplied audit context (must not be null)
 */
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.audit", absolute = true)
@Override
public void audit(String name, String op, Audit audit) {
    checkNotNull(name, "table");
    checkNotNull(audit, "audit");
    checkTableChangesAllowed(name);

    // Tag the audit with the originating operation, then persist it with a
    // no-op delta and a null invalidation scope — nothing about the table
    // actually changes, so there is no metadata update to propagate.
    Audit taggedAudit = AuditBuilder.from(audit)
            .set("_op", op)
            .build();
    updateTableMetadata(name, Deltas.noop(), taggedAudit, null);
}
/**
 * Records an audit entry against the named table without changing its definition.
 *
 * @param name  the table to audit (must not be null)
 * @param op    operation label stamped into the audit as "_op"
 * @param audit caller-supplied audit context (must not be null)
 */
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.audit", absolute = true)
@Override
public void audit(String name, String op, Audit audit) {
    checkNotNull(name, "table");
    checkNotNull(audit, "audit");
    checkTableChangesAllowed(name);
    // Stamp the caller's operation onto the audit, then write it with a no-op
    // delta and null scope: no table metadata changes, so presumably no cache
    // invalidation is needed — TODO confirm null-scope semantics in updateTableMetadata.
    Audit augmentedAudit = AuditBuilder.from(audit)
            .set("_op", op)
            .build();
    updateTableMetadata(name, Deltas.noop(), augmentedAudit, null);
}
@Override public void setAttributes(String name, Map<String, ?> attributes, Audit audit) throws UnknownTableException { checkNotNull(name, "table"); checkNotNull(attributes, "attributes"); checkTableChangesAllowed(name); // Throw an exception if the table doesn't exist TableJson json = readTableJson(name, true); // write about the update attributes operation (metadata changed info) to a special system table. writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.UPDATE_ATTRIBUTES); // Write the new table attributes to Cassandra Delta delta = json.newSetAttributes(attributes); Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "setAttributes") .build(); updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL); }
/**
 * Replaces the attributes of an existing table and records an audit of the change.
 *
 * @param name       the table to modify (must not be null)
 * @param attributes the new attribute map (must not be null)
 * @param audit      caller-supplied audit context
 * @throws UnknownTableException if the table does not exist
 */
@Override
public void setAttributes(String name, Map<String, ?> attributes, Audit audit) throws UnknownTableException {
    checkNotNull(name, "table");
    checkNotNull(attributes, "attributes");
    // NOTE(review): unlike the sibling mutation methods there is no
    // checkNotNull(audit, "audit") here; a null audit would NPE later inside
    // AuditBuilder.from(audit). Consider adding the fast-fail check.
    checkTableChangesAllowed(name);

    // Throw an exception if the table doesn't exist
    TableJson json = readTableJson(name, true);

    // write about the update attributes operation (metadata changed info) to a special system table.
    writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.UPDATE_ATTRIBUTES);

    // Write the new table attributes to Cassandra
    Delta delta = json.newSetAttributes(attributes);
    Audit augmentedAudit = AuditBuilder.from(audit)
            .set("_op", "setAttributes")
            .build();
    updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL);
}
Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "create") .set("_uuid", uuid)
Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "create") .set("_uuid", uuid)
/**
 * Begins a move of table data from {@code src} to a new destination uuid/placement,
 * recording the source and destination coordinates in the audit.
 *
 * @param json          current table metadata
 * @param src           the storage being moved away from
 * @param destUuid      uuid of the destination storage
 * @param destPlacement placement of the destination storage
 * @param shardsLog2    log2 of the destination shard count
 * @param op            operation label stamped into the audit as "_op"
 * @param audit         optional caller-supplied audit context; a fresh audit is built if absent
 * @param moveType      whether this is a single-table move or part of a full-placement move
 */
private void moveStart(TableJson json, Storage src, String destUuid, String destPlacement,
                       int shardsLog2, String op, Optional<Audit> audit, MoveType moveType) {
    // Pick the delta by move scope; both variants take the same coordinates.
    Delta delta = (moveType == MoveType.FULL_PLACEMENT)
            ? json.newMovePlacementStart(src, destUuid, destPlacement, shardsLog2)
            : json.newMoveStart(src, destUuid, destPlacement, shardsLog2);

    // Seed the audit from the caller's context when one was provided.
    AuditBuilder builder = audit.isPresent() ? AuditBuilder.from(audit.get()) : new AuditBuilder();
    Audit augmentedAudit = builder
            .set("_op", op)
            .set("_srcUuid", src.getUuidString())
            .set("_srcPlacement", src.getPlacement())
            .set("_destUuid", destUuid)
            .set("_destPlacement", destPlacement)
            .build();
    updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL);
}
/**
 * Begins a move of table data from {@code src} to a new destination uuid/placement,
 * recording the source and destination coordinates in the audit.
 *
 * @param json          current table metadata
 * @param src           the storage being moved away from
 * @param destUuid      uuid of the destination storage
 * @param destPlacement placement of the destination storage
 * @param shardsLog2    log2 of the destination shard count
 * @param op            operation label stamped into the audit as "_op"
 * @param audit         optional caller-supplied audit context; a fresh audit is built if absent
 * @param moveType      whether this is a single-table move or part of a full-placement move
 */
private void moveStart(TableJson json, Storage src, String destUuid, String destPlacement,
                       int shardsLog2, String op, Optional<Audit> audit, MoveType moveType) {
    // A full-placement move uses a dedicated delta; otherwise only this
    // table's storage is moved.
    Delta delta;
    if (moveType == MoveType.FULL_PLACEMENT) {
        delta = json.newMovePlacementStart(src, destUuid, destPlacement, shardsLog2);
    } else {
        delta = json.newMoveStart(src, destUuid, destPlacement, shardsLog2);
    }
    // Seed from the caller's audit when present, otherwise start fresh, then
    // attach the full src/dest coordinates for traceability.
    Audit augmentedAudit = (audit.isPresent() ? AuditBuilder.from(audit.get()) : new AuditBuilder())
            .set("_op", op)
            .set("_srcUuid", src.getUuidString())
            .set("_srcPlacement", src.getPlacement())
            .set("_destUuid", destUuid)
            .set("_destPlacement", destPlacement)
            .build();
    updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL);
}
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.createFacade", absolute = true) @Override public void createFacade(String name, FacadeOptions facadeOptions, Audit audit) throws FacadeExistsException { checkNotNull(name, "table"); checkNotNull(facadeOptions, "facadeDefinition"); checkNotNull(audit, "audit"); checkTableChangesAllowed(name); // If placement move is in progress, create the new facade in its new placement facadeOptions = replacePlacementIfMoveInProgress(facadeOptions); if (!checkFacadeAllowed(name, facadeOptions)) { return; // Nothing to do } // Check that the placement string is valid String placement = checkPlacement(facadeOptions.getPlacement()); // Pick a unique table uuid that will determine where in Cassandra all the data will be stored. String uuid = newTableUuidString(name, audit); // Write the new facade definition to Cassandra. Delta delta = TableJson.newCreateFacade(uuid, placement, _defaultShardsLog2); Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "createFacade") .set("_uuid", uuid) .set("_placement", facadeOptions.getPlacement()) .build(); updateTableMetadata(name, delta, augmentedAudit, InvalidationScope.GLOBAL); }
@Override public void dropFacade(String name, String placement, Audit audit) throws UnknownFacadeException { checkNotNull(name, "table"); checkNotNull(audit, "audit"); checkPlacement(placement); checkTableChangesAllowed(name); // Read the table metadata from the DataStore (this is often a recursive call into the DataStore). Do this // directly instead of using get()/getInternal() to avoid validation checks so we can drop an invalid table. TableJson json = readTableJson(name, true); // write about the drop operation (metadata changed info) to a special system table. writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.DROP_FACADE); // Find the facade for the specified placement. Storage facadeStorage = json.getFacadeForPlacement(placement); Delta delta = json.newDropFacade(facadeStorage); Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "dropFacade") .set("_uuid", json.getMasterStorage().getUuidString()) .build(); updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL); }
/**
 * Creates a facade for an existing table in the requested placement.
 *
 * @param name          the table to add the facade to (must not be null)
 * @param facadeOptions requested facade configuration (must not be null)
 * @param audit         caller-supplied audit context (must not be null)
 * @throws FacadeExistsException if an equivalent facade already exists
 */
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.createFacade", absolute = true)
@Override
public void createFacade(String name, FacadeOptions facadeOptions, Audit audit) throws FacadeExistsException {
    checkNotNull(name, "table");
    checkNotNull(facadeOptions, "facadeDefinition");
    checkNotNull(audit, "audit");
    checkTableChangesAllowed(name);

    // If placement move is in progress, create the new facade in its new placement
    facadeOptions = replacePlacementIfMoveInProgress(facadeOptions);

    if (!checkFacadeAllowed(name, facadeOptions)) {
        return; // Nothing to do
    }

    // Check that the placement string is valid
    String placement = checkPlacement(facadeOptions.getPlacement());

    // Pick a unique table uuid that will determine where in Cassandra all the data will be stored.
    String uuid = newTableUuidString(name, audit);

    // Write the new facade definition to Cassandra.
    Delta delta = TableJson.newCreateFacade(uuid, placement, _defaultShardsLog2);
    // Note: the audited "_placement" is the (possibly move-adjusted) requested
    // placement from facadeOptions, while the delta uses the validated string.
    Audit augmentedAudit = AuditBuilder.from(audit)
            .set("_op", "createFacade")
            .set("_uuid", uuid)
            .set("_placement", facadeOptions.getPlacement())
            .build();
    updateTableMetadata(name, delta, augmentedAudit, InvalidationScope.GLOBAL);
}
/**
 * Drops the facade of the named table in the specified placement.
 *
 * @param name      the table whose facade is dropped (must not be null)
 * @param placement the placement identifying which facade to drop
 * @param audit     caller-supplied audit context (must not be null)
 * @throws UnknownFacadeException if no facade exists for the placement
 */
@Override
public void dropFacade(String name, String placement, Audit audit) throws UnknownFacadeException {
    checkNotNull(name, "table");
    checkNotNull(audit, "audit");
    checkPlacement(placement);
    checkTableChangesAllowed(name);

    // Read the table metadata from the DataStore (this is often a recursive call into the DataStore). Do this
    // directly instead of using get()/getInternal() to avoid validation checks so we can drop an invalid table.
    TableJson json = readTableJson(name, true);

    // write about the drop operation (metadata changed info) to a special system table.
    writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.DROP_FACADE);

    // Find the facade for the specified placement.
    Storage facadeStorage = json.getFacadeForPlacement(placement);
    Delta delta = json.newDropFacade(facadeStorage);
    // NOTE(review): the audited "_uuid" is the master storage's uuid even
    // though the facade's storage is what gets dropped — looks like it should
    // be facadeStorage.getUuidString(); verify intent before changing.
    Audit augmentedAudit = AuditBuilder.from(audit)
            .set("_op", "dropFacade")
            .set("_uuid", json.getMasterStorage().getUuidString())
            .build();
    updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL);
}
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.drop", absolute = true) @Override public void drop(String name, Audit audit) throws UnknownTableException { checkNotNull(name, "table"); checkNotNull(audit, "audit"); checkTableChangesAllowed(name); // Dropping a table progresses through the following steps: // 1. [SYSTEM DATA CENTER] Mark the uuid as dropped. // 2. [LOCAL DATA CENTER] Purge the data from Cassandra (initial cleanup). // -- Wait a while to ensure all servers in the cluster have stopped writing to src and all eventually // consistent writes have been applied. // 3. [LOCAL DATA CENTER] Purge the data from Cassandra (final cleanup). // 4. [SYSTEM DATA CENTER] Delete the uuid from the storage map. If the table hasn't been re-created, // delete the table json completely from the backing store. // Read the table metadata from the DataStore (this is often a recursive call into the DataStore). Do this // directly instead of using get()/getInternal() to avoid validation checks so we can drop an invalid table. TableJson json = readTableJson(name, true); // write about the drop operation (metadata changed info) to a special system table. writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.DROP_TABLE); // now, update the Table Metadata Delta delta = json.newDropTable(); Audit augmentedAudit = AuditBuilder.from(audit) .set("_op", "drop") .set("_uuid", json.getMasterStorage().getUuidString()) .build(); updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL); }
/**
 * Drops the named table, beginning the multi-step drop lifecycle.
 *
 * @param name  the table to drop (must not be null)
 * @param audit caller-supplied audit context (must not be null)
 * @throws UnknownTableException if the table does not exist
 */
@Timed (name = "bv.emodb.table.AstyanaxTableDAO.drop", absolute = true)
@Override
public void drop(String name, Audit audit) throws UnknownTableException {
    checkNotNull(name, "table");
    checkNotNull(audit, "audit");
    checkTableChangesAllowed(name);

    // Dropping a table progresses through the following steps:
    // 1. [SYSTEM DATA CENTER] Mark the uuid as dropped.
    // 2. [LOCAL DATA CENTER] Purge the data from Cassandra (initial cleanup).
    //    -- Wait a while to ensure all servers in the cluster have stopped writing to src and all eventually
    //       consistent writes have been applied.
    // 3. [LOCAL DATA CENTER] Purge the data from Cassandra (final cleanup).
    // 4. [SYSTEM DATA CENTER] Delete the uuid from the storage map. If the table hasn't been re-created,
    //    delete the table json completely from the backing store.

    // Read the table metadata from the DataStore (this is often a recursive call into the DataStore). Do this
    // directly instead of using get()/getInternal() to avoid validation checks so we can drop an invalid table.
    TableJson json = readTableJson(name, true);

    // write about the drop operation (metadata changed info) to a special system table.
    writeUnpublishedDatabusEvent(name, UnpublishedDatabusEventType.DROP_TABLE);

    // now, update the Table Metadata, recording the dropped storage's uuid in the audit.
    Delta delta = json.newDropTable();
    Audit augmentedAudit = AuditBuilder.from(audit)
            .set("_op", "drop")
            .set("_uuid", json.getMasterStorage().getUuidString())
            .build();
    updateTableMetadata(json.getTable(), delta, augmentedAudit, InvalidationScope.GLOBAL);
}