/**
 * Truncates every table in the given collection, retrying through the client pool.
 * A no-op when the collection is empty.
 *
 * Thrift-level failures are rethrown as AtlasDB dependency exceptions.
 */
void truncateTables(Collection<TableReference> tablesToTruncate) {
    if (tablesToTruncate.isEmpty()) {
        // Nothing to do; avoid a pointless round trip to Cassandra.
        return;
    }
    try {
        runTruncateInternal(tablesToTruncate);
    } catch (TException e) {
        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
    }
}
/**
 * Executes the truncation against a pooled Cassandra client, using the pool's
 * standard retry policy.
 */
private void runTruncateInternal(Collection<TableReference> tablesToTruncate) throws TException {
    clientPool.runWithRetry(client -> {
        return runTruncateOnClient(tablesToTruncate, client);
    });
}
/**
 * Truncates tables in the key-value store.
 * <p>
 * This can be slightly faster than repeatedly truncating individual tables.
 * <p>
 * Requires all Cassandra nodes to be reachable.
 *
 * @param tablesToTruncate set of tables to truncate.
 *
 * @throws AtlasDbDependencyException if not all Cassandra nodes are reachable.
 * @throws RuntimeException (or a subclass thereof) if the table does not exist.
 */
@Override
public void truncateTables(final Set<TableReference> tablesToTruncate) {
    // Delegates to the shared truncator, which handles retries and exception wrapping.
    cassandraTableTruncator.truncateTables(tablesToTruncate);
}
// Performs the truncation on one specific client. Declared to return Void (always null)
// so it can be passed straight to the client pool's runWithRetry as a checked function.
Void runTruncateOnClient(Collection<TableReference> tablesToTruncate, CassandraClient client) throws TException {
    // NOTE(review): assumes truncationFunction(...) produces a function whose apply(...)
    // performs the truncate as a side effect; its result is deliberately discarded —
    // confirm against truncationFunction's contract.
    truncationFunction(tablesToTruncate).apply(client);
    return null;
}
wrappingQueryRunner);
// Table-lifecycle helpers all share the same client pool. The dropper is handed the
// truncator — presumably so tables are truncated before being dropped (see
// truncateThenDrop); confirm against CassandraTableDropper.
this.cassandraTableCreator = new CassandraTableCreator(clientPool, config);
this.cassandraTableTruncator = new CassandraTableTruncator(queryRunner, clientPool);
this.cassandraTableDropper = new CassandraTableDropper(config, clientPool, tableMetadata, cassandraTableTruncator);
// Performs the truncation on one specific client. Declared to return Void (always null)
// so it can be passed straight to the client pool's runWithRetry as a checked function.
Void runTruncateOnClient(Collection<TableReference> tablesToTruncate, CassandraClient client) throws TException {
    // NOTE(review): assumes truncationFunction(...) produces a function whose apply(...)
    // performs the truncate as a side effect; its result is deliberately discarded —
    // confirm against truncationFunction's contract.
    truncationFunction(tablesToTruncate).apply(client);
    return null;
}
wrappingQueryRunner);
// Table-lifecycle helpers all share the same client pool. The dropper is handed the
// truncator — presumably so tables are truncated before being dropped (see
// truncateThenDrop); confirm against CassandraTableDropper.
this.cassandraTableCreator = new CassandraTableCreator(clientPool, config);
this.cassandraTableTruncator = new CassandraTableTruncator(queryRunner, clientPool);
this.cassandraTableDropper = new CassandraTableDropper(config, clientPool, tableMetadata, cassandraTableTruncator);
/**
 * Truncates the given table on the supplied client, then drops its underlying
 * column family.
 */
private void truncateThenDrop(TableReference tableRef, CassandraClient client) throws TException {
    ImmutableSet<TableReference> singleTable = ImmutableSet.of(tableRef);
    cassandraTableTruncator.runTruncateOnClient(singleTable, client);
    client.system_drop_column_family(CassandraKeyValueServiceImpl.internalTableName(tableRef));
}
}
/**
 * Truncates tables in the key-value store.
 * <p>
 * This can be slightly faster than repeatedly truncating individual tables.
 * <p>
 * Requires all Cassandra nodes to be reachable.
 *
 * @param tablesToTruncate set of tables to truncate.
 *
 * @throws AtlasDbDependencyException if not all Cassandra nodes are reachable.
 * @throws RuntimeException (or a subclass thereof) if the table does not exist.
 */
@Override
public void truncateTables(final Set<TableReference> tablesToTruncate) {
    // Delegates to the shared truncator, which handles retries and exception wrapping.
    cassandraTableTruncator.truncateTables(tablesToTruncate);
}
/**
 * Truncates every table in the given collection, retrying through the client pool.
 * A no-op when the collection is empty.
 *
 * Thrift-level failures are rethrown as AtlasDB dependency exceptions.
 */
void truncateTables(Collection<TableReference> tablesToTruncate) {
    if (tablesToTruncate.isEmpty()) {
        // Nothing to do; avoid a pointless round trip to Cassandra.
        return;
    }
    try {
        runTruncateInternal(tablesToTruncate);
    } catch (TException e) {
        throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
    }
}
/**
 * Executes the truncation against a pooled Cassandra client, using the pool's
 * standard retry policy.
 */
private void runTruncateInternal(Collection<TableReference> tablesToTruncate) throws TException {
    clientPool.runWithRetry(client -> {
        return runTruncateOnClient(tablesToTruncate, client);
    });
}
/**
 * Deletes a range of values, special-casing two shapes for efficiency:
 * a full-table range becomes a truncate (with fallback to the generic path on
 * failure), and a single-row range becomes one row-level remove.
 */
@Override
public void deleteRange(final TableReference tableRef, final RangeRequest range) {
    if (range.equals(RangeRequest.all())) {
        // Full-range delete == truncate, which is friendlier to Cassandra garbage
        // cleanup than per-cell tombstones. If the truncate fails (per the truncator's
        // contract, e.g. not all nodes reachable), fall back to the generic delete.
        try {
            cassandraTableTruncator.truncateTables(ImmutableSet.of(tableRef));
        } catch (AtlasDbDependencyException e) {
            log.info("Tried to make a deleteRange({}, RangeRequest.all())"
                    + " into a more garbage-cleanup friendly truncate(), but this failed.",
                    LoggingArgs.tableRef(tableRef), e);
            super.deleteRange(tableRef, range);
        }
    } else if (isForSingleRow(range.getStartInclusive(), range.getEndExclusive())) {
        // A range spanning exactly one row can be handled with a single remove call
        // instead of a scan-and-delete.
        try {
            long timestamp = mutationTimestampProvider.getRemoveTimestamp();
            byte[] row = range.getStartInclusive();
            clientPool.runWithRetry(client -> {
                client.remove("deleteRange", tableRef, row, timestamp, DELETE_CONSISTENCY);
                return null;
            });
        } catch (UnavailableException e) {
            // Not enough replicas available to satisfy DELETE_CONSISTENCY.
            throw new InsufficientConsistencyException(
                    "Deleting requires all Cassandra nodes to be up and available.", e);
        } catch (TException e) {
            throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
        }
    } else {
        // General case: delegate to the default range-delete implementation.
        super.deleteRange(tableRef, range);
    }
}
/**
 * Truncates the given table on the supplied client, then drops its underlying
 * column family.
 */
private void truncateThenDrop(TableReference tableRef, CassandraClient client) throws TException {
    ImmutableSet<TableReference> singleTable = ImmutableSet.of(tableRef);
    cassandraTableTruncator.runTruncateOnClient(singleTable, client);
    client.system_drop_column_family(CassandraKeyValueServiceImpl.internalTableName(tableRef));
}
}
/**
 * Deletes a range of values, special-casing two shapes for efficiency:
 * a full-table range becomes a truncate (with fallback to the generic path on
 * failure), and a single-row range becomes one row-level remove.
 */
@Override
public void deleteRange(final TableReference tableRef, final RangeRequest range) {
    if (range.equals(RangeRequest.all())) {
        // Full-range delete == truncate, which is friendlier to Cassandra garbage
        // cleanup than per-cell tombstones. If the truncate fails (per the truncator's
        // contract, e.g. not all nodes reachable), fall back to the generic delete.
        try {
            cassandraTableTruncator.truncateTables(ImmutableSet.of(tableRef));
        } catch (AtlasDbDependencyException e) {
            log.info("Tried to make a deleteRange({}, RangeRequest.all())"
                    + " into a more garbage-cleanup friendly truncate(), but this failed.",
                    LoggingArgs.tableRef(tableRef), e);
            super.deleteRange(tableRef, range);
        }
    } else if (isForSingleRow(range.getStartInclusive(), range.getEndExclusive())) {
        // A range spanning exactly one row can be handled with a single remove call
        // instead of a scan-and-delete.
        try {
            long timestamp = mutationTimestampProvider.getRemoveTimestamp();
            byte[] row = range.getStartInclusive();
            clientPool.runWithRetry(client -> {
                client.remove("deleteRange", tableRef, row, timestamp, DELETE_CONSISTENCY);
                return null;
            });
        } catch (UnavailableException e) {
            // Not enough replicas available to satisfy DELETE_CONSISTENCY.
            throw new InsufficientConsistencyException(
                    "Deleting requires all Cassandra nodes to be up and available.", e);
        } catch (TException e) {
            throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
        }
    } else {
        // General case: delegate to the default range-delete implementation.
        super.deleteRange(tableRef, range);
    }
}