/**
 * Deletes values from the key-value store.
 * <p>
 * Requires all Cassandra nodes to be up and available, otherwise throws a
 * PalantirRuntimeException.
 *
 * @param tableRef the name of the table to delete values from.
 * @param keys map containing the keys to delete values for.
 *
 * @throws PalantirRuntimeException if not all hosts respond successfully.
 */
@Override
public void delete(TableReference tableRef, Multimap<Cell, Long> keys) {
    // Delegates to a one-shot CellDeleter; the operator obtained from
    // mutationTimestampProvider supplies the timestamps the deletions are written at.
    new CellDeleter(
                    clientPool,
                    wrappingQueryRunner,
                    DELETE_CONSISTENCY,
                    mutationTimestampProvider.getDeletionTimestampOperatorForBatchDelete())
            .delete(tableRef, keys);
}
@Test
public void supplierBackedProviderQueriesSupplierForRangeTombstoneTimestamps() {
    // Every range-tombstone query consults the supplier afresh, so the returned
    // timestamps advance 1, 2, 3 regardless of the Atlas timestamp passed in.
    long[] atlasTimestamps = {3141592, 0, 1L << 60};
    long expected = 1;
    for (long atlasTimestamp : atlasTimestamps) {
        assertThat(supplierBackedProvider.getRangeTombstoneTimestamp(atlasTimestamp)).isEqualTo(expected);
        expected++;
    }
}
}
@Test
public void supplierBackedProviderQueriesSupplierForSweepSentinelTimestamps() {
    // Successive sentinel-write queries each delegate to the supplier,
    // which yields 1, 2, 3 in order.
    for (long expected = 1; expected <= 3; expected++) {
        assertThat(supplierBackedProvider.getSweepSentinelWriteTimestamp()).isEqualTo(expected);
    }
}
/**
 * Deletes a range of values from the key-value store, special-casing ranges that can be
 * served more cheaply than the generic implementation.
 * <p>
 * A full-table range is attempted as a truncate; a single-row range is issued as a direct
 * row removal; anything else falls through to the generic {@code super.deleteRange}.
 *
 * @param tableRef the table to delete from.
 * @param range the range of rows to delete.
 */
@Override
public void deleteRange(final TableReference tableRef, final RangeRequest range) {
    if (range.equals(RangeRequest.all())) {
        // Truncation is friendlier to garbage cleanup than issuing range tombstones.
        // If it fails (AtlasDbDependencyException), fall back to the generic path.
        try {
            cassandraTableTruncator.truncateTables(ImmutableSet.of(tableRef));
        } catch (AtlasDbDependencyException e) {
            log.info(
                    "Tried to make a deleteRange({}, RangeRequest.all())"
                            + " into a more garbage-cleanup friendly truncate(), but this failed.",
                    LoggingArgs.tableRef(tableRef),
                    e);
            super.deleteRange(tableRef, range);
        }
    } else if (isForSingleRow(range.getStartInclusive(), range.getEndExclusive())) {
        // A range spanning exactly one row can be removed with a single client call.
        try {
            long timestamp = mutationTimestampProvider.getRemoveTimestamp();
            byte[] row = range.getStartInclusive();
            clientPool.runWithRetry(client -> {
                client.remove("deleteRange", tableRef, row, timestamp, DELETE_CONSISTENCY);
                return null;
            });
        } catch (UnavailableException e) {
            // DELETE_CONSISTENCY could not be satisfied by the cluster.
            throw new InsufficientConsistencyException(
                    "Deleting requires all Cassandra nodes to be up and available.", e);
        } catch (TException e) {
            throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
        }
    } else {
        // General case: delegate to the generic range-delete implementation.
        super.deleteRange(tableRef, range);
    }
}
/**
 * Deletes a range of values from the key-value store, special-casing ranges that can be
 * served more cheaply than the generic implementation.
 * <p>
 * A full-table range is attempted as a truncate; a single-row range is issued as a direct
 * row removal; anything else falls through to the generic {@code super.deleteRange}.
 *
 * @param tableRef the table to delete from.
 * @param range the range of rows to delete.
 */
@Override
public void deleteRange(final TableReference tableRef, final RangeRequest range) {
    if (range.equals(RangeRequest.all())) {
        // Truncation is friendlier to garbage cleanup than issuing range tombstones.
        // If it fails (AtlasDbDependencyException), fall back to the generic path.
        try {
            cassandraTableTruncator.truncateTables(ImmutableSet.of(tableRef));
        } catch (AtlasDbDependencyException e) {
            log.info(
                    "Tried to make a deleteRange({}, RangeRequest.all())"
                            + " into a more garbage-cleanup friendly truncate(), but this failed.",
                    LoggingArgs.tableRef(tableRef),
                    e);
            super.deleteRange(tableRef, range);
        }
    } else if (isForSingleRow(range.getStartInclusive(), range.getEndExclusive())) {
        // A range spanning exactly one row can be removed with a single client call.
        try {
            long timestamp = mutationTimestampProvider.getRemoveTimestamp();
            byte[] row = range.getStartInclusive();
            clientPool.runWithRetry(client -> {
                client.remove("deleteRange", tableRef, row, timestamp, DELETE_CONSISTENCY);
                return null;
            });
        } catch (UnavailableException e) {
            // DELETE_CONSISTENCY could not be satisfied by the cluster.
            throw new InsufficientConsistencyException(
                    "Deleting requires all Cassandra nodes to be up and available.", e);
        } catch (TException e) {
            throw Throwables.unwrapAndThrowAtlasDbDependencyException(e);
        }
    } else {
        // General case: delegate to the generic range-delete implementation.
        super.deleteRange(tableRef, range);
    }
}
@Test
public void supplierBackedProviderQueriesSupplierForDeletionTimestamps() {
    // A fresh operator is requested for each input, so each application reflects
    // the next value from the supplier (1, 2, 3).
    long[] atlasTimestamps = {1234, 12345678, 314159265358979L};
    for (int i = 0; i < atlasTimestamps.length; i++) {
        assertThat(supplierBackedProvider
                        .getDeletionTimestampOperatorForBatchDelete()
                        .applyAsLong(atlasTimestamps[i]))
                .isEqualTo(i + 1);
    }
}
@Test
public void legacyProviderWritesSweepSentinelsAtTimestampZero() {
    // Repeated queries must all yield timestamp zero.
    for (int call = 0; call < 2; call++) {
        assertThat(LEGACY_PROVIDER.getSweepSentinelWriteTimestamp()).isEqualTo(0);
    }
}
@Test
public void legacyProviderWritesRangeTombstonesAtAtlasTimestampPlusOne() {
    // The legacy scheme writes range tombstones one above the Atlas timestamp.
    for (long atlasTimestamp : new long[] {1234, 12345678}) {
        assertThat(LEGACY_PROVIDER.getRangeTombstoneTimestamp(atlasTimestamp))
                .isEqualTo(atlasTimestamp + 1);
    }
}
@Test
public void legacyProviderWritesTombstonesAtAtlasTimestampPlusOne() {
    // Each batch-delete operator maps an Atlas timestamp ts to ts + 1.
    // Note: the operator getter is deliberately invoked once per input,
    // matching how delete() obtains a fresh operator per batch.
    long first = LEGACY_PROVIDER.getDeletionTimestampOperatorForBatchDelete().applyAsLong(1234);
    assertThat(first).isEqualTo(1235);
    long second = LEGACY_PROVIDER.getDeletionTimestampOperatorForBatchDelete().applyAsLong(12345678);
    assertThat(second).isEqualTo(12345679);
}
@Test
public void supplierBackedProviderOnlyInvokesSupplierOncePerDeletionOperator() {
    // A single operator instance captures one supplier value and reuses it for
    // every timestamp it is applied to.
    LongUnaryOperator deletionTimestamps = supplierBackedProvider.getDeletionTimestampOperatorForBatchDelete();
    for (long input : new long[] {42, 9999, 1213141516171819L}) {
        assertThat(deletionTimestamps.applyAsLong(input)).isEqualTo(1);
    }
}
/**
 * Deletes values from the key-value store.
 * <p>
 * Requires all Cassandra nodes to be up and available, otherwise throws a
 * PalantirRuntimeException.
 *
 * @param tableRef the name of the table to delete values from.
 * @param keys map containing the keys to delete values for.
 *
 * @throws PalantirRuntimeException if not all hosts respond successfully.
 */
@Override
public void delete(TableReference tableRef, Multimap<Cell, Long> keys) {
    // Delegates to a one-shot CellDeleter; the operator obtained from
    // mutationTimestampProvider supplies the timestamps the deletions are written at.
    new CellDeleter(
                    clientPool,
                    wrappingQueryRunner,
                    DELETE_CONSISTENCY,
                    mutationTimestampProvider.getDeletionTimestampOperatorForBatchDelete())
            .delete(tableRef, keys);
}