@Override
public long getTimestamp() {
    return getStartTimestamp();
}
private boolean wasCommitSuccessful(long commitTs) throws Exception {
    Map<Long, Long> commitTimestamps =
            getCommitTimestamps(null, Collections.singleton(getStartTimestamp()), false);
    long storedCommit = commitTimestamps.get(getStartTimestamp());
    if (storedCommit != commitTs && storedCommit != TransactionConstants.FAILED_COMMIT_TS) {
        Validate.isTrue(false, "Commit value is wrong. startTs %s commitTs: %s",
                getStartTimestamp(), commitTs);
    }
    return storedCommit == commitTs;
}
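// Sketch of the commit-record semantics assumed above (illustrative comment, not class code):
// the transaction table maps startTs -> commitTs, and a rolled-back transaction is recorded
// as TransactionConstants.FAILED_COMMIT_TS. After a putUnlessExists race, the stored value
// is therefore one of exactly two things:
//
//   long stored = getCommitTimestamps(null, Collections.singleton(startTs), false).get(startTs);
//   stored == commitTs                               // we committed successfully
//   stored == TransactionConstants.FAILED_COMMIT_TS  // a concurrent actor rolled us back
//
// Any other stored value indicates corruption, which is why wasCommitSuccessful fails hard.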
@Override
public Map<Cell, byte[]> getIgnoringLocalWrites(TableReference tableRef, Set<Cell> cells) {
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(cells)) {
        return ImmutableMap.of();
    }
    hasReads = true;
    Map<Cell, byte[]> result = getFromKeyValueService(tableRef, cells);
    validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
    return Maps.filterValues(result, Predicates.not(Value.IS_EMPTY));
}
@Override
public Map<Cell, byte[]> get(TableReference tableRef, Set<Cell> cells) {
    Timer.Context timer = getTimer("get").time();
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(cells)) {
        return ImmutableMap.of();
    }
    hasReads = true;

    Map<Cell, byte[]> result = Maps.newHashMap();
    SortedMap<Cell, byte[]> writes = writesByTable.get(tableRef);
    if (writes != null) {
        for (Cell cell : cells) {
            if (writes.containsKey(cell)) {
                result.put(cell, writes.get(cell));
            }
        }
    }

    // We don't need to read any cells that were written locally.
    result.putAll(getFromKeyValueService(tableRef, Sets.difference(cells, result.keySet())));

    long getMillis = TimeUnit.NANOSECONDS.toMillis(timer.stop());
    if (perfLogger.isDebugEnabled()) {
        perfLogger.debug("get({}, {} cells) found {} cells (some possibly deleted), took {} ms",
                tableRef, cells.size(), result.size(), getMillis);
    }
    validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
    return Maps.filterValues(result, Predicates.not(Value.IS_EMPTY));
}
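// Illustrative usage (hypothetical table and cell names): writes buffered earlier in this
// transaction shadow the key value service, so get() serves them straight from writesByTable
// and only reads the remaining cells from the KVS.
//
//   txn.put(usersTable, ImmutableMap.of(nameCell, newValue));
//   byte[] value = txn.get(usersTable, ImmutableSet.of(nameCell)).get(nameCell);
//   // value == newValue, served from the local write buffer without a KVS read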
@Override
public Map<byte[], BatchingVisitable<Map.Entry<Cell, byte[]>>> getRowsColumnRange(
        TableReference tableRef, Iterable<byte[]> rows, BatchColumnRangeSelection columnRangeSelection) {
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(rows)) {
        return ImmutableMap.of();
    }
    hasReads = true;
    Map<byte[], RowColumnRangeIterator> rawResults =
            keyValueService.getRowsColumnRange(tableRef, rows, columnRangeSelection, getStartTimestamp());
    Map<byte[], BatchingVisitable<Map.Entry<Cell, byte[]>>> postFilteredResults =
            Maps.newHashMapWithExpectedSize(rawResults.size());
    for (Entry<byte[], RowColumnRangeIterator> e : rawResults.entrySet()) {
        byte[] row = e.getKey();
        RowColumnRangeIterator rawIterator = e.getValue();
        Iterator<Map.Entry<Cell, byte[]>> postFilteredIterator =
                getPostFilteredColumns(tableRef, columnRangeSelection, row, rawIterator);
        postFilteredResults.put(row, BatchingVisitableFromIterable.create(postFilteredIterator));
    }
    return postFilteredResults;
}
/**
 * This will load the given keys from the underlying key value service and apply postFiltering
 * so we have snapshot isolation. If the value in the key value service is the empty array,
 * it will be included here and needs to be filtered out by the caller.
 */
private Map<Cell, byte[]> getFromKeyValueService(TableReference tableRef, Set<Cell> cells) {
    ImmutableMap.Builder<Cell, byte[]> result = ImmutableMap.builderWithExpectedSize(cells.size());
    Map<Cell, Long> toRead = Cells.constantValueMap(cells, getStartTimestamp());
    Map<Cell, Value> rawResults = keyValueService.get(tableRef, toRead);
    getWithPostFiltering(tableRef, rawResults, result, Value.GET_VALUE);
    return result.build();
}
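// Deletion convention implied by the javadoc above: deletes are stored as empty byte[]
// tombstones, so callers of getFromKeyValueService strip them before returning, e.g.
//
//   Maps.filterValues(result, Predicates.not(Value.IS_EMPTY));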
@Override
public SortedMap<byte[], RowResult<byte[]>> getRowsIgnoringLocalWrites(
        TableReference tableRef, Iterable<byte[]> rows) {
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(rows)) {
        return AbstractTransaction.EMPTY_SORTED_ROWS;
    }
    hasReads = true;
    Map<Cell, Value> rawResults = Maps.newHashMap(
            keyValueService.getRows(tableRef, rows, ColumnSelection.all(), getStartTimestamp()));
    validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
    return filterRowResults(tableRef, rawResults, ImmutableMap.builderWithExpectedSize(rawResults.size()));
}
/**
 * This will attempt to put the commitTimestamp into the DB.
 *
 * @throws TransactionLockTimeoutException If our locks timed out while trying to commit.
 * @throws TransactionCommitFailedException If the commit failed in a way that isn't retriable.
 */
private void putCommitTimestamp(long commitTimestamp, LockToken locksToken, TransactionService transactionService)
        throws TransactionFailedException {
    Validate.isTrue(commitTimestamp > getStartTimestamp(), "commitTs must be greater than startTs");
    try {
        transactionService.putUnlessExists(getStartTimestamp(), commitTimestamp);
    } catch (KeyAlreadyExistsException e) {
        handleKeyAlreadyExistsException(commitTimestamp, e, locksToken);
    } catch (Exception e) {
        TransactionCommitFailedException commitFailedEx = new TransactionCommitFailedException(
                "This transaction failed writing the commit timestamp. "
                        + "It might have been committed, but it may not have.",
                e);
        log.error("failed to commit an atlasdb transaction", commitFailedEx);
        transactionOutcomeMetrics.markPutUnlessExistsFailed();
        throw commitFailedEx;
    }
}
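// Sketch of the commit step above (assumed semantics): putUnlessExists is an atomic
// put-if-absent on the transaction table, so at most one value is ever recorded for our
// startTs.
//
//   transactionService.putUnlessExists(startTs, commitTs);
//     -> returns normally: the commit point is durable; readers with a later start
//        timestamp will now see this transaction's writes.
//     -> KeyAlreadyExistsException: some value (typically FAILED_COMMIT_TS from a
//        concurrent roll-back) was already recorded; handleKeyAlreadyExistsException
//        decides whether we actually committed or must be retried.
//     -> any other exception: the outcome is unknown, so the transaction fails
//        non-retriably with TransactionCommitFailedException.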
@Override
public SortedMap<byte[], RowResult<byte[]>> getRows(
        TableReference tableRef, Iterable<byte[]> rows, ColumnSelection columnSelection) {
    Timer.Context timer = getTimer("getRows").time();
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(rows)) {
        return AbstractTransaction.EMPTY_SORTED_ROWS;
    }
    hasReads = true;

    ImmutableMap.Builder<Cell, byte[]> result = ImmutableSortedMap.naturalOrder();
    Map<Cell, Value> rawResults = Maps.newHashMap(
            keyValueService.getRows(tableRef, rows, columnSelection, getStartTimestamp()));
    SortedMap<Cell, byte[]> writes = writesByTable.get(tableRef);
    if (writes != null) {
        for (byte[] row : rows) {
            extractLocalWritesForRow(result, writes, row, columnSelection);
        }
    }

    // We don't need to do work postFiltering if we have a write locally.
    rawResults.keySet().removeAll(result.build().keySet());

    SortedMap<byte[], RowResult<byte[]>> results = filterRowResults(tableRef, rawResults, result);
    long getRowsMillis = TimeUnit.NANOSECONDS.toMillis(timer.stop());
    if (perfLogger.isDebugEnabled()) {
        perfLogger.debug("getRows({}, {} rows) found {} rows, took {} ms",
                tableRef, Iterables.size(rows), results.size(), getRowsMillis);
    }
    validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
    return results;
}
Cell key = e.getKey();
long theirStartTimestamp = e.getValue();
AssertUtils.assertAndLog(log, theirStartTimestamp != getStartTimestamp(),
        "Timestamp reuse is bad:%d", getStartTimestamp());
AssertUtils.assertAndLog(log, theirCommitTimestamp != getStartTimestamp(),
        "Timestamp reuse is bad:%d", getStartTimestamp());
if (theirStartTimestamp > getStartTimestamp()) {
    // Their write started after our start timestamp, so it dominates ours.
    dominatingWrites.add(Cells.createConflictWithMetadata(
            keyValueService, tableRef, key, theirStartTimestamp, theirCommitTimestamp));
} else if (theirCommitTimestamp > getStartTimestamp()) {
    // Their write started before us but committed after our start timestamp, so it spans ours.
    spanningWrites.add(Cells.createConflictWithMetadata(
            keyValueService, tableRef, key, theirStartTimestamp, theirCommitTimestamp));
}
@Override
public void abort() {
    if (state.get() == State.ABORTED) {
        return;
    }
    while (true) {
        ensureUncommitted();
        if (state.compareAndSet(State.UNCOMMITTED, State.ABORTED)) {
            if (hasWrites()) {
                throwIfPreCommitRequirementsNotMet(null, getStartTimestamp());
            }
            transactionOutcomeMetrics.markAbort();
            return;
        }
    }
}
@Override
public Iterator<Map.Entry<Cell, byte[]>> getRowsColumnRange(
        TableReference tableRef, Iterable<byte[]> rows, ColumnRangeSelection columnRangeSelection, int batchHint) {
    checkGetPreconditions(tableRef);
    if (Iterables.isEmpty(rows)) {
        return Collections.emptyIterator();
    }
    hasReads = true;
    RowColumnRangeIterator rawResults = keyValueService.getRowsColumnRange(
            tableRef, rows, columnRangeSelection, batchHint, getStartTimestamp());
    if (!rawResults.hasNext()) {
        validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
    } // else the postFiltered iterator will check for each batch.

    Iterator<Map.Entry<byte[], RowColumnRangeIterator>> rawResultsByRow = partitionByRow(rawResults);
    Iterator<Iterator<Map.Entry<Cell, byte[]>>> postFiltered = Iterators.transform(rawResultsByRow, e -> {
        byte[] row = e.getKey();
        RowColumnRangeIterator rawIterator = e.getValue();
        BatchColumnRangeSelection batchColumnRangeSelection =
                BatchColumnRangeSelection.create(columnRangeSelection, batchHint);
        return getPostFilteredColumns(tableRef, batchColumnRangeSelection, row, rawIterator);
    });
    return Iterators.concat(postFiltered);
}
protected void throwIfWriteAlreadyCommitted(
        TableReference tableRef,
        Map<Cell, byte[]> writes,
        ConflictHandler conflictHandler,
        LockToken commitLocksToken,
        TransactionService transactionService)
        throws TransactionConflictException {
    if (writes.isEmpty() || !conflictHandler.checkWriteWriteConflicts()) {
        return;
    }
    Set<CellConflict> spanningWrites = Sets.newHashSet();
    Set<CellConflict> dominatingWrites = Sets.newHashSet();
    Map<Cell, Long> keysToLoad = Maps.asMap(writes.keySet(), Functions.constant(Long.MAX_VALUE));
    while (!keysToLoad.isEmpty()) {
        keysToLoad = detectWriteAlreadyCommittedInternal(
                tableRef, keysToLoad, spanningWrites, dominatingWrites, transactionService);
    }

    if (conflictHandler == ConflictHandler.RETRY_ON_VALUE_CHANGED) {
        throwIfValueChangedConflict(tableRef, writes, spanningWrites, dominatingWrites, commitLocksToken);
    } else if (!spanningWrites.isEmpty() || !dominatingWrites.isEmpty()) {
        transactionOutcomeMetrics.markWriteWriteConflict(tableRef);
        throw TransactionConflictException.create(
                tableRef,
                getStartTimestamp(),
                spanningWrites,
                dominatingWrites,
                System.currentTimeMillis() - timeCreated);
    }
}
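// Conflict taxonomy used above (as implied by the detection code): for our transaction with
// start timestamp S, a committed write with start timestamp T and commit timestamp C is
//
//   dominating: S < T       (it started, and hence committed, entirely after us)
//   spanning:   T < S < C   (it started before us but committed after we started)
//
// Either kind is a concurrently committed write to a cell we also wrote. Unless the handler
// is RETRY_ON_VALUE_CHANGED, which first checks whether the conflicting value actually
// differs from what we observed, the transaction aborts with a write-write conflict.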
cleaner.queueCellsForScrubbing(getCellsToQueueForScrubbing(), getStartTimestamp());
long transactionMillis = TimeUnit.NANOSECONDS.toMillis(transactionTimerContext.stop());
perfLogger.debug("Committed transaction {} in {}ms", getStartTimestamp(), transactionMillis);
transactionOutcomeMetrics.markLocksExpired();
throw new TransactionLockTimeoutException("Our commit was already rolled back at commit time"
        + " because our locks timed out. startTs: " + getStartTimestamp() + ". "
        + getExpiredLocksErrorString(commitLocksToken, expiredLocks), ex);

// ... (intervening else-branch elided in the source) ...

} catch (Exception e1) {
    log.error("Failed to determine if we can retry this transaction. startTs: {}",
            SafeArg.of("startTs", getStartTimestamp()), e1);
}
String msg = /* message prefix elided in the source */
        + " startTs: " + getStartTimestamp();
throw new TransactionCommitFailedException(msg, ex);
throwIfPreCommitRequirementsNotMet(commitLocksToken, getStartTimestamp());
Validate.isTrue(false, "Missing conflicting value for cell: %s for table %s",
        cellToConflict.get(cell), table);

// ...

throwIfPreCommitRequirementsNotMet(commitLocksToken, getStartTimestamp());
Validate.isTrue(false, "Wrong timestamp for cell in table %s Expected: %s Actual: %s",
        table, cellToConflict.get(cell),

// ...

transactionOutcomeMetrics.markWriteWriteConflict(table);
throw TransactionConflictException.create(table, getStartTimestamp(),
        Sets.filter(spanningWrites, conflicting),
        Sets.filter(dominatingWrites, conflicting),
        Function<Value, T> transformer) {
    RowRangeBatchProvider batchProvider =
            new RowRangeBatchProvider(keyValueService, tableRef, range, getStartTimestamp());
    BatchSizeIncreasingIterator<RowResult<Value>> results =
            new BatchSizeIncreasingIterator<>(batchProvider, preFilterBatchSize, null);
Timer.Context timer = getTimer("processedRangeMillis").time(); Map<RangeRequest, TokenBackedBasicResultsPage<RowResult<Value>, byte[]>> firstPages = keyValueService.getFirstBatchForRanges(tableRef, input, getStartTimestamp()); validatePreCommitRequirementsOnReadIfNecessary(tableRef, getStartTimestamp());
        RowColumnRangeIterator rawIterator) {
    ColumnRangeBatchProvider batchProvider = new ColumnRangeBatchProvider(
            keyValueService, tableRef, row, columnRangeSelection, getStartTimestamp());
    BatchSizeIncreasingIterator<Map.Entry<Cell, Value>> batchIterator = new BatchSizeIncreasingIterator<>(
            batchProvider, columnRangeSelection.getBatchHint(), ClosableIterators.wrap(rawIterator));
AtlasRowLockDescriptor.of(
        TransactionConstants.TRANSACTION_TABLE.getQualifiedName(),
        TransactionConstants.getValueForTimestamp(getStartTimestamp())));
continue;