@Override
public boolean cellsCleanedUp(Transaction t, Set<Cell> cells) {
    // Cleanup task for the snapshots stream metadata table: when metadata cells are
    // swept, delete the backing stream data of any stream that never reached STORED.
    SnapshotsStreamMetadataTable metaTable = tables.getSnapshotsStreamMetadataTable(t);
    Collection<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow> rows = Lists.newArrayListWithCapacity(cells.size());
    for (Cell cell : cells) {
        // Row keys are the serialized stream ids of the swept metadata cells.
        rows.add(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.BYTES_HYDRATOR.hydrateFromBytes(cell.getRowName()));
    }
    // Re-read current metadata inside this transaction so we only act on the live state.
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> currentMetadata = metaTable.getMetadatas(rows);
    Set<Long> toDelete = Sets.newHashSet();
    for (Map.Entry<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> e : currentMetadata.entrySet()) {
        // Anything not fully STORED (e.g. still STORING, or marked for deletion) is garbage.
        if (e.getValue().getStatus() != Status.STORED) {
            toDelete.add(e.getKey().getId());
        }
    }
    // Transaction-manager-less factory: the deletion runs entirely inside the supplied transaction.
    SnapshotsStreamStore.of(tables).deleteStreams(t, toDelete);
    // false = no cells were newly deleted by this callback itself.
    return false;
}
} // NOTE(review): closes an enclosing (likely anonymous cleanup-task) class not visible in this chunk
/**
 * Creates a {@link SnapshotsStreamStore} backed by the given transaction manager
 * and table factory.
 *
 * @param txManager transaction manager used to run store operations
 * @param tables    factory for the snapshots stream tables
 * @return a new stream store instance
 */
public static SnapshotsStreamStore of(TransactionManager txManager, TodoSchemaTableFactory tables) {
    return new SnapshotsStreamStore(txManager, tables);
}
@Override
public Map<Sha256Hash, Long> lookupStreamIdsByHash(Transaction t, final Set<Sha256Hash> hashes) {
    // Resolves content hashes to stream ids via the hash-aidx index table,
    // returning only streams whose metadata is currently STORED (fully written).
    if (hashes.isEmpty()) {
        return ImmutableMap.of();
    }
    SnapshotsStreamHashAidxTable idx = tables.getSnapshotsStreamHashAidxTable(t);
    Set<SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow> rows = getHashIndexRowsForHashes(hashes);
    Multimap<SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow, SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxColumnValue> m = idx.getRowsMultimap(rows);
    // Invert the index: stream id -> hash. Each stream id should map to exactly one
    // hash; a mismatch is logged as a bug but tolerated (last writer wins).
    Map<Long, Sha256Hash> hashForStreams = Maps.newHashMap();
    for (SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow r : m.keySet()) {
        for (SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxColumnValue v : m.get(r)) {
            Long streamId = v.getColumnName().getStreamId();
            Sha256Hash hash = r.getHash();
            if (hashForStreams.containsKey(streamId)) {
                AssertUtils.assertAndLog(log, hashForStreams.get(streamId).equals(hash), "(BUG) Stream ID has 2 different hashes: " + streamId);
            }
            hashForStreams.put(streamId, hash);
        }
    }
    Map<Long, StreamMetadata> metadata = getMetadata(t, hashForStreams.keySet());
    Map<Sha256Hash, Long> ret = Maps.newHashMap();
    for (Map.Entry<Long, StreamMetadata> e : metadata.entrySet()) {
        // Skip streams that are still being written or are scheduled for deletion.
        if (e.getValue().getStatus() != Status.STORED) {
            continue;
        }
        Sha256Hash hash = hashForStreams.get(e.getKey());
        ret.put(hash, e.getKey());
    }
    return ret;
}
public void storeSnapshot(InputStream snapshot) { byte[] streamReference = "EteTest".getBytes(); TodoSchemaTableFactory tableFactory = TodoSchemaTableFactory.of(Namespace.DEFAULT_NAMESPACE); SnapshotsStreamStore streamStore = SnapshotsStreamStore.of(transactionManager, tableFactory); log.info("Storing stream..."); Pair<Long, Sha256Hash> storedStream = streamStore.storeStream(snapshot); Long newStreamId = storedStream.getLhSide(); log.info("Stored stream with ID {}", newStreamId); transactionManager.runTaskWithRetry(transaction -> { // Load previous stream, and unmark it as used LatestSnapshotTable.LatestSnapshotRow row = LatestSnapshotTable.LatestSnapshotRow.of(0L); LatestSnapshotTable latestSnapshotTable = tableFactory.getLatestSnapshotTable(transaction); Optional<LatestSnapshotTable.LatestSnapshotRowResult> maybeRow = latestSnapshotTable.getRow(row); maybeRow.ifPresent(latestSnapshot -> { Long latestStreamId = maybeRow.get().getStreamId(); log.info("Marking stream {}, ref {}, as unused", latestStreamId, PtBytes.toString(streamReference)); Map<Long, byte[]> theMap = ImmutableMap.of(latestStreamId, streamReference); streamStore.unmarkStreamsAsUsed(transaction, theMap); }); streamStore.markStreamAsUsed(transaction, newStreamId, streamReference); log.info("Marked stream {} as used with reference {}", newStreamId, PtBytes.toString(streamReference)); // Record the latest snapshot latestSnapshotTable.putStreamId(row, newStreamId); return null; }); }
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    // Writes stream metadata, enforcing the STORING -> STORED state machine and
    // maintaining the hash index for streams that have just become STORED.
    SnapshotsStreamMetadataTable mdTable = tables.getSnapshotsStreamMetadataTable(t);
    // Read existing metadata so state transitions can be validated transactionally.
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            // STORED is only valid as a transition from STORING.
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we cleanup old streams.
                throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This will prevent two users trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata);
        }
    }
    // Only fully STORED streams get hash-index entries (lookups must not see partial streams).
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
@Override protected void storeBlock(Transaction t, long id, long blockNumber, final byte[] block) { Preconditions.checkArgument(block.length <= BLOCK_SIZE_IN_BYTES, "Block to store in DB must be less than BLOCK_SIZE_IN_BYTES"); final SnapshotsStreamValueTable.SnapshotsStreamValueRow row = SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(id, blockNumber); try { // Do a touch operation on this table to ensure we get a conflict if someone cleans it up. touchMetadataWhileStoringForConflicts(t, row.getId(), row.getBlockId()); tables.getSnapshotsStreamValueTable(t).putValue(row, block); } catch (RuntimeException e) { log.error("Error storing block {} for stream id {}", row.getBlockId(), row.getId(), e); throw e; } }
@Override
protected void loadSingleBlockToOutputStream(Transaction t, Long streamId, long blockId, OutputStream os) {
    // Reads one block of the stream and writes it to the supplied output stream.
    SnapshotsStreamValueTable.SnapshotsStreamValueRow row =
            SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(streamId, blockId);
    try {
        os.write(getBlock(t, row));
    } catch (RuntimeException e) {
        // Fixed copy-paste bug: this is the load path, but the message said "storing".
        log.error("Error loading block {} for stream id {}", row.getBlockId(), row.getId(), e);
        throw e;
    } catch (IOException e) {
        log.error("Error writing block {} to file when getting stream id {}", row.getBlockId(), row.getId(), e);
        throw Throwables.rewrapAndThrowUncheckedException("Error writing blocks to file when creating stream.", e);
    }
}
@Override
protected Map<Long, StreamMetadata> getMetadata(Transaction t, Set<Long> streamIds) {
    // Nothing requested: return the canonical empty map without touching the table.
    if (streamIds.isEmpty()) {
        return ImmutableMap.of();
    }
    SnapshotsStreamMetadataTable metadataTable = tables.getSnapshotsStreamMetadataTable(t);
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowToMetadata =
            metadataTable.getMetadatas(getMetadataRowsForIds(streamIds));
    // Re-key the row-indexed result by raw stream id for callers.
    Map<Long, StreamMetadata> metadataByStreamId = Maps.newHashMap();
    for (SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow metadataRow : rowToMetadata.keySet()) {
        metadataByStreamId.put(metadataRow.getId(), rowToMetadata.get(metadataRow));
    }
    return metadataByStreamId;
}
// NOTE(review): fragment -- the enclosing method is not fully visible in this chunk.
// Appears to enumerate one value-table row per block of each stream (block count
// derived from metadata), presumably to collect them for deletion -- confirm in full source.
for (Entry<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> e : metadatas.entrySet()) {
    Long streamId = e.getKey().getId();
    long blocks = getNumberOfBlocksFromMetadata(e.getValue());
    for (long i = 0; i < blocks; i++) {
        streamValueToDelete.add(SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(streamId, i));
@Override
protected void putMetadataAndHashIndexTask(Transaction t, Map<Long, StreamMetadata> streamIdsToMetadata) {
    // Writes stream metadata, enforcing the STORING -> STORED state machine and
    // maintaining the hash index for streams that have just become STORED.
    SnapshotsStreamMetadataTable mdTable = tables.getSnapshotsStreamMetadataTable(t);
    // Read existing metadata so state transitions can be validated transactionally.
    Map<Long, StreamMetadata> prevMetadatas = getMetadata(t, streamIdsToMetadata.keySet());
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToStoredMetadata = Maps.newHashMap();
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToUnstoredMetadata = Maps.newHashMap();
    for (Entry<Long, StreamMetadata> e : streamIdsToMetadata.entrySet()) {
        long streamId = e.getKey();
        StreamMetadata metadata = e.getValue();
        StreamMetadata prevMetadata = prevMetadatas.get(streamId);
        if (metadata.getStatus() == Status.STORED) {
            // STORED is only valid as a transition from STORING.
            if (prevMetadata == null || prevMetadata.getStatus() != Status.STORING) {
                // This can happen if we cleanup old streams.
                throw new TransactionFailedRetriableException("Cannot mark a stream as stored that isn't currently storing: " + prevMetadata);
            }
            rowsToStoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata);
        } else if (metadata.getStatus() == Status.STORING) {
            // This will prevent two users trying to store the same id.
            if (prevMetadata != null) {
                throw new TransactionFailedRetriableException("Cannot reuse the same stream id: " + streamId);
            }
            rowsToUnstoredMetadata.put(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.of(streamId), metadata);
        }
    }
    // Only fully STORED streams get hash-index entries (lookups must not see partial streams).
    putHashIndexTask(t, rowsToStoredMetadata);
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowsToMetadata = Maps.newHashMap();
    rowsToMetadata.putAll(rowsToStoredMetadata);
    rowsToMetadata.putAll(rowsToUnstoredMetadata);
    mdTable.putMetadata(rowsToMetadata);
}
@Override protected void storeBlock(Transaction t, long id, long blockNumber, final byte[] block) { Preconditions.checkArgument(block.length <= BLOCK_SIZE_IN_BYTES, "Block to store in DB must be less than BLOCK_SIZE_IN_BYTES"); final SnapshotsStreamValueTable.SnapshotsStreamValueRow row = SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(id, blockNumber); try { // Do a touch operation on this table to ensure we get a conflict if someone cleans it up. touchMetadataWhileStoringForConflicts(t, row.getId(), row.getBlockId()); tables.getSnapshotsStreamValueTable(t).putValue(row, block); } catch (RuntimeException e) { log.error("Error storing block {} for stream id {}", row.getBlockId(), row.getId(), e); throw e; } }
@Override
protected void loadSingleBlockToOutputStream(Transaction t, Long streamId, long blockId, OutputStream os) {
    // Reads one block of the stream and writes it to the supplied output stream.
    SnapshotsStreamValueTable.SnapshotsStreamValueRow row =
            SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(streamId, blockId);
    try {
        os.write(getBlock(t, row));
    } catch (RuntimeException e) {
        // Fixed copy-paste bug: this is the load path, but the message said "storing".
        log.error("Error loading block {} for stream id {}", row.getBlockId(), row.getId(), e);
        throw e;
    } catch (IOException e) {
        log.error("Error writing block {} to file when getting stream id {}", row.getBlockId(), row.getId(), e);
        throw Throwables.rewrapAndThrowUncheckedException("Error writing blocks to file when creating stream.", e);
    }
}
@Override
protected Map<Long, StreamMetadata> getMetadata(Transaction t, Set<Long> streamIds) {
    // Nothing requested: return the canonical empty map without touching the table.
    if (streamIds.isEmpty()) {
        return ImmutableMap.of();
    }
    SnapshotsStreamMetadataTable metadataTable = tables.getSnapshotsStreamMetadataTable(t);
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> rowToMetadata =
            metadataTable.getMetadatas(getMetadataRowsForIds(streamIds));
    // Re-key the row-indexed result by raw stream id for callers.
    Map<Long, StreamMetadata> metadataByStreamId = Maps.newHashMap();
    for (SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow metadataRow : rowToMetadata.keySet()) {
        metadataByStreamId.put(metadataRow.getId(), rowToMetadata.get(metadataRow));
    }
    return metadataByStreamId;
}
// NOTE(review): fragment -- the enclosing method is not fully visible in this chunk.
// Appears to enumerate one value-table row per block of each stream (block count
// derived from metadata), presumably to collect them for deletion -- confirm in full source.
for (Entry<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> e : metadatas.entrySet()) {
    Long streamId = e.getKey().getId();
    long blocks = getNumberOfBlocksFromMetadata(e.getValue());
    for (long i = 0; i < blocks; i++) {
        streamValueToDelete.add(SnapshotsStreamValueTable.SnapshotsStreamValueRow.of(streamId, i));
@Override
public boolean cellsCleanedUp(Transaction t, Set<Cell> cells) {
    // Cleanup task for the stream-usage index: when index cells are swept, any stream
    // whose index row has no remaining columns (no remaining users) is deleted.
    SnapshotsStreamIdxTable usersIndex = tables.getSnapshotsStreamIdxTable(t);
    Set<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow> rows = Sets.newHashSetWithExpectedSize(cells.size());
    for (Cell cell : cells) {
        rows.add(SnapshotsStreamIdxTable.SnapshotsStreamIdxRow.BYTES_HYDRATOR.hydrateFromBytes(cell.getRowName()));
    }
    // Only one column per row is needed to decide whether the row still has any users.
    BatchColumnRangeSelection oneColumn = BatchColumnRangeSelection.create(
            PtBytes.EMPTY_BYTE_ARRAY, PtBytes.EMPTY_BYTE_ARRAY, 1);
    Map<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow, BatchingVisitable<SnapshotsStreamIdxTable.SnapshotsStreamIdxColumnValue>> existentRows
            = usersIndex.getRowsColumnRange(rows, oneColumn);
    Set<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow> rowsInDb = Sets.newHashSetWithExpectedSize(cells.size());
    for (Map.Entry<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow, BatchingVisitable<SnapshotsStreamIdxTable.SnapshotsStreamIdxColumnValue>> rowVisitable
            : existentRows.entrySet()) {
        // Visit at most one column; returning false stops visiting immediately.
        rowVisitable.getValue().batchAccept(1, columnValues -> {
            if (!columnValues.isEmpty()) {
                rowsInDb.add(rowVisitable.getKey());
            }
            return false;
        });
    }
    // Streams whose index rows vanished entirely are no longer referenced by anyone.
    Set<Long> toDelete = Sets.newHashSetWithExpectedSize(rows.size() - rowsInDb.size());
    for (SnapshotsStreamIdxTable.SnapshotsStreamIdxRow rowToDelete : Sets.difference(rows, rowsInDb)) {
        toDelete.add(rowToDelete.getId());
    }
    // Transaction-manager-less factory: the deletion runs entirely inside the supplied transaction.
    SnapshotsStreamStore.of(tables).deleteStreams(t, toDelete);
    // false = no cells were newly deleted by this callback itself.
    return false;
}
} // NOTE(review): closes an enclosing (likely anonymous cleanup-task) class not visible in this chunk
@Override
public Map<Sha256Hash, Long> lookupStreamIdsByHash(Transaction t, final Set<Sha256Hash> hashes) {
    // Resolves content hashes to stream ids via the hash-aidx index table,
    // returning only streams whose metadata is currently STORED (fully written).
    if (hashes.isEmpty()) {
        return ImmutableMap.of();
    }
    SnapshotsStreamHashAidxTable idx = tables.getSnapshotsStreamHashAidxTable(t);
    Set<SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow> rows = getHashIndexRowsForHashes(hashes);
    Multimap<SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow, SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxColumnValue> m = idx.getRowsMultimap(rows);
    // Invert the index: stream id -> hash. Each stream id should map to exactly one
    // hash; a mismatch is logged as a bug but tolerated (last writer wins).
    Map<Long, Sha256Hash> hashForStreams = Maps.newHashMap();
    for (SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxRow r : m.keySet()) {
        for (SnapshotsStreamHashAidxTable.SnapshotsStreamHashAidxColumnValue v : m.get(r)) {
            Long streamId = v.getColumnName().getStreamId();
            Sha256Hash hash = r.getHash();
            if (hashForStreams.containsKey(streamId)) {
                AssertUtils.assertAndLog(log, hashForStreams.get(streamId).equals(hash), "(BUG) Stream ID has 2 different hashes: " + streamId);
            }
            hashForStreams.put(streamId, hash);
        }
    }
    Map<Long, StreamMetadata> metadata = getMetadata(t, hashForStreams.keySet());
    Map<Sha256Hash, Long> ret = Maps.newHashMap();
    for (Map.Entry<Long, StreamMetadata> e : metadata.entrySet()) {
        // Skip streams that are still being written or are scheduled for deletion.
        if (e.getValue().getStatus() != Status.STORED) {
            continue;
        }
        Sha256Hash hash = hashForStreams.get(e.getKey());
        ret.put(hash, e.getKey());
    }
    return ret;
}
/**
 * This should only be used by test code or as a performance optimization.
 * The returned store is constructed without a transaction manager (null), so
 * callers must supply an already-open transaction to every operation.
 */
static SnapshotsStreamStore of(TodoSchemaTableFactory tables) {
    return new SnapshotsStreamStore(null, tables);
}
@Override
public boolean cellsCleanedUp(Transaction t, Set<Cell> cells) {
    // Cleanup task for the snapshots stream metadata table: when metadata cells are
    // swept, delete the backing stream data of any stream that never reached STORED.
    SnapshotsStreamMetadataTable metaTable = tables.getSnapshotsStreamMetadataTable(t);
    Collection<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow> rows = Lists.newArrayListWithCapacity(cells.size());
    for (Cell cell : cells) {
        // Row keys are the serialized stream ids of the swept metadata cells.
        rows.add(SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow.BYTES_HYDRATOR.hydrateFromBytes(cell.getRowName()));
    }
    // Re-read current metadata inside this transaction so we only act on the live state.
    Map<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> currentMetadata = metaTable.getMetadatas(rows);
    Set<Long> toDelete = Sets.newHashSet();
    for (Map.Entry<SnapshotsStreamMetadataTable.SnapshotsStreamMetadataRow, StreamMetadata> e : currentMetadata.entrySet()) {
        // Anything not fully STORED (e.g. still STORING, or marked for deletion) is garbage.
        if (e.getValue().getStatus() != Status.STORED) {
            toDelete.add(e.getKey().getId());
        }
    }
    // Transaction-manager-less factory: the deletion runs entirely inside the supplied transaction.
    SnapshotsStreamStore.of(tables).deleteStreams(t, toDelete);
    // false = no cells were newly deleted by this callback itself.
    return false;
}
} // NOTE(review): closes an enclosing (likely anonymous cleanup-task) class not visible in this chunk
/**
 * Creates a {@link SnapshotsStreamStore} with an explicit stream-store persistence
 * configuration supplier.
 *
 * @param txManager                transaction manager used to run store operations
 * @param tables                   factory for the snapshots stream tables
 * @param persistenceConfiguration supplier of the persistence configuration to apply
 * @return a new stream store instance
 */
public static SnapshotsStreamStore of(TransactionManager txManager, TodoSchemaTableFactory tables, Supplier<StreamStorePersistenceConfiguration> persistenceConfiguration) {
    return new SnapshotsStreamStore(txManager, tables, persistenceConfiguration);
}
@Override
public boolean cellsCleanedUp(Transaction t, Set<Cell> cells) {
    // Cleanup task for the stream-usage index: when index cells are swept, any stream
    // whose index row has no remaining columns (no remaining users) is deleted.
    SnapshotsStreamIdxTable usersIndex = tables.getSnapshotsStreamIdxTable(t);
    Set<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow> rows = Sets.newHashSetWithExpectedSize(cells.size());
    for (Cell cell : cells) {
        rows.add(SnapshotsStreamIdxTable.SnapshotsStreamIdxRow.BYTES_HYDRATOR.hydrateFromBytes(cell.getRowName()));
    }
    // Only one column per row is needed to decide whether the row still has any users.
    BatchColumnRangeSelection oneColumn = BatchColumnRangeSelection.create(
            PtBytes.EMPTY_BYTE_ARRAY, PtBytes.EMPTY_BYTE_ARRAY, 1);
    Map<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow, BatchingVisitable<SnapshotsStreamIdxTable.SnapshotsStreamIdxColumnValue>> existentRows
            = usersIndex.getRowsColumnRange(rows, oneColumn);
    Set<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow> rowsInDb = Sets.newHashSetWithExpectedSize(cells.size());
    for (Map.Entry<SnapshotsStreamIdxTable.SnapshotsStreamIdxRow, BatchingVisitable<SnapshotsStreamIdxTable.SnapshotsStreamIdxColumnValue>> rowVisitable
            : existentRows.entrySet()) {
        // Visit at most one column; returning false stops visiting immediately.
        rowVisitable.getValue().batchAccept(1, columnValues -> {
            if (!columnValues.isEmpty()) {
                rowsInDb.add(rowVisitable.getKey());
            }
            return false;
        });
    }
    // Streams whose index rows vanished entirely are no longer referenced by anyone.
    Set<Long> toDelete = Sets.newHashSetWithExpectedSize(rows.size() - rowsInDb.size());
    for (SnapshotsStreamIdxTable.SnapshotsStreamIdxRow rowToDelete : Sets.difference(rows, rowsInDb)) {
        toDelete.add(rowToDelete.getId());
    }
    // Transaction-manager-less factory: the deletion runs entirely inside the supplied transaction.
    SnapshotsStreamStore.of(tables).deleteStreams(t, toDelete);
    // false = no cells were newly deleted by this callback itself.
    return false;
}
} // NOTE(review): closes an enclosing (likely anonymous cleanup-task) class not visible in this chunk