/**
 * Collects the writes stored in the dedicated rows referenced by the given (row, column) entry
 * into a single {@link WriteBatch}.
 *
 * @param row the sweepable cells row containing the reference entry
 * @param col the column that references dedicated rows (see {@code computeDedicatedRows})
 * @return a batch containing the dedicated rows and all write infos read from them
 */
private WriteBatch writesFromDedicated(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
    List<SweepableCellsRow> dedicatedRows = computeDedicatedRows(row, col);
    RowColumnRangeIterator iterator = getWithColumnRangeAll(
            Lists.transform(dedicatedRows, SweepableCellsRow::persistToBytes));
    // The start timestamp depends only on (row, col), so compute it once instead of
    // re-deriving it inside the lambda for every entry streamed from the dedicated rows.
    long startTs = getTimestamp(row, col);
    List<WriteInfo> writes = Streams.stream(iterator)
            .map(entry -> getWriteInfo(startTs, entry.getValue()))
            .collect(Collectors.toList());
    return new WriteBatch().add(dedicatedRows, writes);
}
/**
 * Materializes the writes represented by a single (row, column, value) entry.
 *
 * <p>A column either stores the write inline, or is flagged as a reference to dedicated
 * rows that hold the writes for this start timestamp; both cases yield a {@link WriteBatch}.
 *
 * @param row the sweepable cells row the entry was read from
 * @param col the column of the entry
 * @param value the stored value (ignored when the column is a dedicated-row reference)
 * @return the batch of writes for this entry
 */
private WriteBatch getWrites(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col, Value value) {
    return isReferenceToDedicatedRows(col)
            ? writesFromDedicated(row, col)
            : WriteBatch.single(getWriteInfo(getTimestamp(row, col), value));
}
/**
 * Drains entries from {@code resultIterator} into a {@link WriteBatch} until the batch holds
 * {@link SweepQueueUtils#SWEEP_BATCH_SIZE} distinct start timestamps or the iterator is exhausted,
 * then additionally pulls in any trailing entries sharing a start timestamp already in the batch.
 *
 * @param row the sweepable cells row the entries belong to
 * @param resultIterator peeking iterator over (cell, value) entries for this row
 * @param sweepTs the sweep timestamp used to decide whether a start timestamp is known to have
 *        committed after it
 * @return the accumulated batch of writes
 */
private WriteBatch getBatchOfWrites(SweepableCellsRow row, PeekingIterator<Map.Entry<Cell, Value>> resultIterator, long sweepTs) {
    WriteBatch writeBatch = new WriteBatch();
    // First pass: consume entries until the batch reaches SWEEP_BATCH_SIZE distinct start timestamps.
    while (resultIterator.hasNext() && writeBatch.writesByStartTs.size() < SweepQueueUtils.SWEEP_BATCH_SIZE) {
        Map.Entry<Cell, Value> entry = resultIterator.next();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long startTs = getTimestamp(row, col);
        // A start timestamp known to be committed after the sweep timestamp cuts the batch short:
        // only this single write is recorded and the batch is returned immediately.
        if (knownToBeCommittedAfterSweepTs(startTs, sweepTs)) {
            writeBatch.add(ImmutableList.of(getWriteInfo(startTs, entry.getValue())));
            return writeBatch;
        }
        writeBatch.merge(getWrites(row, col, entry.getValue()));
    }
    // there may be entries remaining with the same start timestamp as the last processed one. If that is the case
    // we want to include these ones as well. This is OK since there are at most MAX_CELLS_GENERIC - 1 of them.
    while (resultIterator.hasNext()) {
        // Peek before consuming: an entry with a start timestamp not already in the batch
        // must be left on the iterator for the next batch.
        Map.Entry<Cell, Value> entry = resultIterator.peek();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long timestamp = getTimestamp(row, col);
        if (writeBatch.writesByStartTs.containsKey(timestamp)) {
            writeBatch.merge(getWrites(row, col, entry.getValue()));
            resultIterator.next();
        } else {
            break;
        }
    }
    return writeBatch;
}
/**
 * Computes the list of dedicated rows referenced by the given (row, column) entry.
 *
 * <p>Each dedicated row reuses the referencing row's metadata, with the dedicated-row flag set
 * and a sequential dedicated row number; the number of rows is derived from the column's
 * write index.
 *
 * @param row the referencing sweepable cells row
 * @param col the column whose write index encodes the number of dedicated rows
 * @return the dedicated rows, in dedicated-row-number order
 */
private List<SweepableCellsRow> computeDedicatedRows(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
    TargetedSweepMetadata baseMetadata = TargetedSweepMetadata.BYTES_HYDRATOR.hydrateFromBytes(row.getMetadata());
    long startTs = getTimestamp(row, col);
    int rowCount = writeIndexToNumberOfDedicatedRows(col.getWriteIndex());
    List<SweepableCellsRow> result = new ArrayList<>(rowCount);
    for (int rowNumber = 0; rowNumber < rowCount; rowNumber++) {
        byte[] dedicatedMetadata = ImmutableTargetedSweepMetadata.builder()
                .from(baseMetadata)
                .dedicatedRow(true)
                .dedicatedRowNumber(rowNumber)
                .build()
                .persistToBytes();
        result.add(SweepableCellsRow.of(startTs, dedicatedMetadata));
    }
    return result;
}
/**
 * Gathers all writes stored in the dedicated rows that the given (row, column) entry points to.
 *
 * @param row the sweepable cells row containing the reference entry
 * @param col the column that references dedicated rows (see {@code computeDedicatedRows})
 * @return a {@link WriteBatch} holding the dedicated rows together with their write infos
 */
private WriteBatch writesFromDedicated(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
    List<SweepableCellsRow> dedicatedRows = computeDedicatedRows(row, col);
    RowColumnRangeIterator iterator = getWithColumnRangeAll(
            Lists.transform(dedicatedRows, SweepableCellsRow::persistToBytes));
    // Hoist the loop-invariant timestamp: it is a function of (row, col) only, so there is
    // no need to recompute it for each value read out of the dedicated rows.
    long startTs = getTimestamp(row, col);
    List<WriteInfo> writes = Streams.stream(iterator)
            .map(entry -> getWriteInfo(startTs, entry.getValue()))
            .collect(Collectors.toList());
    return new WriteBatch().add(dedicatedRows, writes);
}
/**
 * Turns one (row, column, value) entry into its batch of writes.
 *
 * <p>Columns flagged as references delegate to the dedicated rows they point at; all other
 * columns carry a single inline write.
 *
 * @param row the sweepable cells row the entry was read from
 * @param col the column of the entry
 * @param value the stored value (unused for dedicated-row references)
 * @return the writes represented by this entry
 */
private WriteBatch getWrites(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col, Value value) {
    if (!isReferenceToDedicatedRows(col)) {
        return WriteBatch.single(getWriteInfo(getTimestamp(row, col), value));
    }
    return writesFromDedicated(row, col);
}
/**
 * Builds one batch of writes from {@code resultIterator}: entries are consumed until the batch
 * contains {@link SweepQueueUtils#SWEEP_BATCH_SIZE} distinct start timestamps (or the iterator
 * runs out), after which any immediately-following entries whose start timestamp is already in
 * the batch are consumed as well.
 *
 * @param row the sweepable cells row the entries belong to
 * @param resultIterator peeking iterator over (cell, value) entries for this row
 * @param sweepTs sweep timestamp checked against each entry's start timestamp
 * @return the accumulated batch of writes
 */
private WriteBatch getBatchOfWrites(SweepableCellsRow row, PeekingIterator<Map.Entry<Cell, Value>> resultIterator, long sweepTs) {
    WriteBatch writeBatch = new WriteBatch();
    // Main loop: bounded by the number of distinct start timestamps accumulated so far.
    while (resultIterator.hasNext() && writeBatch.writesByStartTs.size() < SweepQueueUtils.SWEEP_BATCH_SIZE) {
        Map.Entry<Cell, Value> entry = resultIterator.next();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long startTs = getTimestamp(row, col);
        // When the start timestamp is known to have committed after sweepTs, the batch is
        // ended early with only this one write included.
        if (knownToBeCommittedAfterSweepTs(startTs, sweepTs)) {
            writeBatch.add(ImmutableList.of(getWriteInfo(startTs, entry.getValue())));
            return writeBatch;
        }
        writeBatch.merge(getWrites(row, col, entry.getValue()));
    }
    // there may be entries remaining with the same start timestamp as the last processed one. If that is the case
    // we want to include these ones as well. This is OK since there are at most MAX_CELLS_GENERIC - 1 of them.
    while (resultIterator.hasNext()) {
        // peek() so that an entry belonging to a new start timestamp stays on the iterator
        // and becomes the first entry of the next batch.
        Map.Entry<Cell, Value> entry = resultIterator.peek();
        SweepableCellsTable.SweepableCellsColumn col = computeColumn(entry);
        long timestamp = getTimestamp(row, col);
        if (writeBatch.writesByStartTs.containsKey(timestamp)) {
            writeBatch.merge(getWrites(row, col, entry.getValue()));
            resultIterator.next();
        } else {
            break;
        }
    }
    return writeBatch;
}
/**
 * Derives the dedicated rows that the given (row, column) entry refers to.
 *
 * <p>Each dedicated row is keyed by the entry's start timestamp and carries the referencing
 * row's metadata, rewritten with the dedicated-row flag and an increasing row number.
 *
 * @param row the referencing sweepable cells row
 * @param col the column whose write index encodes how many dedicated rows exist
 * @return the dedicated rows, ordered by dedicated row number
 */
private List<SweepableCellsRow> computeDedicatedRows(SweepableCellsRow row, SweepableCellsTable.SweepableCellsColumn col) {
    TargetedSweepMetadata referencingMetadata = TargetedSweepMetadata.BYTES_HYDRATOR.hydrateFromBytes(row.getMetadata());
    long startTs = getTimestamp(row, col);
    int totalRows = writeIndexToNumberOfDedicatedRows(col.getWriteIndex());
    List<SweepableCellsRow> dedicated = new ArrayList<>();
    int rowNumber = 0;
    while (rowNumber < totalRows) {
        byte[] metadataBytes = ImmutableTargetedSweepMetadata.builder()
                .from(referencingMetadata)
                .dedicatedRow(true)
                .dedicatedRowNumber(rowNumber)
                .build()
                .persistToBytes();
        dedicated.add(SweepableCellsRow.of(startTs, metadataBytes));
        rowNumber++;
    }
    return dedicated;
}