private boolean addRowTombstone(LegacyRangeTombstone tombstone)
{
    if (clustering != null)
    {
        // If we're already in the row, there is a chance that two range tombstones were written,
        // as the 2.x storage format, unlike 3.x, does not guarantee just one range tombstone.
        // We have to make sure the clustering matches, which would mean the tombstone is for the same row.
        if (rowDeletion != null && clustering.equals(tombstone.start.getAsClustering(metadata)))
        {
            // If the tombstone supersedes the previous deletion, we discard the previous one
            if (tombstone.deletionTime.supersedes(rowDeletion.deletionTime))
            {
                builder.addRowDeletion(Row.Deletion.regular(tombstone.deletionTime));
                rowDeletion = tombstone;
            }
            return true;
        }

        // If we're already within a row and there was no deletion written before this one, it can't be for the same row
        return false;
    }

    clustering = tombstone.start.getAsClustering(metadata);
    builder.newRow(clustering);
    builder.addRowDeletion(Row.Deletion.regular(tombstone.deletionTime));
    rowDeletion = tombstone;
    return true;
}
private Unfiltered fixNegativeLocalExpirationTime(Row row)
{
    Row.Builder builder = HeapAllocator.instance.cloningBTreeRowBuilder();
    builder.newRow(row.clustering());
    builder.addPrimaryKeyLivenessInfo(row.primaryKeyLivenessInfo().isExpiring() && row.primaryKeyLivenessInfo().localExpirationTime() < 0
                                      ? row.primaryKeyLivenessInfo().withUpdatedTimestampAndLocalDeletionTime(row.primaryKeyLivenessInfo().timestamp() + 1, AbstractCell.MAX_DELETION_TIME)
                                      : row.primaryKeyLivenessInfo());
    builder.addRowDeletion(row.deletion());
    for (ColumnData cd : row)
    {
        if (cd.column().isSimple())
        {
            Cell cell = (Cell) cd;
            builder.addCell(cell.isExpiring() && cell.localDeletionTime() < 0
                            ? cell.withUpdatedTimestampAndLocalDeletionTime(cell.timestamp() + 1, AbstractCell.MAX_DELETION_TIME)
                            : cell);
        }
        else
        {
            ComplexColumnData complexData = (ComplexColumnData) cd;
            builder.addComplexDeletion(complexData.column(), complexData.complexDeletion());
            for (Cell cell : complexData)
            {
                builder.addCell(cell.isExpiring() && cell.localDeletionTime() < 0
                                ? cell.withUpdatedTimestampAndLocalDeletionTime(cell.timestamp() + 1, AbstractCell.MAX_DELETION_TIME)
                                : cell);
            }
        }
    }
    return builder.build();
}
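// Hypothetical refactoring sketch (not in the original source): the same "negative local
// expiration" clamp is applied above to the primary-key liveness info, the simple cells and
// the complex cells, so it could be factored into a small helper like this one. It uses only
// the Cell/AbstractCell calls already shown above; the helper name itself is an assumption.
private static Cell clampNegativeLocalExpirationTime(Cell cell)
{
    // Bump the timestamp and clamp the overflowed (negative) local deletion time to the maximum supported value.
    return cell.isExpiring() && cell.localDeletionTime() < 0
         ? cell.withUpdatedTimestampAndLocalDeletionTime(cell.timestamp() + 1, AbstractCell.MAX_DELETION_TIME)
         : cell;
}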
private void deleteOldEntryInternal(Row existingBaseRow, Row mergedBaseRow)
{
    startNewUpdate(existingBaseRow);
    long timestamp = computeTimestampForEntryDeletion(existingBaseRow, mergedBaseRow);
    long rowDeletion = mergedBaseRow.deletion().time().markedForDeleteAt();
    assert timestamp >= rowDeletion;

    // If the computed deletion timestamp is greater than the row deletion, it must be coming from
    //  1. a non-pk base column used in the view pk, or
    //  2. an unselected base column
    // In either case, we need to use it as an expired livenessInfo.
    // If the computed deletion timestamp comes from the row deletion, the row deletion itself is enough.
    if (timestamp > rowDeletion)
    {
        /**
         * We use an expired liveness instead of a row tombstone to allow a shadowed MV
         * entry to co-exist with a row tombstone, see ViewComplexTest#testCommutativeRowDeletion.
         *
         * TODO This is a dirty overload of LivenessInfo and we should modify
         * the storage engine to properly support this on CASSANDRA-13826.
         */
        LivenessInfo info = LivenessInfo.withExpirationTime(timestamp, LivenessInfo.EXPIRED_LIVENESS_TTL, nowInSec);
        currentViewEntryBuilder.addPrimaryKeyLivenessInfo(info);
    }
    currentViewEntryBuilder.addRowDeletion(mergedBaseRow.deletion());

    addDifferentCells(existingBaseRow, mergedBaseRow);
    submitUpdate();
}
/**
 * Creates a view entry corresponding to the provided base row.
 * <p>
 * This method checks that the base row does match the view filter before applying it.
 */
private void createEntry(Row baseRow)
{
    // Before creating a new entry, make sure it matches the view filter
    if (!matchesViewFilter(baseRow))
        return;

    startNewUpdate(baseRow);
    currentViewEntryBuilder.addPrimaryKeyLivenessInfo(computeLivenessInfoForEntry(baseRow));
    currentViewEntryBuilder.addRowDeletion(baseRow.deletion());

    for (ColumnData data : baseRow)
    {
        ColumnDefinition viewColumn = view.getViewColumn(data.column());
        // If that base table column is not denormalized in the view, we have nothing to do.
        // Also, if it's part of the view PK it's already been taken into account in the clustering.
        if (viewColumn == null || viewColumn.isPrimaryKeyColumn())
            continue;

        addColumnData(viewColumn, data);
    }
    submitUpdate();
}
public static Row.Builder copy(Row row, Row.Builder builder)
{
    builder.newRow(row.clustering());
    builder.addPrimaryKeyLivenessInfo(row.primaryKeyLivenessInfo());
    builder.addRowDeletion(row.deletion());
    for (ColumnData cd : row)
    {
        if (cd.column().isSimple())
        {
            builder.addCell((Cell) cd);
        }
        else
        {
            ComplexColumnData complexData = (ComplexColumnData) cd;
            builder.addComplexDeletion(complexData.column(), complexData.complexDeletion());
            for (Cell cell : complexData)
                builder.addCell(cell);
        }
    }
    return builder;
}
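// Hedged usage sketch (not from the source): one way the copy helper above might be driven.
// BTreeRow.sortedBuilder() is assumed to be available in this codebase as a Row.Builder factory;
// cloneRow itself is illustrative only.
private static Row cloneRow(Row original)
{
    // Rebuild the row from scratch: clustering, primary-key liveness info, row deletion, then every cell.
    return copy(original, BTreeRow.sortedBuilder()).build();
}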
public void addRowDeletion()
{
    // For compact tables, with the exception of the static row (of static compact tables), each row only ever has a
    // single column, the "compact" one. As such, deleting the row or deleting that single cell is equivalent. We favor
    // the latter, however, because that makes it easier when translating back to the old format layout (for thrift and
    // pre-3.0 backward compatibility) as we don't have to special-case the row deletion. This is also in line with
    // what we used to do pre-3.0.
    if (metadata.isCompactTable() && builder.clustering() != Clustering.STATIC_CLUSTERING && !metadata.isSuper())
        addTombstone(metadata.compactValueColumn());
    else
        builder.addRowDeletion(Row.Deletion.regular(deletionTime));
}
public Row.SimpleBuilder delete()
{
    assert !initiated : "If called, delete() should be called before any other column value addition";
    builder.addRowDeletion(Row.Deletion.regular(new DeletionTime(timestamp, nowInSec)));
    return this;
}
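// Hedged usage sketch (not from the source): per the assertion above, delete() must be the first
// call made on the row builder. The PartitionUpdate.simpleBuilder(...) and row(...) entry points
// are assumptions about the surrounding SimpleBuilder API; only delete() itself comes from the source.
private static PartitionUpdate deleteRowSketch(CFMetaData metadata, Object partitionKey, Object clusteringValue)
{
    PartitionUpdate.SimpleBuilder update = PartitionUpdate.simpleBuilder(metadata, partitionKey);
    // Mark the whole row deleted before any column value is added, as required by delete().
    update.row(clusteringValue).delete();
    return update.build();
}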
public void onDeletion(int i, Clustering clustering, Row.Deletion merged, Row.Deletion original)
{
    if (merged != null && !merged.equals(original))
        currentRow(i, clustering).addRowDeletion(merged);
}
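// Hedged context sketch (not from the source): onDeletion above is a RowDiffListener callback.
// Rows.diff(listener, merged, inputs...) is assumed to be the driver that invokes it for each input
// whose row deletion differs from the merged row's deletion; this wrapper is illustrative only.
private static void reportDeletionDifferences(RowDiffListener listener, Row merged, Row... inputs)
{
    // Walks the merged row against each input and fires the listener callbacks (onDeletion among them) where they differ.
    Rows.diff(listener, merged, inputs);
}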
public void addRowDeletion()
{
    // For compact tables, with the exception of the static row (of static compact tables), each row only ever has a
    // single column, the "compact" one. As such, deleting the row or deleting that single cell is equivalent. We favor
    // the latter, however, because that makes it easier when translating back to the old format layout (for thrift and
    // pre-3.0 backward compatibility) as we don't have to special-case the row deletion. This is also in line with
    // what we used to do pre-3.0.
    if (metadata.isCompactTable() && builder.clustering() != Clustering.STATIC_CLUSTERING)
        addTombstone(metadata.compactValueColumn());
    else
        builder.addRowDeletion(Row.Deletion.regular(deletionTime));
}