// NOTE(review): fragment of an anonymous function/transformation object — the enclosing
// "new ...() {" header and the fields 'memtable' and 'flushed' it captures are outside this view.
public View apply(View view)
{
    // remove exactly one occurrence of our memtable from the flushing set
    List<Memtable> flushingMemtables = copyOf(filter(view.flushingMemtables, not(equalTo(memtable))));
    assert flushingMemtables.size() == view.flushingMemtables.size() - 1;
    // nothing was flushed to disk: only the memtable lists change, sstables and interval tree are reused
    if (flushed == null || Iterables.isEmpty(flushed))
        return new View(view.liveMemtables, flushingMemtables, view.sstablesMap, view.compactingMap, view.intervalTree);
    // otherwise add the newly flushed sstables (removing none) and rebuild the interval tree
    // over the new live sstable set
    Map<SSTableReader, SSTableReader> sstableMap = replace(view.sstablesMap, emptySet(), flushed);
    return new View(view.liveMemtables, flushingMemtables, sstableMap, view.compactingMap, SSTableIntervalTree.build(sstableMap.keySet()));
}
};
/** * obsolete every file in the original transaction */ public void obsoleteOriginals() { logger.trace("Staging for obsolescence {}", originals); // if we're obsoleting, we should have no staged updates for the original files assert Iterables.isEmpty(filterIn(staged.update, originals)) : staged.update; // stage obsoletes for any currently visible versions of any original readers Iterables.addAll(staged.obsolete, filterIn(current(), originals)); }
/**
 * @return the staged readers that have not appeared previously, either in the
 *         original set or in the logged updates
 */
private Iterable<SSTableReader> fresh()
{
    // anything staged for update that is neither an original nor already logged is brand new
    Iterable<SSTableReader> neverSeen = filterOut(staged.update, originals, logged.update);
    return neverSeen;
}
/**
 * @return a predicate matching, by reference identity, the members of the provided set
 */
static <T> Predicate<T> idIn(Set<T> set)
{
    // delegate to the map-based overload, viewing the set as an identity map
    Map<T, T> identities = identityMap(set);
    return idIn(identities);
}
/**
 * Produce a new set equal to {@code original} with {@code remove} taken out and {@code add}
 * put in, asserting that the removed items are really present and the added ones are not
 * (unless we're also removing them).
 * @return a new set with the contents of the provided one modified
 */
static <T> Set<T> replace(Set<T> original, Set<T> remove, Iterable<T> add)
{
    // perform the substitution on an identity-map view of the set, then keep only its keys
    Map<T, T> replaced = replace(identityMap(original), remove, add);
    return ImmutableSet.copyOf(replaced.keySet());
}
// NOTE(review): fragment — the enclosing method signature (presumably an abort/rollback path
// threading a Throwable 'accumulate' through each cleanup step) is outside this view.
logger.trace("Aborting transaction over {} staged: {}, logged: {}", originals, staged, logged);
// undo any obsoletions that were prepared but not yet committed
accumulate = abortObsoletion(obsoletions, accumulate);
// readers introduced during the transaction (absent from the originals) must now be discarded
Iterable<SSTableReader> obsolete = filterOut(concatUniq(staged.update, logged.update), originals);
logger.trace("Obsoleting {}", obsolete);
accumulate = prepareForObsoletion(obsolete, log, obsoletions = new ArrayList<>(), accumulate);
accumulate = markObsolete(obsoletions, accumulate);
// mark the logged updates replaced, releasing our references to them
accumulate = setReplaced(logged.update, accumulate);
private Throwable checkpoint(Throwable accumulate) { if (logger.isTraceEnabled()) logger.trace("Checkpointing staged {}", staged); if (staged.isEmpty()) return accumulate; Set<SSTableReader> toUpdate = toUpdate(); Set<SSTableReader> fresh = copyOf(fresh()); // check the current versions of the readers we're replacing haven't somehow been replaced by someone else checkNotReplaced(filterIn(toUpdate, staged.update)); // ensure any new readers are in the compacting set, since we aren't done with them yet // and don't want anyone else messing with them // apply atomically along with updating the live set of readers tracker.apply(compose(updateCompacting(emptySet(), fresh), updateLiveSet(toUpdate, staged.update))); // log the staged changes and our newly marked readers marked.addAll(fresh); logged.log(staged); // setup our tracker, and mark our prior versions replaced, also releasing our references to them // we do not replace/release obsoleted readers, since we may need to restore them on rollback accumulate = setReplaced(filterOut(toUpdate, staged.obsolete), accumulate); accumulate = release(selfRefs(filterOut(toUpdate, staged.obsolete)), accumulate); staged.clear(); return accumulate; }
// NOTE(review): fragment — interior of a method (apparently a bulk-removal path); the opening
// of the lambda/anonymous class closed by "});" below, and the braces and catch clause for the
// bare "try" (which also binds the 't' used at the end), are outside this view, so this span
// is not compilable on its own.
// take only the sstables matching 'remove' that are not currently compacting
Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
return updateLiveSet(toremove, emptySet()).apply(view);
});
// prepare obsoletions for the removed readers before any irreversible step
accumulate = prepareForObsoletion(removed, txnLogs, obsoletions, accumulate);
try
if (!removed.isEmpty())
// mark obsolete, then update size accounting and release our self references
accumulate = markObsolete(obsoletions, accumulate);
accumulate = updateSizeTracking(removed, emptySet(), accumulate);
accumulate = release(selfRefs(removed), accumulate);
// failure path: undo the prepared obsoletions and merge the caught error into the accumulator
accumulate = abortObsoletion(obsoletions, accumulate);
accumulate = Throwables.merge(accumulate, t);
// NOTE(review): fragment — interior of a method; the opening of the construct closed by "});",
// and the braces/catch clause of the bare "try" (which binds 't'), are outside this view, so
// this span is not compilable on its own.
// take only the sstables matching 'remove' that are not currently compacting
Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
return updateLiveSet(toremove, emptySet()).apply(view);
});
// NOTE(review): this variant uses prepareForBulkObsoletion where a sibling path uses
// prepareForObsoletion — presumably a batch form; confirm against the helper's definition
accumulate = prepareForBulkObsoletion(removed, txnLogs, obsoletions, accumulate);
try
if (!removed.isEmpty())
// mark obsolete, then update size accounting and release our self references
accumulate = markObsolete(obsoletions, accumulate);
accumulate = updateSizeTracking(removed, emptySet(), accumulate);
accumulate = release(selfRefs(removed), accumulate);
// failure path: undo the prepared obsoletions and merge the caught error into the accumulator
accumulate = abortObsoletion(obsoletions, accumulate);
accumulate = Throwables.merge(accumulate, t);
public void doPrepare() { // note for future: in anticompaction two different operations use the same Transaction, and both prepareToCommit() // separately: the second prepareToCommit is ignored as a "redundant" transition. since it is only a checkpoint // (and these happen anyway) this is fine but if more logic gets inserted here than is performed in a checkpoint, // it may break this use case, and care is needed checkpoint(); // prepare for compaction obsolete readers as long as they were part of the original set // since those that are not original are early readers that share the same desc with the finals maybeFail(prepareForObsoletion(filterIn(logged.obsolete, originals), log, obsoletions = new ArrayList<>(), null)); log.prepareToCommit(); }
private Throwable unmarkCompacting(Set<SSTableReader> unmark, Throwable accumulate) { accumulate = tracker.apply(updateCompacting(unmark, emptySet()), accumulate); // when the CFS is invalidated, it will call unreferenceSSTables(). However, unreferenceSSTables only deals // with sstables that aren't currently being compacted. If there are ongoing compactions that finish or are // interrupted after the CFS is invalidated, those sstables need to be unreferenced as well, so we do that here. accumulate = tracker.dropSSTablesIfInvalid(accumulate); return accumulate; }
/**
 * filter out (i.e. remove) matching elements
 * @return filter, filtered to only those elements that *are not* present in *any* of the
 *         provided sets (are present in none)
 */
static <T> Iterable<T> filterOut(Iterable<T> filter, Set<T>... inNone)
{
    Predicate<T> absentFromAll = notIn(inNone);
    return filter(filter, absentFromAll);
}
/** * point of no return: commit all changes, but leave all readers marked as compacting */ public Throwable doCommit(Throwable accumulate) { assert staged.isEmpty() : "must be no actions introduced between prepareToCommit and a commit"; if (logger.isTraceEnabled()) logger.trace("Committing transaction over {} staged: {}, logged: {}", originals, staged, logged); // accumulate must be null if we have been used correctly, so fail immediately if it is not maybeFail(accumulate); // transaction log commit failure means we must abort; safe commit is not possible maybeFail(log.commit(null)); // this is now the point of no return; we cannot safely rollback, so we ignore exceptions until we're done // we restore state by obsoleting our obsolete files, releasing our references to them, and updating our size // and notification status for the obsolete and new files accumulate = markObsolete(obsoletions, accumulate); accumulate = tracker.updateSizeTracking(logged.obsolete, logged.update, accumulate); accumulate = release(selfRefs(logged.obsolete), accumulate); //accumulate = tracker.notifySSTablesChanged(originals, logged.update, log.type(), accumulate); return accumulate; }
/**
 * update the contents of an "identity map" with the provided sets, ensuring that the items to remove are
 * really present, and that the items to add are not (unless we're also removing them)
 * @return a new identity map with the contents of the provided one modified
 */
static <T> Map<T, T> replace(Map<T, T> original, Set<T> remove, Iterable<T> add)
{
    // ensure the ones being removed are the exact same ones present (reference identity, not equals)
    for (T reader : remove)
        assert original.get(reader) == reader;
    // ensure we don't already contain any we're adding, that we aren't also removing
    assert !any(add, and(not(in(remove)), in(original.keySet()))) : String.format("original:%s remove:%s add:%s", original.keySet(), remove, add);
    // build the result from the additions plus the survivors of the removal
    Map<T, T> result = identityMap(concat(add, filter(original.keySet(), not(in(remove)))));
    // sanity-check the arithmetic: nothing was silently dropped or duplicated
    // (the expression is deliberately repeated inside the assert so no work is done when asserts are disabled)
    assert result.size() == original.size() - remove.size() + Iterables.size(add) : String.format("Expecting new size of %d, got %d while replacing %s by %s in %s", original.size() - remove.size() + Iterables.size(add), result.size(), remove, add, original.keySet());
    return result;
}
// NOTE(review): fragment — the enclosing abort/rollback method signature, and the definitions
// of 'invalid' and 'restored' used below, are outside this view.
logger.trace("Aborting transaction over {} staged: {}, logged: {}", originals, staged, logged);
// undo any obsoletions that were prepared but not yet committed
accumulate = abortObsoletion(obsoletions, accumulate);
// readers introduced during the transaction (absent from the originals) must now be discarded
Iterable<SSTableReader> obsolete = filterOut(concatUniq(staged.update, logged.update), originals);
logger.trace("Obsoleting {}", obsolete);
accumulate = prepareForObsoletion(obsolete, log, obsoletions = new ArrayList<>(), accumulate);
accumulate = markObsolete(obsoletions, accumulate);
// notify listeners that the invalidated readers were swapped back for the restored ones
// NOTE(review): OperationType.COMPACTION is hard-coded — confirm this abort path is compaction-only
accumulate = tracker.notifySSTablesChanged(invalid, restored, OperationType.COMPACTION, accumulate);
// mark the logged updates replaced, releasing our references to them
accumulate = setReplaced(logged.update, accumulate);
private Throwable checkpoint(Throwable accumulate) { if (logger.isTraceEnabled()) logger.trace("Checkpointing staged {}", staged); if (staged.isEmpty()) return accumulate; Set<SSTableReader> toUpdate = toUpdate(); Set<SSTableReader> fresh = copyOf(fresh()); // check the current versions of the readers we're replacing haven't somehow been replaced by someone else checkNotReplaced(filterIn(toUpdate, staged.update)); // ensure any new readers are in the compacting set, since we aren't done with them yet // and don't want anyone else messing with them // apply atomically along with updating the live set of readers tracker.apply(compose(updateCompacting(emptySet(), fresh), updateLiveSet(toUpdate, staged.update))); // log the staged changes and our newly marked readers marked.addAll(fresh); logged.log(staged); // setup our tracker, and mark our prior versions replaced, also releasing our references to them // we do not replace/release obsoleted readers, since we may need to restore them on rollback accumulate = setReplaced(filterOut(toUpdate, staged.obsolete), accumulate); accumulate = release(selfRefs(filterOut(toUpdate, staged.obsolete)), accumulate); staged.clear(); return accumulate; }
// NOTE(review): fragment — interior of a method; the opening of the lambda/anonymous class
// closed by "});" below, and the braces and catch clause for the bare "try" (which binds the
// 't' used at the end), are outside this view, so this span is not compilable on its own.
// take only the sstables matching 'remove' that are not currently compacting
Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
return updateLiveSet(toremove, emptySet()).apply(view);
});
// prepare obsoletions for the removed readers before any irreversible step
accumulate = prepareForObsoletion(removed, txnLogs, obsoletions, accumulate);
try
if (!removed.isEmpty())
// mark obsolete, then update size accounting and release our self references
accumulate = markObsolete(obsoletions, accumulate);
accumulate = updateSizeTracking(removed, emptySet(), accumulate);
accumulate = release(selfRefs(removed), accumulate);
// failure path: undo the prepared obsoletions and merge the caught error into the accumulator
accumulate = abortObsoletion(obsoletions, accumulate);
accumulate = Throwables.merge(accumulate, t);
/**
 * update the contents of a set with the provided sets, ensuring that the items to remove are
 * really present, and that the items to add are not (unless we're also removing them)
 * @return a new set with the contents of the provided one modified
 */
static <T> Set<T> replace(Set<T> original, Set<T> remove, Iterable<T> add)
{
    // view the set as an identity map, apply the map-based replacement, and keep the keys
    Map<T, T> asIdentityMap = identityMap(original);
    Map<T, T> updated = replace(asIdentityMap, remove, add);
    return ImmutableSet.copyOf(updated.keySet());
}