/**
 * remove the provided readers from this Transaction, and return a new Transaction to manage them
 * only permitted to be called if the current Transaction has never been used
 */
public LifecycleTransaction split(Collection<SSTableReader> readers)
{
    logger.trace("Splitting {} into new transaction", readers);
    checkUnused();
    for (SSTableReader reader : readers)
        assert identities.contains(reader.instanceId) : "may only split the same reader instance the transaction was opened with: " + reader;

    for (SSTableReader reader : readers)
    {
        identities.remove(reader.instanceId);
        originals.remove(reader);
        marked.remove(reader);
    }
    return new LifecycleTransaction(tracker, log.type(), readers);
}
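// Hedged usage sketch (not from the source): split() lets a transaction opened
// over many readers be carved into independent per-reader transactions before
// any work has been done, as the parallel per-sstable operations do.
// `parentTxn` and `workOn` are hypothetical names; ImmutableList is Guava's,
// copied first so split() can safely mutate parentTxn's originals mid-iteration.
for (SSTableReader reader : ImmutableList.copyOf(parentTxn.originals()))
{
    // each child transaction now guards exactly one reader and commits or aborts on its own
    LifecycleTransaction child = parentTxn.split(Collections.singleton(reader));
    workOn(child);
}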
public boolean reduceScopeForLimitedSpace(long expectedSize)
{
    if (partialCompactionsAcceptable() && transaction.originals().size() > 1)
    {
        // Try again w/o the largest one.
        logger.warn("insufficient space to compact all requested files. {}MB required, {}",
                    (float) expectedSize / 1024 / 1024,
                    StringUtils.join(transaction.originals(), ", "));
        // Note that we have removed files that are still marked as compacting.
        // This is suboptimal but ok since the caller will unmark all the sstables at the end.
        SSTableReader removedSSTable = cfs.getMaxSizeFile(transaction.originals());
        transaction.cancel(removedSSTable);
        return true;
    }
    return false;
}
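// Hedged sketch of a caller (hypothetical helpers `estimatedWriteSize` and
// `hasAvailableSpace`): keep dropping the largest sstable from the compaction
// until the estimated output fits on disk, giving up once the scope can no
// longer shrink.
long expectedSize = estimatedWriteSize();
while (!hasAvailableSpace(expectedSize))
{
    if (!reduceScopeForLimitedSpace(expectedSize))
        throw new RuntimeException("Not enough disk space for compaction; " + expectedSize + " bytes required");
    expectedSize = estimatedWriteSize(); // re-estimate over the reduced set
}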
/**
 * call when a consistent batch of changes is ready to be made atomically visible
 * these will be exposed in the Tracker atomically, or an exception will be thrown; in this case
 * the transaction should be rolled back
 */
public void checkpoint()
{
    maybeFail(checkpoint(null));
}

private Throwable checkpoint(Throwable accumulate) // implementation shown in full below
private void moveStarts(SSTableReader newReader, DecoratedKey lowerbound)
{
    if (transaction.isOffline())
        return;
    if (preemptiveOpenInterval == Long.MAX_VALUE)
        return;

    for (SSTableReader sstable : transaction.originals())
    {
        // work against the latest version of each reader, in case an earlier
        // early-open has already replaced it within this transaction
        final SSTableReader latest = transaction.current(sstable);

        // if the new writer already covers everything in this reader, obsolete it entirely;
        // otherwise shrink its start to the first key beyond what has been written
        if (lowerbound.compareTo(latest.last) >= 0)
        {
            if (!transaction.isObsolete(latest))
                transaction.obsolete(latest);
            continue;
        }

        DecoratedKey newStart = latest.firstKeyBeyond(lowerbound);
        assert newStart != null;
        // runOnClose: a cleanup hook (key-cache invalidation in the full source); its construction is elided from this excerpt
        SSTableReader replacement = latest.cloneWithNewStart(newStart, runOnClose);
        transaction.update(replacement, true);
    }
}
protected void doPrepare()
{
    switchWriter(null);

    if (throwEarly)
        throw new RuntimeException("exception thrown early in finish, for testing");

    // No early open to finalize and replace
    for (SSTableWriter writer : writers)
    {
        assert writer.getFilePointer() > 0;
        writer.setRepairedAt(repairedAt).setOpenResult(true).prepareToCommit();
        SSTableReader reader = writer.finished();
        transaction.update(reader, false);
        preparedForCommit.add(reader);
    }
    transaction.checkpoint();

    if (throwLate)
        throw new RuntimeException("exception thrown after all sstables finished, for testing");

    if (!keepOriginals)
        transaction.obsoleteOriginals();

    transaction.prepareToCommit();
}
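// Hedged sketch of where doPrepare() sits in the Transactional protocol
// (Transactional.AbstractTransactional, which this class extends): everything
// that can fail happens in the prepare phase; commit must not fail.
rewriter.prepareToCommit(); // invokes doPrepare() above; may throw, leaving the transaction abortable
rewriter.commit();          // point of no return: publishes the prepared state
// callers usually combine the two via rewriter.finish()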
private void maybeReopenEarly(DecoratedKey key)
{
    if (writer.getFilePointer() - currentlyOpenedEarlyAt > preemptiveOpenInterval)
    {
        if (transaction.isOffline())
        {
            for (SSTableReader reader : transaction.originals())
            {
                RowIndexEntry index = reader.getPosition(key, SSTableReader.Operator.GE);
                NativeLibrary.trySkipCache(reader.getFilename(), 0, index == null ? 0 : index.position);
            }
        }
        else
        {
            SSTableReader reader = writer.setMaxDataAge(maxAge).openEarly();
            if (reader != null)
            {
                transaction.update(reader, false);
                currentlyOpenedEarlyAt = writer.getFilePointer();
                moveStarts(reader, reader.last);
                transaction.checkpoint();
            }
        }
    }
}
public void switchWriter(SSTableWriter newWriter)
{
    if (newWriter != null)
        writers.add(newWriter.setMaxDataAge(maxAge));

    if (writer == null || writer.getFilePointer() == 0)
    {
        if (writer != null)
        {
            writer.abort();
            transaction.untrackNew(writer);
            writers.remove(writer);
        }
        writer = newWriter;
        return;
    }

    if (preemptiveOpenInterval != Long.MAX_VALUE)
    {
        // we leave it as a tmp file, but we open it and add it to the Tracker
        SSTableReader reader = writer.setMaxDataAge(maxAge).openFinalEarly();
        transaction.update(reader, false);
        moveStarts(reader, reader.last);
        transaction.checkpoint();
    }

    currentlyOpenedEarlyAt = 0;
    writer = newWriter;
}
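// Hedged sketch of the calling pattern around switchWriter() (hypothetical
// `createWriter` factory, `partitions` iterator and `shouldSwitch` boundary
// check): point the rewriter at a fresh writer, append partitions, switch
// writers at boundaries, then finish() to publish the new sstables.
rewriter.switchWriter(createWriter(directory));
while (partitions.hasNext())
{
    rewriter.append(partitions.next());
    if (shouldSwitch())
        rewriter.switchWriter(createWriter(directory));
}
List<SSTableReader> newSSTables = rewriter.finish();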
private Throwable checkpoint(Throwable accumulate)
{
    if (logger.isTraceEnabled())
        logger.trace("Checkpointing staged {}", staged);
    if (staged.isEmpty())
        return accumulate;

    Set<SSTableReader> toUpdate = toUpdate();
    Set<SSTableReader> fresh = copyOf(fresh());

    // check the current versions of the readers we're replacing haven't somehow been replaced by someone else
    checkNotReplaced(filterIn(toUpdate, staged.update));

    // ensure any new readers are in the compacting set, since we aren't done with them yet
    // and don't want anyone else messing with them
    // apply atomically along with updating the live set of readers
    tracker.apply(compose(updateCompacting(emptySet(), fresh), updateLiveSet(toUpdate, staged.update)));

    // log the staged changes and our newly marked readers
    marked.addAll(fresh);
    logged.log(staged);

    // setup our tracker, and mark our prior versions replaced, also releasing our references to them
    // we do not replace/release obsoleted readers, since we may need to restore them on rollback
    accumulate = setReplaced(filterOut(toUpdate, staged.obsolete), accumulate);
    accumulate = release(selfRefs(filterOut(toUpdate, staged.obsolete)), accumulate);

    staged.clear();
    return accumulate;
}
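// Hedged sketch of the stage-then-publish pattern this method implements
// (`replacement` and `retired` are hypothetical readers already guarded by txn):
// update() and obsolete() only stage changes; nothing is visible to other users
// of the Tracker until checkpoint() applies the whole batch atomically.
txn.update(replacement, true);      // staged: a new version of an original reader
txn.obsolete(txn.current(retired)); // staged: remove another reader from the live set
txn.checkpoint();                   // both changes become visible atomically, or an exception is thrown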
public StreamReceiveTask(StreamSession session, UUID cfId, int totalFiles, long totalSize)
{
    super(session, cfId);
    this.totalFiles = totalFiles;
    this.totalSize = totalSize;
    // this is an "offline" transaction, as we currently manually expose the sstables once done;
    // this should be revisited at a later date, so that LifecycleTransaction manages all sstable state changes
    this.txn = LifecycleTransaction.offline(OperationType.STREAM);
    this.sstables = new ArrayList<>(totalFiles);
}
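// Hedged sketch of the offline-transaction lifecycle: an offline transaction is
// not wired to a Tracker, so it only guards cleanup of new sstables until they
// are exposed manually. Transactional is AutoCloseable, so close() aborts the
// transaction (deleting partial files) if finish() was never reached.
try (LifecycleTransaction txn = LifecycleTransaction.offline(OperationType.STREAM))
{
    // ... write incoming sstables under txn ...
    txn.finish(); // after this, close() is a no-op; the caller then exposes the sstables
}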
@Override
public Iterable<SSTableReader> filterSSTables(LifecycleTransaction input)
{
    return input.originals();
}
/**
 * mark this reader as for obsoletion : on checkpoint() the reader will be removed from the live set
 */
public void obsolete(SSTableReader reader)
{
    logger.trace("Staging for obsolescence {}", reader);
    // check this is: a reader guarded by the transaction, an instance we have already worked with
    // and that we haven't already obsoleted it, nor do we have other changes staged for it
    assert identities.contains(reader.instanceId) : "only reader instances that have previously been provided may be obsoleted: " + reader;
    assert originals.contains(reader) : "only readers in the 'original' set may be obsoleted: " + reader + " vs " + originals;
    assert !(logged.obsolete.contains(reader) || staged.obsolete.contains(reader)) : "may not obsolete a reader that has already been obsoleted: " + reader;
    assert !staged.update.contains(reader) : "may not obsolete a reader that has a staged update (must checkpoint first): " + reader;
    assert current(reader) == reader : "may only obsolete the latest version of the reader: " + reader;
    staged.obsolete.add(reader);
}
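// Hedged sketch of the constraint the asserts encode: always obsolete the
// *latest* version of an original reader, as moveStarts() above does.
// `original` is a hypothetical member of txn.originals().
SSTableReader latest = txn.current(original); // may differ from `original` after early-open updates
if (!txn.isObsolete(latest))
    txn.obsolete(latest); // staged; takes effect at the next checkpoint()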
/**
 * @return a Transaction over the provided sstables, if we are able to mark them as compacting before anyone else;
 * null otherwise
 */
public LifecycleTransaction tryModify(Iterable<SSTableReader> sstables, OperationType operationType)
{
    if (Iterables.isEmpty(sstables))
        return new LifecycleTransaction(this, operationType, sstables);
    if (null == apply(permitCompacting(sstables), updateCompacting(emptySet(), sstables)))
        return null;
    return new LifecycleTransaction(this, operationType, sstables);
}
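// Hedged usage sketch: tryModify() is the compare-and-set entry point for
// claiming sstables. `tracker` and `candidates` are hypothetical.
LifecycleTransaction txn = tracker.tryModify(candidates, OperationType.COMPACTION);
if (txn == null)
    return; // lost the race: another operation already marked some candidate compacting
try
{
    // ... perform the operation, calling txn.finish() on success ...
}
finally
{
    txn.close(); // aborts the transaction if finish() was never reached
}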
/**
 * remove the readers from the set we're modifying
 */
public void cancel(Iterable<SSTableReader> cancels)
{
    for (SSTableReader cancel : cancels)
        cancel(cancel);
}
// (from the abort path) replace all updated readers with versions restored to their original state
List<SSTableReader> restored = restoreUpdatedOriginals();
List<SSTableReader> invalid = Lists.newArrayList(Iterables.concat(logged.update, logged.obsolete));
accumulate = tracker.apply(updateLiveSet(logged.update, restored), accumulate);