
How to use Tracker in org.apache.cassandra.db.lifecycle

Best Java code snippets using org.apache.cassandra.db.lifecycle.Tracker (Showing top 20 results out of 315)

origin: jsevellec/cassandra-unit

public LifecycleTransaction tryModify(SSTableReader sstable, OperationType operationType)
{
  return tryModify(singleton(sstable), operationType);
}
origin: jsevellec/cassandra-unit

/**
 * construct a Transaction for use in an offline operation
 */
public static LifecycleTransaction offline(OperationType operationType, Iterable<SSTableReader> readers)
{
  // if offline, for simplicity we just use a dummy tracker
  Tracker dummy = new Tracker(null, false);
  dummy.addInitialSSTables(readers);
  dummy.apply(updateCompacting(emptySet(), readers));
  return new LifecycleTransaction(dummy, operationType, readers);
}
origin: org.apache.cassandra/cassandra-all

public void replaceFlushed(Memtable memtable, Iterable<SSTableReader> sstables)
{
  assert !isDummy();
  if (Iterables.isEmpty(sstables))
  {
    // sstable may be null if we flushed batchlog and nothing needed to be retained
    // if it's null, we don't care what state the cfstore is in, we just replace it and continue
    apply(View.replaceFlushed(memtable, null));
    return;
  }
  sstables.forEach(SSTableReader::setupOnline);
  // back up before creating a new Snapshot (which makes the new one eligible for compaction)
  maybeIncrementallyBackup(sstables);
  apply(View.replaceFlushed(memtable, sstables));
  Throwable fail;
  fail = updateSizeTracking(emptySet(), sstables, null);
  notifyDiscarded(memtable);
  // TODO: if we're invalidated, should we notifyadded AND removed, or just skip both?
  fail = notifyAdded(sstables, fail);
  if (!isDummy() && !cfstore.isValid())
    dropSSTables();
  maybeFail(fail);
}
origin: org.apache.cassandra/cassandra-all

public void addSSTables(Iterable<SSTableReader> sstables)
{
  addInitialSSTables(sstables);
  maybeIncrementallyBackup(sstables);
  notifyAdded(sstables);
}
origin: org.apache.cassandra/cassandra-all

public Throwable dropSSTablesIfInvalid(Throwable accumulate)
{
  if (!isDummy() && !cfstore.isValid())
    accumulate = dropSSTables(accumulate);
  return accumulate;
}
origin: org.apache.cassandra/cassandra-all

public void addInitialSSTables(Iterable<SSTableReader> sstables)
{
  if (!isDummy())
    setupOnline(sstables);
  apply(updateLiveSet(emptySet(), sstables));
  maybeFail(updateSizeTracking(emptySet(), sstables, null));
  // no notifications or backup necessary
}
origin: org.apache.cassandra/cassandra-all

Pair<View, View> result = apply(view -> {
  Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
  return updateLiveSet(toremove, emptySet()).apply(view);
});
// ... (non-contiguous lines from the same source file)
accumulate = updateSizeTracking(removed, emptySet(), accumulate);
accumulate = release(selfRefs(removed), accumulate);
accumulate = notifySSTablesChanged(removed, Collections.<SSTableReader>emptySet(), txnLogs.type(), accumulate);
origin: org.apache.cassandra/cassandra-all

  @VisibleForTesting
  public void removeUnsafe(Set<SSTableReader> toRemove)
  {
    Pair<View, View> result = apply(view -> {
      return updateLiveSet(toRemove, emptySet()).apply(view);
    });
  }
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

Pair<View, View> result = apply(view -> {
  Set<SSTableReader> toremove = copyOf(filter(view.sstables, and(remove, notIn(view.compacting))));
  return updateLiveSet(toremove, emptySet()).apply(view);
});
// ... (non-contiguous lines from the same source file)
accumulate = updateSizeTracking(removed, emptySet(), accumulate);
accumulate = release(selfRefs(removed), accumulate);
origin: org.apache.cassandra/cassandra-all

private Throwable unmarkCompacting(Set<SSTableReader> unmark, Throwable accumulate)
{
  accumulate = tracker.apply(updateCompacting(unmark, emptySet()), accumulate);
  // when the CFS is invalidated, it will call unreferenceSSTables().  However, unreferenceSSTables only deals
  // with sstables that aren't currently being compacted.  If there are ongoing compactions that finish or are
  // interrupted after the CFS is invalidated, those sstables need to be unreferenced as well, so we do that here.
  accumulate = tracker.dropSSTablesIfInvalid(accumulate);
  return accumulate;
}
origin: org.apache.cassandra/cassandra-all

public Throwable dropSSTables(Throwable accumulate)
{
  return dropSSTables(Predicates.<SSTableReader>alwaysTrue(), OperationType.UNKNOWN, accumulate);
}
origin: org.apache.cassandra/cassandra-all

  public LifecycleTransaction call()
  {
    assert data.getCompacting().isEmpty() : data.getCompacting();
    Iterable<SSTableReader> sstables = getLiveSSTables();
    sstables = AbstractCompactionStrategy.filterSuspectSSTables(sstables);
    sstables = ImmutableList.copyOf(sstables);
    LifecycleTransaction modifier = data.tryModify(sstables, operationType);
    assert modifier != null: "something marked things compacting while compactions are disabled";
    return modifier;
  }
};
origin: com.strapdata.cassandra/cassandra-all

if (DatabaseDescriptor.isDaemonInitialized())
  initialMemtable = new Memtable(new AtomicReference<>(CommitLog.instance.getCurrentPosition()), this);
data = new Tracker(initialMemtable, loadSSTables);
// ... (non-contiguous lines from the same source file)
data.addInitialSSTables(sstables);
origin: jsevellec/cassandra-unit

public boolean isOffline()
{
  return tracker.isDummy();
}
origin: org.apache.cassandra/cassandra-all

/**
 * point of no return: commit all changes, but leave all readers marked as compacting
 */
public Throwable doCommit(Throwable accumulate)
{
  assert staged.isEmpty() : "must be no actions introduced between prepareToCommit and a commit";
  if (logger.isTraceEnabled())
    logger.trace("Committing transaction over {} staged: {}, logged: {}", originals, staged, logged);
  // accumulate must be null if we have been used correctly, so fail immediately if it is not
  maybeFail(accumulate);
  // transaction log commit failure means we must abort; safe commit is not possible
  maybeFail(log.commit(null));
  // this is now the point of no return; we cannot safely rollback, so we ignore exceptions until we're done
  // we restore state by obsoleting our obsolete files, releasing our references to them, and updating our size
  // and notification status for the obsolete and new files
  accumulate = markObsolete(obsoletions, accumulate);
  accumulate = tracker.updateSizeTracking(logged.obsolete, logged.update, accumulate);
  accumulate = release(selfRefs(logged.obsolete), accumulate);
  accumulate = tracker.notifySSTablesChanged(originals, logged.update, log.type(), accumulate);
  return accumulate;
}
origin: org.apache.cassandra/cassandra-all

/**
 * construct an empty Transaction with no existing readers
 */
@SuppressWarnings("resource") // log closed during postCleanup
public static LifecycleTransaction offline(OperationType operationType)
{
  Tracker dummy = new Tracker(null, false);
  return new LifecycleTransaction(dummy, new LogTransaction(operationType, dummy), Collections.emptyList());
}
origin: org.apache.cassandra/cassandra-all

public void addSSTables(Collection<SSTableReader> sstables)
{
  data.addSSTables(sstables);
  CompactionManager.instance.submitBackground(this);
}
origin: org.apache.cassandra/cassandra-all

Throwable apply(Function<View, View> function, Throwable accumulate)
{
  try
  {
    apply(function);
  }
  catch (Throwable t)
  {
    accumulate = merge(accumulate, t);
  }
  return accumulate;
}
origin: com.netflix.sstableadaptor/sstable-adaptor-cassandra

/**
 * point of no return: commit all changes, but leave all readers marked as compacting
 */
public Throwable doCommit(Throwable accumulate)
{
  assert staged.isEmpty() : "must be no actions introduced between prepareToCommit and a commit";
  if (logger.isTraceEnabled())
    logger.trace("Committing transaction over {} staged: {}, logged: {}", originals, staged, logged);
  // accumulate must be null if we have been used correctly, so fail immediately if it is not
  maybeFail(accumulate);
  // transaction log commit failure means we must abort; safe commit is not possible
  maybeFail(log.commit(null));
  // this is now the point of no return; we cannot safely rollback, so we ignore exceptions until we're done
  // we restore state by obsoleting our obsolete files, releasing our references to them, and updating our size
  // and notification status for the obsolete and new files
  accumulate = markObsolete(obsoletions, accumulate);
  accumulate = tracker.updateSizeTracking(logged.obsolete, logged.update, accumulate);
  accumulate = release(selfRefs(logged.obsolete), accumulate);
  //accumulate = tracker.notifySSTablesChanged(originals, logged.update, log.type(), accumulate);
  return accumulate;
}
origin: jsevellec/cassandra-unit

public void addInitialSSTables(Iterable<SSTableReader> sstables)
{
  if (!isDummy())
    setupOnline(sstables);
  apply(updateLiveSet(emptySet(), sstables));
  maybeFail(updateSizeTracking(emptySet(), sstables, null));
  // no notifications or backup necessary
}
org.apache.cassandra.db.lifecycle.Tracker

Javadoc

Tracker tracks the live View of the data store for a table.
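
To tie the snippets above together, here is a minimal, untested sketch of the same "dummy tracker" pattern that LifecycleTransaction.offline(...) uses: construct a Tracker with no memtable, seed it with readers, and mark them compacting through tryModify. The class OfflineTrackerExample, the method openOffline and the readers parameter are illustrative placeholders, not Cassandra API; only Tracker, LifecycleTransaction, SSTableReader and OperationType come from the code shown on this page, and the import paths assume the Cassandra 3.x line these snippets are taken from.

import java.util.Set;

import org.apache.cassandra.db.compaction.OperationType;
import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
import org.apache.cassandra.db.lifecycle.Tracker;
import org.apache.cassandra.io.sstable.format.SSTableReader;

public class OfflineTrackerExample
{
  // Sketch only: seed a dummy (offline) Tracker with some readers and open a
  // transaction over them, mirroring the offline(...) snippet above.
  public static LifecycleTransaction openOffline(Set<SSTableReader> readers)
  {
    // null memtable + loadsstables=false gives the "dummy" tracker used for offline work
    Tracker dummy = new Tracker(null, false);

    // register the readers with the tracker's live set (no notifications, no backup)
    dummy.addInitialSSTables(readers);

    // tryModify marks the readers compacting and wraps them in a transaction;
    // it returns null if any of them are already marked compacting
    LifecycleTransaction txn = dummy.tryModify(readers, OperationType.UNKNOWN);
    if (txn == null)
      throw new IllegalStateException("some readers are already marked compacting");
    return txn;
  }
}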

Most used methods

  • tryModify
  • <init>
  • addInitialSSTables
  • apply
    atomically tests permit against the view and applies function to it, if permit yields true, returning the pair of pre- and post-application views (see the error-accumulation sketch after this list)
  • dropSSTables
  • dropSSTablesIfInvalid
  • emptySet
  • isDummy
  • updateSizeTracking
  • addSSTables
  • getCompacting
  • getMemtableFor
    get the Memtable that the ordered writeOp should be directed to
  • getUncompacting
  • getView
  • markFlushing
  • maybeIncrementallyBackup
  • notify
  • notifyAdded
  • notifyDeleting
  • notifyDiscarded
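
apply(Function, Throwable), updateSizeTracking, dropSSTables and dropSSTablesIfInvalid all follow the same error-accumulation idiom visible in the snippets: each step merges any failure into a Throwable accumulator instead of throwing, and the caller rethrows once at the end via maybeFail, so later cleanup steps still run. Below is a minimal, untested sketch of that idiom from a caller's perspective. It assumes the static helpers maybeFail and merge live in org.apache.cassandra.utils.Throwables (they appear unqualified in the snippets above); TrackerCleanupExample, dropIfInvalidThenFail and doLocalCleanup are illustrative names only.

import org.apache.cassandra.db.lifecycle.Tracker;

import static org.apache.cassandra.utils.Throwables.maybeFail;
import static org.apache.cassandra.utils.Throwables.merge;

public class TrackerCleanupExample
{
  // Sketch only: run several cleanup steps, accumulating failures instead of
  // throwing, and fail once at the end, the same pattern as doCommit(...) above.
  public static void dropIfInvalidThenFail(Tracker tracker)
  {
    Throwable accumulate = null;

    // drops the tracked sstables if the owning table has been invalidated;
    // any failure is merged into the accumulator rather than thrown
    accumulate = tracker.dropSSTablesIfInvalid(accumulate);

    try
    {
      // caller-specific cleanup; stands in for the release(...)/notify(...) steps above
      doLocalCleanup();
    }
    catch (Throwable t)
    {
      accumulate = merge(accumulate, t);
    }

    // rethrow the accumulated failure (if any) only after every step has run
    maybeFail(accumulate);
  }

  private static void doLocalCleanup()
  {
    // illustrative no-op
  }
}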
