ILSMIndexAccessor

How to use ILSMIndexAccessor in org.apache.hyracks.storage.am.lsm.common.api

Best Java code snippets using org.apache.hyracks.storage.am.lsm.common.api.ILSMIndexAccessor (Showing top 20 results out of 315)
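
The snippets below share one basic pattern: obtain an ILSMIndexAccessor from an ILSMIndex, perform a modification, and optionally schedule an I/O operation such as a flush. Here is a minimal sketch of that pattern; "lsmIndex" (an activated ILSMIndex) and "tuple" (an ITupleReference) are placeholder names, and the enclosing method is assumed to declare the checked HyracksDataException thrown by the accessor methods.

// Minimal usage sketch; "lsmIndex" and "tuple" are placeholders, not names from the snippets below.
ILSMIndexAccessor accessor = lsmIndex.createAccessor(NoOpIndexAccessParameters.INSTANCE);
try {
  accessor.insert(tuple); // modify the current memory component
  ILSMIOOperation flush = accessor.scheduleFlush(); // ask the LSM harness to flush it to disk
  flush.sync(); // optionally wait for the flush to complete
} finally {
  accessor.destroy(); // release the accessor when done
}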

origin: apache/asterixdb

@Override
public Map<String, Object> getParameters() {
  return accessor.getOpContext().getParameters();
}
origin: apache/asterixdb

private void triggerScheduleMerge(ILSMIndex index, List<ILSMDiskComponent> immutableComponents, int startIndex,
    int endIndex) throws HyracksDataException {
  List<ILSMDiskComponent> mergableComponents =
      new ArrayList<>(immutableComponents.subList(startIndex, endIndex + 1));
  // Reverse the components order back to its original order
  Collections.reverse(mergableComponents);
  index.createAccessor(NoOpIndexAccessParameters.INSTANCE).scheduleMerge(mergableComponents);
}
origin: apache/asterixdb

private static void redo(ILogRecord logRecord, IDatasetLifecycleManager datasetLifecycleManager) {
  try {
    int datasetId = logRecord.getDatasetId();
    long resourceId = logRecord.getResourceId();
    ILSMIndex index = (ILSMIndex) datasetLifecycleManager.getIndex(datasetId, resourceId);
    ILSMIndexAccessor indexAccessor = index.createAccessor(NoOpIndexAccessParameters.INSTANCE);
    ILSMIndexOperationContext opCtx = indexAccessor.getOpContext();
    opCtx.setFilterSkip(true);
    opCtx.setRecovery(true);
    if (logRecord.getNewOp() == AbstractIndexModificationOperationCallback.INSERT_BYTE) {
      indexAccessor.forceInsert(logRecord.getNewValue());
    } else if (logRecord.getNewOp() == AbstractIndexModificationOperationCallback.DELETE_BYTE) {
      indexAccessor.forceDelete(logRecord.getNewValue());
    } else if (logRecord.getNewOp() == AbstractIndexModificationOperationCallback.UPSERT_BYTE) {
      // redo, upsert the new value
      indexAccessor.forceUpsert(logRecord.getNewValue());
    } else if (logRecord.getNewOp() == AbstractIndexModificationOperationCallback.FILTER_BYTE) {
      opCtx.setFilterSkip(false);
      indexAccessor.updateFilter(logRecord.getNewValue());
    } else {
      throw new IllegalStateException("Unsupported OperationType: " + logRecord.getNewOp());
    }
  } catch (Exception e) {
    throw new IllegalStateException("Failed to redo", e);
  }
}
origin: apache/asterixdb

ILSMIndex index =
    (ILSMIndex) datasetLifecycleManager.getIndex(logRecord.getDatasetId(), logRecord.getResourceId());
ILSMIndexAccessor indexAccessor = index.createAccessor(NoOpIndexAccessParameters.INSTANCE);
ILSMIndexOperationContext opCtx = indexAccessor.getOpContext();
opCtx.setFilterSkip(true);
try {
  switch (logRecord.getNewOp()) {
    case AbstractIndexModificationOperationCallback.INSERT_BYTE:
      // Undo an insert by force-deleting the logged value
      indexAccessor.forceDelete(logRecord.getNewValue());
      break;
    case AbstractIndexModificationOperationCallback.DELETE_BYTE:
      // remaining cases truncated in this snippet
  }
} finally {
  indexAccessor.destroy();
}
origin: apache/asterixdb

for (ILSMIndex lsmIndex : dsInfo.getDatasetPartitionOpenIndexes(partition)) {
  ILSMIndexAccessor accessor = lsmIndex.createAccessor(NoOpIndexAccessParameters.INSTANCE);
  accessor.getOpContext().setParameters(flushMap);
  ILSMIOOperation flush = accessor.scheduleFlush();
  lastFlushTime = System.nanoTime();
  scheduledFlushes.put(flush.getTarget().getRelativePath(), (FlushOperation) flush);
}
origin: apache/asterixdb

case INSERT:
  if (i == 0 && isPrimary) {
    lsmAccessor.insert(tuple);
  } else {
    lsmAccessor.forceInsert(tuple);
  }
  break;
case DELETE:
  if (i == 0 && isPrimary) {
    lsmAccessor.delete(tuple);
  } else {
    lsmAccessor.forceDelete(tuple);
  }
  break;
origin: apache/asterixdb

if (!lsmAccessor.tryInsert(tuple)) {
  flushPartialFrame(nextFlushTupleIndex, i);
  nextFlushTupleIndex = i;
  lsmAccessor.insert(tuple);
}
if (!lsmAccessor.tryDelete(tuple)) {
  flushPartialFrame(nextFlushTupleIndex, i);
  nextFlushTupleIndex = i;
  lsmAccessor.delete(tuple);
}
if (!lsmAccessor.tryUpsert(tuple)) {
  flushPartialFrame(nextFlushTupleIndex, i);
  nextFlushTupleIndex = i;
  lsmAccessor.upsert(tuple);
}
if (!lsmAccessor.tryUpdate(tuple)) {
  flushPartialFrame(nextFlushTupleIndex, i);
  nextFlushTupleIndex = i;
  lsmAccessor.update(tuple);
}
origin: apache/asterixdb

  @Override
  public void run() {
    ILSMIndexAccessor lsmAccessor = lsmBtree.createAccessor(NoOpIndexAccessParameters.INSTANCE);
    try {
      dsLifecycleMgr.getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath)
          .refresh();
      ILSMComponentId next = dsLifecycleMgr
          .getComponentIdGenerator(StorageTestUtils.DATASET_ID, PARTITION, indexPath).getId();
      long flushLsn = nc.getTransactionSubsystem().getLogManager().getAppendLSN();
      Map<String, Object> flushMap = new HashMap<>();
      flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
      flushMap.put(LSMIOOperationCallback.KEY_NEXT_COMPONENT_ID, next);
      lsmAccessor.getOpContext().setParameters(flushMap);
      lsmAccessor.deleteComponents(predicate);
    } catch (HyracksDataException e) {
      failure = e;
    }
  }
};
origin: apache/asterixdb

accessor.insert(tuple1);
accessor.scheduleFlush();
accessor.insert(tuple2);
accessor.scheduleFlush();
accessor.insert(tuple3);
accessor.scheduleFlush();
scheduler.modify = true;
ILSMIOOperation merge = accessor.scheduleMerge(btree.getDiskComponents());
merge.sync();
Assert.assertEquals(LSMIOOperationStatus.FAILURE, merge.getStatus());
scheduler.modify = false;
accessor.scheduleMerge(btree.getDiskComponents());
Assert.assertEquals(1, btree.getDiskComponents().size());
btree.deactivate();
origin: apache/asterixdb

for (int i = 0; i < NUM_TUPLES; i++) {
  TupleUtils.createIntegerTuple(builder, tuple, i);
  accessor.insert(tuple);
  ILSMIOOperation flush = accessor.scheduleFlush();
  flush.sync();
  if (flush.getStatus() == LSMIOOperationStatus.FAILURE) {
  accessor.upsert(tuple);
  accessor.scheduleFlush().sync();
  isFoundNull = true;
} else {
  accessor.delete(tuple);
accessor.scheduleFlush().sync();
origin: apache/asterixdb

switch (op) {
  case INSERT:
    indexAccessor.forceInsert(tuple);
    break;
  case DELETE:
    indexAccessor.forceDelete(tuple);
    break;
  case UPSERT:
    indexAccessor.forceUpsert(tuple);
    break;
  default:
origin: apache/asterixdb

  ILSMIOOperation flush = accessor.scheduleFlush();
  flush.sync();
  if (flush.getStatus() == LSMIOOperationStatus.FAILURE) {
accessor.scheduleMerge(((LSMBTree) ctx.getIndex()).getDiskComponents());
origin: apache/asterixdb

lsmAccessor.forceDelete(prevValueTuple);
lsmAccessor.forceInsert(tuple);
origin: apache/asterixdb

  @Override
  public void completeOperation(ILSMIndex index, LSMOperationType opType, ISearchOperationCallback searchCallback,
      IModificationOperationCallback modificationCallback) throws HyracksDataException {
    // Flush will only be handled by last exiting thread.
    if (opType == LSMOperationType.MODIFICATION && threadRefCount.decrementAndGet() == 0
        && index.hasFlushRequestForCurrentMutableComponent()) {
      ILSMIndexAccessor accessor = index.createAccessor(NoOpIndexAccessParameters.INSTANCE);
      accessor.scheduleFlush();
    }
  }
}
origin: apache/asterixdb

accessor.scheduleFlush();
accessor.scheduleFlush();
accessor.scheduleFlush();
IIndexCursor cursor = accessor.createSearchCursor(false);
accessor.scanDiskComponents(cursor);
origin: apache/asterixdb

tuple.reset(tupleBuilder.getFieldEndOffsets(), tupleBuilder.getByteArray());
ILSMIndexAccessor accessor = (ILSMIndexAccessor) ctx.getIndexAccessor();
accessor.insert(tuple);
accessor.scheduleFlush();
origin: apache/asterixdb

public void open() throws HyracksDataException {
  // Open the index and get the instance
  indexDataflowHelper.open();
  index = (ExternalBTree) indexDataflowHelper.getIndexInstance();
  // Create search key and search predicate objects
  searchKey = new ArrayTupleReference();
  searchKeyTupleBuilder = new ArrayTupleBuilder(FilesIndexDescription.FILE_KEY_SIZE);
  searchKeyTupleBuilder.reset();
  searchKeyTupleBuilder.addField(intSerde, currentFileNumber);
  searchKey.reset(searchKeyTupleBuilder.getFieldEndOffsets(), searchKeyTupleBuilder.getByteArray());
  MultiComparator searchCmp = BTreeUtils.getSearchMultiComparator(index.getComparatorFactories(), searchKey);
  searchPredicate = new RangePredicate(searchKey, searchKey, true, true, searchCmp, searchCmp);
  // Create the accessor and the cursor using the passed version
  ISearchOperationCallback searchCallback = searchCallbackFactory
      .createSearchOperationCallback(indexDataflowHelper.getResource().getId(), ctx, null);
  fileIndexAccessor = index.createAccessor(searchCallback, version);
  fileIndexSearchCursor = fileIndexAccessor.createSearchCursor(false);
}
origin: apache/asterixdb

flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
flushMap.put(LSMIOOperationCallback.KEY_NEXT_COMPONENT_ID, next);
lsmAccessor.getOpContext().setParameters(flushMap);
lsmAccessor.deleteComponents(memoryComponentsPredicate);
StorageTestUtils.searchAndAssertCount(nc, PARTITION,
    StorageTestUtils.TOTAL_NUM_OF_RECORDS - StorageTestUtils.RECORDS_PER_COMPONENT);
flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
flushMap.put(LSMIOOperationCallback.KEY_NEXT_COMPONENT_ID, next);
lsmAccessor.getOpContext().setParameters(flushMap);
DiskComponentLsnPredicate pred = new DiskComponentLsnPredicate(lsn);
lsmAccessor.deleteComponents(pred);
StorageTestUtils.searchAndAssertCount(nc, PARTITION,
    StorageTestUtils.TOTAL_NUM_OF_RECORDS - (2 * StorageTestUtils.RECORDS_PER_COMPONENT));
origin: apache/asterixdb

flushMap.put(LSMIOOperationCallback.KEY_FLUSH_LOG_LSN, flushLsn);
ILSMIndexAccessor accessor = index.createAccessor(NoOpIndexAccessParameters.INSTANCE);
accessor.getOpContext().setParameters(flushMap);
long minId = logRecord.getFlushingComponentMinId();
long maxId = logRecord.getFlushingComponentMaxId();
ILSMIOOperation flush = accessor.scheduleFlush();
try {
  flush.sync();
origin: apache/asterixdb

private ILSMIOOperation scheduleFlush(OrderedIndexTestContext ctx)
    throws HyracksDataException, InterruptedException {
  ILSMIndexAccessor accessor =
      (ILSMIndexAccessor) ctx.getIndex().createAccessor(NoOpIndexAccessParameters.INSTANCE);
  return accessor.scheduleFlush();
}
org.apache.hyracks.storage.am.lsm.common.api.ILSMIndexAccessor

Javadoc

Client handle for performing operations (insert/delete/update/search/diskorderscan/merge/flush) on an ILSMHarness. An ILSMIndexAccessor is not thread safe, but different ILSMIndexAccessors can concurrently operate on the same ILSMIndex (i.e., the ILSMIndex must allow concurrent operations).
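
The threading rule above can be illustrated with a short sketch: each thread creates its own accessor rather than sharing one, while the underlying ILSMIndex coordinates the concurrent operations. "sharedIndex" and "tuple" are placeholder names, not identifiers from the AsterixDB codebase.

// One accessor per thread: an ILSMIndexAccessor is not thread safe, but separate accessors
// may operate concurrently on the same ILSMIndex ("sharedIndex" and "tuple" are placeholders).
Runnable writer = () -> {
  try {
    ILSMIndexAccessor accessor = sharedIndex.createAccessor(NoOpIndexAccessParameters.INSTANCE);
    accessor.upsert(tuple);
    accessor.destroy();
  } catch (HyracksDataException e) {
    throw new IllegalStateException(e);
  }
};
new Thread(writer).start();
new Thread(writer).start();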

Most used methods

  • getOpContext
  • scheduleMerge
    Schedule a merge operation
  • forceDelete
    Force-delete an index entry even if the memory component is full, replacing the entry, if found, with an antimatter tuple
  • forceInsert
    Insert a new tuple (failing if a duplicate key entry is found)
  • scheduleFlush
    Schedule a flush operation
  • delete
  • insert
  • createSearchCursor
  • forceUpsert
    Force upserting the tuple into the memory component even if it is full
  • scanDiskComponents
    Open the given cursor for scanning all disk components of the primary index (a combined flush/merge/scan sketch follows this list)
  • upsert
  • deleteComponents
    Delete components that match the passed predicate
  • destroy
  • flush
  • forcePhysicalDelete
  • merge
  • scheduleFullMerge
  • scheduleReplication
  • search
  • tryDelete
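
As referenced in the scanDiskComponents entry above, here is a combined sketch of the flush, merge, and scan methods, loosely modeled on the test snippets earlier on this page. "lsmIndex" is a placeholder for an activated LSM index (for example an LSMBTree), and the enclosing method is assumed to declare HyracksDataException.

// Flush the memory component, merge all disk components, then scan the merged result.
ILSMIndexAccessor accessor = lsmIndex.createAccessor(NoOpIndexAccessParameters.INSTANCE);
ILSMIOOperation flush = accessor.scheduleFlush();
flush.sync(); // wait until the memory component has been persisted
ILSMIOOperation merge = accessor.scheduleMerge(lsmIndex.getDiskComponents());
merge.sync(); // wait until the merged component has replaced its inputs
IIndexCursor cursor = accessor.createSearchCursor(false);
try {
  accessor.scanDiskComponents(cursor); // opens the cursor over all disk components
  while (cursor.hasNext()) {
    cursor.next();
    // process cursor.getTuple()
  }
} finally {
  cursor.close();
  accessor.destroy();
}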
