/**
 * Builds the modification callback that the transaction subsystem invokes for a
 * metadata index mutation.
 *
 * @param indexOp       the mutation kind (INSERT, DELETE, or UPSERT)
 * @param txnCtx        the transaction context the callback reports to
 * @param metadataIndex the metadata index being modified
 * @return the callback matching {@code indexOp}
 * @throws IllegalStateException if {@code indexOp} is not a supported mutation
 */
private IModificationOperationCallback createIndexModificationCallback(Operation indexOp, ITransactionContext txnCtx,
        IMetadataIndex metadataIndex) {
    if (indexOp == Operation.INSERT || indexOp == Operation.DELETE) {
        /*
         * Regardless of the index type (primary or secondary index), secondary index modification
         * callback is given. This is still correct since metadata index operation doesn't require
         * any lock from ConcurrentLockMgr.
         */
        return new SecondaryIndexModificationOperationCallback(metadataIndex.getDatasetId(),
                metadataIndex.getPrimaryKeyIndexes(), txnCtx, transactionSubsystem.getLockManager(),
                transactionSubsystem, metadataIndex.getResourceId(), metadataStoragePartition,
                ResourceType.LSM_BTREE, indexOp);
    }
    if (indexOp == Operation.UPSERT) {
        return new UpsertOperationCallback(metadataIndex.getDatasetId(), metadataIndex.getPrimaryKeyIndexes(),
                txnCtx, transactionSubsystem.getLockManager(), transactionSubsystem, metadataIndex.getResourceId(),
                metadataStoragePartition, ResourceType.LSM_BTREE, indexOp);
    }
    throw new IllegalStateException("Unknown operation type: " + indexOp);
}
// Obtain an accessor for performing the modification on the LSM index.
ILSMIndexAccessor indexAccessor = lsmIndex.createAccessor(iap);
// Mark the transaction as a write transaction, then register the index as a
// participating resource so the transaction context tracks this modification.
txnCtx.setWriteTxn(true);
txnCtx.register(metadataIndex.getResourceId(),
        // partition number is derived from the resource's relative storage path
        StoragePathUtil.getPartitionNumFromRelativePath(resourceName), lsmIndex, modCallback,
        metadataIndex.isPrimaryIndex());
@Test public void fullMergeTest() throws Exception { String datasetName = "ds"; TestDataUtil.createIdOnlyDataset(datasetName); INcApplicationContext appCtx = (INcApplicationContext) (integrationUtil.ncs[0].getApplicationContext()); IDatasetLifecycleManager dlcm = appCtx.getDatasetLifecycleManager(); IMetadataIndex dsIdx = MetadataPrimaryIndexes.DATASET_DATASET; DatasetInfo datasetInfo = dlcm.getDatasetInfo(dsIdx.getDatasetId().getId()); // flush to ensure multiple disk components dlcm.flushAllDatasets(); datasetInfo.waitForIO(); AbstractLSMIndex index = (AbstractLSMIndex) dlcm.getIndex(dsIdx.getDatasetId().getId(), dsIdx.getResourceId()); Assert.assertTrue(index.getDiskComponents().size() > 1); // trigger full merge and ensure we have a single disk component when merge completes testExecutor.executeSqlppUpdateOrDdl("COMPACT DATASET Metadata.`Dataset`;", TestCaseContext.OutputFormat.CLEAN_JSON); datasetInfo.waitForIO(); Assert.assertTrue(index.getDiskComponents().size() == 1); } }
// Sanity check: the resource id recorded in the local resource file must match the
// in-memory metadata index's id; a mismatch indicates stale or corrupted storage state.
if (index.getResourceId() != resource.getId()) {
    throw new HyracksDataException("Resource Id doesn't match expected metadata index resource id");
// Resolve the runtime structures for the metadata index: its dataset info, the loaded
// LSM index instance, and the primary-index operation tracker that coordinates its I/O.
DatasetInfo datasetInfo = dlcm.getDatasetInfo(idx.getDatasetId().getId());
AbstractLSMIndex index = (AbstractLSMIndex) appCtx.getDatasetLifecycleManager()
        .getIndex(idx.getDatasetId().getId(), idx.getResourceId());
PrimaryIndexOperationTracker opTracker = (PrimaryIndexOperationTracker) index.getOperationTracker();
// Begin a second metadata transaction. NOTE(review): presumably consumed by steps that
// follow outside this view — confirm against the rest of the enclosing test.
final MetadataTransactionContext mdTxn2 = MetadataManager.INSTANCE.beginTransaction();