/**
 * Builds the transaction-aware modification callback for an operation against a
 * metadata index.
 *
 * @param indexOp the index operation being performed (INSERT, DELETE or UPSERT)
 * @param txnCtx the transaction context the callback reports to
 * @param metadataIndex the metadata index being modified
 * @return the callback appropriate for {@code indexOp}
 * @throws IllegalStateException if {@code indexOp} is not a supported operation
 */
private IModificationOperationCallback createIndexModificationCallback(Operation indexOp,
        ITransactionContext txnCtx, IMetadataIndex metadataIndex) {
    if (indexOp == Operation.INSERT || indexOp == Operation.DELETE) {
        /*
         * Regardless of the index type (primary or secondary index), secondary index modification
         * callback is given. This is still correct since metadata index operation doesn't require
         * any lock from ConcurrentLockMgr.
         */
        return new SecondaryIndexModificationOperationCallback(metadataIndex.getDatasetId(),
                metadataIndex.getPrimaryKeyIndexes(), txnCtx, transactionSubsystem.getLockManager(),
                transactionSubsystem, metadataIndex.getResourceId(), metadataStoragePartition,
                ResourceType.LSM_BTREE, indexOp);
    }
    if (indexOp == Operation.UPSERT) {
        return new UpsertOperationCallback(metadataIndex.getDatasetId(), metadataIndex.getPrimaryKeyIndexes(),
                txnCtx, transactionSubsystem.getLockManager(), transactionSubsystem,
                metadataIndex.getResourceId(), metadataStoragePartition, ResourceType.LSM_BTREE, indexOp);
    }
    throw new IllegalStateException("Unknown operation type: " + indexOp);
}
/**
 * Inserts the given metadata datasets into the physical dataset index. Should be
 * performed only when bootstrapping a new universe.
 *
 * @param mdTxnCtx the metadata transaction to run the inserts under
 * @param indexes the metadata indexes whose dataset entries are inserted
 * @throws AlgebricksException if adding a dataset entry fails
 */
public static void insertMetadataDatasets(MetadataTransactionContext mdTxnCtx, IMetadataIndex[] indexes)
        throws AlgebricksException {
    for (IMetadataIndex metadataIndex : indexes) {
        // Metadata datasets are internal, hash-partitioned BTree datasets; the
        // partitioning expressions double as the primary key, and the item type
        // lives in the same dataverse as the dataset itself.
        IDatasetDetails details = new InternalDatasetDetails(FileStructure.BTREE, PartitioningStrategy.HASH,
                metadataIndex.getPartitioningExpr(), metadataIndex.getPartitioningExpr(), null,
                metadataIndex.getPartitioningExprType(), false, null);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                new Dataset(metadataIndex.getDataverseName(), metadataIndex.getIndexedDatasetName(),
                        metadataIndex.getDataverseName(), metadataIndex.getPayloadRecordType().getTypeName(),
                        metadataIndex.getNodeGroupName(), GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME,
                        GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES, details,
                        new HashMap<String, String>(), DatasetType.INTERNAL,
                        metadataIndex.getDatasetId().getId(), MetadataUtil.PENDING_NO_OP));
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Finished inserting initial datasets.");
    }
}
@Test public void fullMergeTest() throws Exception { String datasetName = "ds"; TestDataUtil.createIdOnlyDataset(datasetName); INcApplicationContext appCtx = (INcApplicationContext) (integrationUtil.ncs[0].getApplicationContext()); IDatasetLifecycleManager dlcm = appCtx.getDatasetLifecycleManager(); IMetadataIndex dsIdx = MetadataPrimaryIndexes.DATASET_DATASET; DatasetInfo datasetInfo = dlcm.getDatasetInfo(dsIdx.getDatasetId().getId()); // flush to ensure multiple disk components dlcm.flushAllDatasets(); datasetInfo.waitForIO(); AbstractLSMIndex index = (AbstractLSMIndex) dlcm.getIndex(dsIdx.getDatasetId().getId(), dsIdx.getResourceId()); Assert.assertTrue(index.getDiskComponents().size() > 1); // trigger full merge and ensure we have a single disk component when merge completes testExecutor.executeSqlppUpdateOrDdl("COMPACT DATASET Metadata.`Dataset`;", TestCaseContext.OutputFormat.CLEAN_JSON); datasetInfo.waitForIO(); Assert.assertTrue(index.getDiskComponents().size() == 1); } }
final int datasetId = index.getDatasetId().getId();
// Reserve the memory budget for this metadata dataset up front; bootstrap cannot
// proceed without it, so a failed reservation is fatal. Reuse the already-extracted
// datasetId instead of re-deriving it from the index (consistency with the message below).
if (!appContext.getDatasetMemoryManager().reserve(datasetId)) {
    throw new IllegalStateException("Failed to reserve memory for metadata dataset (" + datasetId + ")");
// Flush all datasets so in-memory components reach disk before inspecting LSM state.
dlcm.flushAllDatasets();
IMetadataIndex idx = MetadataPrimaryIndexes.NODEGROUP_DATASET;
DatasetInfo datasetInfo = dlcm.getDatasetInfo(idx.getDatasetId().getId());
// Look up the live LSM index instance backing the NodeGroup metadata dataset.
AbstractLSMIndex index = (AbstractLSMIndex) appCtx.getDatasetLifecycleManager()
        .getIndex(idx.getDatasetId().getId(), idx.getResourceId());
// NOTE(review): opTracker and the second metadata transaction are acquired here but
// used below this chunk — presumably to coordinate/inspect flush state; confirm in full file.
PrimaryIndexOperationTracker opTracker = (PrimaryIndexOperationTracker) index.getOperationTracker();
final MetadataTransactionContext mdTxn2 = MetadataManager.INSTANCE.beginTransaction();