/**
 * Finalizes the single transaction owned by this joblet: commits it when the job
 * succeeded, aborts it when the job failed.
 */
@Override
public void jobletFinish(JobStatus jobStatus) {
    try {
        final ITransactionManager txnManager =
                ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext())
                        .getTransactionSubsystem().getTransactionManager();
        final ITransactionContext txnContext = txnManager.getTransactionContext(txnId);
        txnContext.setWriteTxn(transactionalWrite);
        if (jobStatus == JobStatus.FAILURE) {
            txnManager.abortTransaction(txnId);
        } else {
            txnManager.commitTransaction(txnId);
        }
    } catch (ACIDException e) {
        throw new Error(e);
    }
}
/**
 * Starts a new metadata transaction with full (ATOMIC) atomicity.
 */
@Override
public void beginTransaction(TxnId transactionId) throws RemoteException {
    transactionSubsystem.getTransactionManager()
            .beginTransaction(transactionId, new TransactionOptions(AtomicityLevel.ATOMIC));
}
/**
 * Returns the largest transaction id known to this node: the max of the metadata
 * manager's view (0 when it is not yet initialized) and the local transaction
 * manager's view.
 *
 * @throws IllegalStateException if the transaction subsystem has not been initialized
 */
@Override
public long getMaxTxnId() {
    if (txnSubsystem == null) {
        throw new IllegalStateException("cannot determine max txn id before txnSubsystem is initialized!");
    }
    final long metadataMax = MetadataManager.INSTANCE == null ? 0 : MetadataManager.INSTANCE.getMaxTxnId();
    final long localMax = txnSubsystem.getTransactionManager().getMaxTxnId();
    return Math.max(metadataMax, localMax);
}
IDatasetLifecycleManager dsLifecycleMgr = ncAppCtx.getDatasetLifecycleManager(); RecordTupleGenerator tupleGenerator = StorageTestUtils.getTupleGenerator(); ITransactionContext txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx), new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)); boolean failed = false; nc.getTransactionManager().abortTransaction(txnCtx.getTxnId()); } else { nc.getTransactionManager().commitTransaction(txnCtx.getTxnId());
@SuppressWarnings("squid:S1181") @Override public void commitTransaction(TxnId txnId) throws RemoteException { try { transactionSubsystem.getTransactionManager().commitTransaction(txnId); } catch (Throwable th) { // Metadata node should abort all Metadata transactions on re-start LOGGER.fatal("Failure committing a metadata transaction", th); ExitUtil.halt(ExitUtil.EC_FAILED_TO_COMMIT_METADATA_TXN); } }
/**
 * Binds this operator to its transaction context, prepares the log record, and —
 * unless this operator is a sink — initializes frame access/append state and opens
 * the downstream writer.
 */
@Override
public void open() throws HyracksDataException {
    try {
        transactionContext = transactionManager.getTransactionContext(txnId);
        transactionContext.setWriteTxn(isWriteTransaction);
        final ILogMarkerCallback callback = TaskUtil.get(ILogMarkerCallback.KEY_MARKER_CALLBACK, ctx);
        logRecord = new LogRecord(callback);
        // A sink has no downstream consumer, so skip appender setup and super.open().
        if (!isSink) {
            initAccessAppend(ctx);
            super.open();
        }
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
nc.getTransactionManager().commitTransaction(txnCtx.getTxnId()); txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx), new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)); insertOp = StorageTestUtils.getInsertPipeline(nc, ctx); nc.getTransactionManager().commitTransaction(txnCtx.getTxnId()); StorageTestUtils.searchAndAssertCount(nc, PARTITION, StorageTestUtils.TOTAL_NUM_OF_RECORDS);
@SuppressWarnings("squid:S1181") @Override public void abortTransaction(TxnId txnId) throws RemoteException { try { transactionSubsystem.getTransactionManager().abortTransaction(txnId); } catch (Throwable th) { // Metadata node should abort all uncommitted transactions on re-start LOGGER.fatal("Failure committing a metadata transaction", th); ExitUtil.halt(ExitUtil.EC_FAILED_TO_ABORT_METADATA_TXN); } }
txnSubsystem.getTransactionManager().ensureMaxTxnId(txnId);
/**
 * Test teardown: drains the writer actor, commits the test transaction, and
 * destroys the index dataflow helpers.
 */
@After
public void destroyIndex() throws Exception {
    // Ask the actor to close the insert pipeline and wait until it has done so.
    final Request closeRequest = new Request(Request.Action.INSERT_CLOSE);
    actor.add(closeRequest);
    closeRequest.await();
    nc.getTransactionManager().commitTransaction(txnCtx.getTxnId());
    // The secondary index is not created by every test case.
    if (secondaryIndexDataflowHelper != null) {
        secondaryIndexDataflowHelper.destroy();
    }
    primaryIndexDataflowHelper.destroy();
    actor.stop();
}
/**
 * Builds a lock-then-search callback bound to the transaction context that the
 * job's event listener factory associates with this dataset.
 */
@Override
public LockThenSearchOperationCallback createSearchOperationCallback(long resourceId, IHyracksTaskContext ctx,
        IOperatorNodePushable operatorNodePushable) throws HyracksDataException {
    final ITransactionSubsystem txnSubsystem = txnSubsystemProvider.getTransactionSubsystem(ctx);
    try {
        final IJobletEventListenerFactory listenerFactory = ctx.getJobletContext().getJobletEventListenerFactory();
        final TxnId txnId = ((IJobEventListenerFactory) listenerFactory).getTxnId(datasetId);
        final ITransactionContext txnCtx = txnSubsystem.getTransactionManager().getTransactionContext(txnId);
        return new LockThenSearchOperationCallback(new DatasetId(datasetId), resourceId, primaryKeyFields,
                txnSubsystem, txnCtx, operatorNodePushable);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
JobId jobId = nc.newJobId(); IHyracksTaskContext ctx = nc.createTestContext(jobId, 0, true); ITransactionContext txnCtx = nc.getTransactionManager().beginTransaction(nc.getTxnJobId(ctx), new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)); LSMInsertDeleteOperatorNodePushable insertOp = nc.getTransactionManager().commitTransaction(txnCtx.getTxnId()); IndexDataflowHelperFactory iHelperFactory = new IndexDataflowHelperFactory(nc.getStorageManager(), indexInfo.getFileSplitProvider());
/**
 * Pushes one tuple through an operator rigged to fail on modification, and asserts
 * that the resulting failure leads to an abort of the transaction.
 */
private void testAbort(ITupleReference tuple) throws Exception {
    setFailModificationCallback(lsmBtree);
    abortOp.open();
    boolean abortObserved = false;
    final VSizeFrame outputFrame = new VSizeFrame(ctx);
    final FrameTupleAppender appender = new FrameTupleAppender(outputFrame);
    try {
        DataflowUtils.addTupleToFrame(appender, tuple, abortOp);
        appender.write(abortOp, true);
    } catch (HyracksDataException e) {
        // The rigged callback made the write fail; restore normal ops and abort.
        StorageTestUtils.allowAllOps(lsmBtree);
        nc.getTransactionManager().abortTransaction(abortTxnCtx.getTxnId());
        abortObserved = true;
    } finally {
        abortOp.close();
    }
    Assert.assertTrue(abortObserved);
}
/**
 * Wires up the per-node transaction subsystem: transaction manager, lock manager,
 * checkpoint manager, log manager (with replication when enabled), recovery
 * manager, and — when profiling is enabled — the entity commit profiler.
 *
 * NOTE(review): initialization order matters here — the checkpoint manager is
 * consulted for the latest checkpoint before the log and recovery managers are
 * created; do not reorder without verifying dependencies.
 */
public TransactionSubsystem(INcApplicationContext appCtx, IRecoveryManagerFactory recoveryManagerFactory) {
    this.appCtx = appCtx;
    // Node id is used to scope checkpoint files/properties to this NC.
    this.id = appCtx.getServiceContext().getNodeId();
    this.txnProperties = appCtx.getTransactionProperties();
    this.transactionManager = new TransactionManager(this);
    this.lockManager = new ConcurrentLockManager(txnProperties.getLockManagerShrinkTimer());
    final ReplicationProperties repProperties = appCtx.getReplicationProperties();
    final boolean replicationEnabled = repProperties.isReplicationEnabled();
    final CheckpointProperties checkpointProperties = new CheckpointProperties(txnProperties, id);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.log(Level.INFO, "Checkpoint Properties: " + checkpointProperties);
    }
    checkpointManager = CheckpointManagerFactory.create(this, checkpointProperties);
    // Seed the txn id counter from the latest checkpoint so new ids never collide
    // with ids issued before the last restart.
    final Checkpoint latestCheckpoint = checkpointManager.getLatest();
    if (latestCheckpoint != null) {
        transactionManager.ensureMaxTxnId(latestCheckpoint.getMaxTxnId());
    }
    this.logManager = replicationEnabled ? new LogManagerWithReplication(this) : new LogManager(this);
    this.recoveryManager = recoveryManagerFactory.createRecoveryManager(appCtx.getServiceContext(), this);
    // Optional background profiler reporting entity-commit throughput.
    if (txnProperties.isCommitProfilerEnabled()) {
        ecp = new EntityCommitProfiler(this, this.txnProperties.getCommitProfilerReportInterval());
        ((ExecutorService) appCtx.getThreadExecutor()).submit(ecp);
    }
}
/**
 * Finalizes every sub-transaction owned by this joblet: all are committed when the
 * job succeeded, all aborted when it failed.
 */
@Override
public void jobletFinish(JobStatus jobStatus) {
    try {
        final ITransactionManager txnManager =
                ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext())
                        .getTransactionSubsystem().getTransactionManager();
        // One decision applies uniformly to every sub-transaction.
        final boolean commit = jobStatus != JobStatus.FAILURE;
        for (TxnId subTxnId : txnIdMap.values()) {
            final ITransactionContext txnContext = txnManager.getTransactionContext(subTxnId);
            txnContext.setWriteTxn(transactionalWrite);
            if (commit) {
                txnManager.commitTransaction(subTxnId);
            } else {
                txnManager.abortTransaction(subTxnId);
            }
        }
    } catch (ACIDException e) {
        throw new Error(e);
    }
}
/**
 * Test teardown for the multi-partition case: drains every partition's writer,
 * commits the test transaction, then destroys all index helpers and stops the actors.
 */
@After
public void destroyIndex() throws Exception {
    // Close the insert pipeline on every partition and wait for each to finish.
    for (int partition = 0; partition < NUM_PARTITIONS; partition++) {
        final Request closeRequest = new Request(Request.Action.INSERT_CLOSE);
        actors[partition].add(closeRequest);
        closeRequest.await();
    }
    nc.getTransactionManager().commitTransaction(txnCtx.getTxnId());
    for (IIndexDataflowHelper helper : secondaryIndexDataflowHelpers) {
        helper.destroy();
    }
    for (IIndexDataflowHelper helper : primaryIndexDataflowHelpers) {
        helper.destroy();
    }
    for (Actor writer : actors) {
        writer.stop();
    }
}
/**
 * Begins every sub-transaction owned by this joblet with entity-level atomicity.
 */
@Override
public void jobletStart() {
    try {
        final TransactionOptions options =
                new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL);
        // Resolve the transaction manager once; the same options apply to all sub-txns.
        final ITransactionManager txnManager =
                ((INcApplicationContext) jobletContext.getServiceContext().getApplicationContext())
                        .getTransactionSubsystem().getTransactionManager();
        for (TxnId subTxnId : txnIdMap.values()) {
            txnManager.beginTransaction(subTxnId, options);
        }
    } catch (ACIDException e) {
        throw new Error(e);
    }
}
/**
 * Builds an instant-search callback for the primary index, bound to the transaction
 * context that the job's event listener factory associates with this dataset.
 */
@Override
public ISearchOperationCallback createSearchOperationCallback(long resourceId, IHyracksTaskContext ctx,
        IOperatorNodePushable operatorNodePushable) throws HyracksDataException {
    final ITransactionSubsystem txnSubsystem = txnSubsystemProvider.getTransactionSubsystem(ctx);
    try {
        final IJobletEventListenerFactory listenerFactory = ctx.getJobletContext().getJobletEventListenerFactory();
        final TxnId txnId = ((IJobEventListenerFactory) listenerFactory).getTxnId(datasetId);
        final ITransactionContext txnCtx = txnSubsystem.getTransactionManager().getTransactionContext(txnId);
        return new PrimaryIndexInstantSearchOperationCallback(new DatasetId(datasetId), resourceId,
                primaryKeyFields, txnSubsystem.getLockManager(), txnCtx);
    } catch (ACIDException e) {
        throw HyracksDataException.create(e);
    }
}
/**
 * Builds a checkpoint from the current log append LSN, the supplied minimum
 * first-LSN of mutable components, and the max txn id, then persists it and
 * cleans up superseded checkpoints.
 */
protected void capture(long minMCTFirstLSN, boolean sharp) throws HyracksDataException {
    final ILogManager logMgr = txnSubsystem.getLogManager();
    final ITransactionManager txnMgr = txnSubsystem.getTransactionManager();
    final Checkpoint checkpoint = new Checkpoint(getNextCheckpointId(), logMgr.getAppendLSN(), minMCTFirstLSN,
            txnMgr.getMaxTxnId(), sharp, StorageConstants.VERSION);
    persist(checkpoint);
    cleanup();
}
/**
 * Inserts {@code numRecords} generated records through the insert pipeline, commits
 * the transaction, and returns the last tuple inserted (null when numRecords == 0).
 */
private ITupleReference insertRecords(int numRecords) throws Exception {
    StorageTestUtils.allowAllOps(lsmBtree);
    insertOp.open();
    final VSizeFrame outputFrame = new VSizeFrame(ctx);
    final FrameTupleAppender appender = new FrameTupleAppender(outputFrame);
    ITupleReference lastTuple = null;
    for (int record = 0; record < numRecords; record++) {
        lastTuple = tupleGenerator.next();
        DataflowUtils.addTupleToFrame(appender, lastTuple, insertOp);
    }
    // Flush the final, possibly partially filled, frame.
    if (appender.getTupleCount() > 0) {
        appender.write(insertOp, true);
    }
    insertOp.close();
    nc.getTransactionManager().commitTransaction(txnCtx.getTxnId());
    return lastTuple;
}