/**
 * Starts the checkpoint manager by launching the background {@link CheckpointThread},
 * which polls at {@code pollFrequency} and triggers a checkpoint once the log grows
 * past {@code lsnThreshold}.
 */
@Override
public void start() {
    checkpointer = new CheckpointThread(this, txnSubsystem.getLogManager(), lsnThreshold, pollFrequency);
    checkpointer.start();
}
/**
 * Creates a processor for transaction logs received from a remote replica.
 * Received logs are placed on {@code remoteLogsQ} and drained asynchronously by a
 * {@link RemoteLogsNotifier} running on the shared NC thread executor.
 *
 * @param appCtx the NC application context providing the log manager and executor
 */
public RemoteLogsProcessor(INcApplicationContext appCtx) {
    logManager = appCtx.getTransactionSubsystem().getLogManager();
    remoteLogsQ = new LinkedBlockingQueue<>();
    // The notifier consumes queued remote logs off this thread for the life of the NC.
    appCtx.getThreadExecutor().execute(new RemoteLogsNotifier(appCtx, remoteLogsQ));
}
/**
 * Performs a sharp checkpoint. All datasets are flushed and all transaction
 * log files are deleted.
 *
 * @throws HyracksDataException if flushing, capturing, or log renewal fails
 */
@Override
public synchronized void doSharpCheckpoint() throws HyracksDataException {
    LOGGER.info("Starting sharp checkpoint...");
    final IDatasetLifecycleManager datasetLifecycleManager =
            txnSubsystem.getApplicationContext().getDatasetLifecycleManager();
    // Flush first so that no dataset retains in-memory state that would need the logs for recovery.
    datasetLifecycleManager.flushAllDatasets();
    // Record the checkpoint with the sharp-checkpoint marker LSN (sharp == true).
    capture(SHARP_CHECKPOINT_LSN, true);
    // All prior log records are now superseded by the flushed on-disk state; recycle the log files.
    txnSubsystem.getLogManager().renewLogFiles();
    LOGGER.info("Completed sharp checkpoint.");
}
/**
 * Appends a WAIT log record using the reusable {@code indexRecord}, then restores the
 * record's type so subsequent update logging can reuse it unchanged.
 *
 * @throws ACIDException if appending the log record fails
 */
private void logWait() throws ACIDException {
    indexRecord.setLogType(LogType.WAIT);
    indexRecord.computeAndSetLogSize();
    txnSubsystem.getLogManager().log(indexRecord);
    // set the log type back to UPDATE for normal updates
    indexRecord.setLogType(LogType.UPDATE);
}
}
/**
 * Logs the post-operation filter value, if any. A {@code null} value means there is
 * nothing to record and the call is a no-op.
 *
 * @param newValue the new filter tuple, or {@code null} when no filter update occurred
 * @throws HyracksDataException if appending the filter log record fails
 */
public void after(ITupleReference newValue) throws HyracksDataException {
    if (newValue == null) {
        return;
    }
    filterRecord.setNewValueSize(SimpleTupleWriter.INSTANCE.bytesRequired(newValue));
    filterRecord.setNewValue(newValue);
    filterRecord.computeAndSetLogSize();
    txnSubsystem.getLogManager().log(filterRecord);
}
/**
 * Re-initializes the checkpoint metadata of the index identified by {@code file}.
 * Any existing checkpoint state is deleted and replaced with a fresh checkpoint at the
 * log manager's current append LSN.
 *
 * @param appCtx the NC application context providing checkpoint manager and log manager
 * @throws HyracksDataException if deleting or initializing the index checkpoint fails
 */
private void initIndexCheckpoint(INcApplicationContext appCtx) throws HyracksDataException {
    final ResourceReference indexRef = ResourceReference.of(file);
    final IIndexCheckpointManagerProvider checkpointManagerProvider = appCtx.getIndexCheckpointManagerProvider();
    final IIndexCheckpointManager indexCheckpointManager = checkpointManagerProvider.get(indexRef);
    final long currentLSN = appCtx.getTransactionSubsystem().getLogManager().getAppendLSN();
    // Drop any stale checkpoint state before seeding a fresh one.
    indexCheckpointManager.delete();
    // Long.MIN_VALUE as the valid component timestamp marks "no components yet".
    indexCheckpointManager.init(Long.MIN_VALUE, currentLSN, LSMComponentId.EMPTY_INDEX_LAST_COMPONENT_ID.getMaxId());
    LOGGER.info(() -> "Checkpoint index: " + indexRef);
}
/*** * Attempts to perform a soft checkpoint at the specified {@code checkpointTargetLSN}. * If a checkpoint cannot be captured due to datasets having LSN < {@code checkpointTargetLSN}, * an asynchronous flush is triggered on them. When a checkpoint is successful, all transaction * log files that end with LSN < {@code checkpointTargetLSN} are deleted. */ @Override public synchronized long tryCheckpoint(long checkpointTargetLSN) throws HyracksDataException { LOGGER.info("Attemping soft checkpoint..."); final long minSecuredLSN = getMinSecuredLSN(); if (minSecuredLSN != NO_SECURED_LSN && checkpointTargetLSN >= minSecuredLSN) { return minSecuredLSN; } final long minFirstLSN = txnSubsystem.getRecoveryManager().getMinFirstLSN(); boolean checkpointSucceeded = minFirstLSN >= checkpointTargetLSN; if (!checkpointSucceeded) { // Flush datasets with indexes behind target checkpoint LSN final IDatasetLifecycleManager dlcm = txnSubsystem.getApplicationContext().getDatasetLifecycleManager(); dlcm.asyncFlushMatchingIndexes(newLaggingDatasetPredicate(checkpointTargetLSN)); } capture(minFirstLSN, false); if (checkpointSucceeded) { txnSubsystem.getLogManager().deleteOldLogFiles(minFirstLSN); LOGGER.info(String.format("soft checkpoint succeeded at LSN(%s)", minFirstLSN)); } return minFirstLSN; }
/**
 * Builds a checkpoint object from the current transaction-subsystem state, persists it,
 * and cleans up superseded checkpoint files.
 *
 * @param minMCTFirstLSN the minimum first LSN across memory components at capture time
 * @param sharp          whether this checkpoint is a sharp checkpoint
 * @throws HyracksDataException if persisting the checkpoint fails
 */
protected void capture(long minMCTFirstLSN, boolean sharp) throws HyracksDataException {
    final ILogManager logManager = txnSubsystem.getLogManager();
    final ITransactionManager transactionManager = txnSubsystem.getTransactionManager();
    final Checkpoint checkpoint = new Checkpoint(getNextCheckpointId(), logManager.getAppendLSN(), minMCTFirstLSN,
            transactionManager.getMaxTxnId(), sharp, StorageConstants.VERSION);
    persist(checkpoint);
    cleanup();
}
/**
 * Receives batches of replicated transaction logs from the remote worker's channel and
 * hands each batch to the replication channel's {@link RemoteLogsProcessor}, until the
 * end-of-replication marker is received.
 *
 * @param appCtx the NC application context
 * @param worker the replication worker whose socket channel supplies the log batches
 */
@Override
public void perform(INcApplicationContext appCtx, IReplicationWorker worker) {
    final ReplicationChannel replicationChannel = (ReplicationChannel) appCtx.getReplicationChannel();
    final RemoteLogsProcessor logsProcessor = replicationChannel.getRemoteLogsProcessor();
    final ILogManager logManager = appCtx.getTransactionSubsystem().getLogManager();
    // Reused for every deserialized log record to avoid per-record allocation.
    final RemoteLogRecord reusableLog = new RemoteLogRecord();
    final ISocketChannel channel = worker.getChannel();
    ByteBuffer logsBuffer = ByteBuffer.allocate(logManager.getLogPageSize());
    try {
        while (true) {
            // read a batch of logs
            logsBuffer = ReplicationProtocol.readRequest(channel, logsBuffer);
            // check if it is end of handshake
            if (logsBuffer.remaining() == END_REPLICATION_LOG_SIZE) {
                break;
            }
            logsProcessor.process(logsBuffer, reusableLog, worker);
        }
    } catch (IOException e) {
        throw new ReplicationException(e);
    }
}
/**
 * Creates the recovery manager, caching references to the subsystem components
 * (log manager, resource repository, checkpoint manager) used during recovery.
 *
 * @param serviceCtx   the NC service context
 * @param txnSubsystem the transaction subsystem this recovery manager belongs to
 */
public RecoveryManager(INCServiceContext serviceCtx, ITransactionSubsystem txnSubsystem) {
    this.serviceCtx = serviceCtx;
    this.txnSubsystem = txnSubsystem;
    this.appCtx = txnSubsystem.getApplicationContext();
    logMgr = (LogManager) txnSubsystem.getLogManager();
    ReplicationProperties repProperties = appCtx.getReplicationProperties();
    replicationEnabled = repProperties.isReplicationEnabled();
    localResourceRepository = (PersistentLocalResourceRepository) appCtx.getLocalResourceRepository();
    // Budget (in bytes) for caching entity commit records per job during recovery.
    cachedEntityCommitsPerJobSize = txnSubsystem.getTransactionProperties().getJobRecoveryMemorySize();
    checkpointManager = txnSubsystem.getCheckpointManager();
}
/**
 * Creates a commit runtime for one resource partition of a job.
 *
 * @param ctx                the Hyracks task context
 * @param txnId              id of the transaction being committed
 * @param datasetId          id of the target dataset
 * @param primaryKeyFields   field indexes of the primary key within incoming tuples
 * @param isWriteTransaction whether the transaction performed writes
 * @param resourcePartition  the storage partition this runtime commits
 * @param isSink             whether this runtime is a sink (does not forward tuples)
 */
public CommitRuntime(IHyracksTaskContext ctx, TxnId txnId, int datasetId, int[] primaryKeyFields,
        boolean isWriteTransaction, int resourcePartition, boolean isSink) {
    this.ctx = ctx;
    INcApplicationContext appCtx =
            (INcApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext();
    this.transactionManager = appCtx.getTransactionSubsystem().getTransactionManager();
    this.logMgr = appCtx.getTransactionSubsystem().getLogManager();
    this.txnId = txnId;
    this.datasetId = datasetId;
    this.primaryKeyFields = primaryKeyFields;
    this.tRef = new FrameTupleReference();
    this.isWriteTransaction = isWriteTransaction;
    this.resourcePartition = resourcePartition;
    this.isSink = isSink;
    // Scratch buffer reused when hashing primary keys.
    longHashes = new long[2];
}
/**
 * Ensures that a log-file switch performed by an interrupted thread still creates the
 * next log file and leaves the log manager usable for subsequent logging.
 */
@Test
public void interruptedLogFileSwitch() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    final LogManager logManager = (LogManager) ncAppCtx.getTransactionSubsystem().getLogManager();
    int logFileCountBeforeInterrupt = logManager.getOrderedLogFileIds().size();

    // ensure an interrupted transactor will create next log file but will fail to position the log channel
    final AtomicBoolean failed = new AtomicBoolean(false);
    Thread interruptedTransactor = new Thread(() -> {
        // Interrupt the thread before it attempts the switch so the switch runs with
        // the interrupt flag already set.
        Thread.currentThread().interrupt();
        try {
            prepareNextLogFile(logManager);
        } catch (Exception e) {
            failed.set(true);
        }
    });
    interruptedTransactor.start();
    interruptedTransactor.join();

    // ensure a new log file was created and survived interrupt
    int logFileCountAfterInterrupt = logManager.getOrderedLogFileIds().size();
    Assert.assertEquals(logFileCountBeforeInterrupt + 1, logFileCountAfterInterrupt);
    Assert.assertFalse(failed.get());

    // make sure we can still log to the new file
    interruptedLogPageSwitch();
}
/**
 * Commits the transaction identified by {@code txnId}. For write transactions a job
 * terminate (commit) log record is appended before the state is moved to COMMITTED;
 * read-only transactions skip logging. Locks are released and the context is removed
 * from the repository regardless of the outcome.
 *
 * @param txnId id of the transaction to commit
 * @throws ACIDException if appending the commit log record fails
 */
@Override
public void commitTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, true);
            txnSubsystem.getLogManager().log(logRecord);
            txnCtx.setTxnState(ITransactionManager.COMMITTED);
        }
    } catch (Exception e) {
        if (LOGGER.isErrorEnabled()) {
            LOGGER.error(" caused exception in commit !" + txnCtx.getTxnId());
        }
        throw e;
    } finally {
        // Cleanup order matters: finish the context, release its locks, then drop it.
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
    }
}
/**
 * Creates a search callback that acquires entity locks before searching. A reusable
 * WAIT log record is pre-built so that lock waits can be logged without per-wait
 * allocation.
 *
 * @param datasetId            id of the dataset being searched
 * @param resourceId           id of the index resource
 * @param entityIdFields       field indexes forming the entity id
 * @param txnSubsystem         the transaction subsystem (supplies lock and log managers)
 * @param txnCtx               context of the searching transaction
 * @param operatorNodePushable the insert/update/delete operator this callback serves
 */
public LockThenSearchOperationCallback(DatasetId datasetId, long resourceId, int[] entityIdFields,
        ITransactionSubsystem txnSubsystem, ITransactionContext txnCtx,
        IOperatorNodePushable operatorNodePushable) {
    super(datasetId, resourceId, entityIdFields, txnCtx, txnSubsystem.getLockManager());
    this.operatorNodePushable = (LSMIndexInsertUpdateDeleteOperatorNodePushable) operatorNodePushable;
    this.logManager = txnSubsystem.getLogManager();
    this.logRecord = new LogRecord();
    // Pre-populate the reusable WAIT record once; only flushing happens per wait.
    logRecord.setTxnCtx(txnCtx);
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnCtx.getTxnId().getId());
    logRecord.computeAndSetLogSize();
}
/**
 * Verifies that a WAIT log record appended from another thread is flushed to disk:
 * {@code LogManager.log} should not return before the record is durable.
 */
@Test
public void waitLogTest() throws Exception {
    final INcApplicationContext ncAppCtx = (INcApplicationContext) integrationUtil.ncs[0].getApplicationContext();
    LogRecord logRecord = new LogRecord();
    final long txnId = 1;
    logRecord.setTxnCtx(TransactionContextFactory.create(new TxnId(txnId),
            new TransactionOptions(ITransactionManager.AtomicityLevel.ENTITY_LEVEL)));
    logRecord.setLogSource(LogSource.LOCAL);
    logRecord.setLogType(LogType.WAIT);
    logRecord.setTxnId(txnId);
    // NOTE: isFlushed(boolean) is a setter despite the name; start as not-flushed.
    logRecord.isFlushed(false);
    logRecord.computeAndSetLogSize();
    Thread transactor = new Thread(() -> {
        final LogManager logManager = (LogManager) ncAppCtx.getTransactionSubsystem().getLogManager();
        logManager.log(logRecord);
    });
    transactor.start();
    // Bounded join so a hung flush fails the test instead of blocking the suite.
    transactor.join(TimeUnit.SECONDS.toMillis(30));
    Assert.assertTrue(logRecord.isFlushed());
}
/**
 * Populates the reusable {@code indexRecord} with the primary key and the before/after
 * images of the modified tuple, then appends it to the transaction log. A {@code null}
 * image is recorded with size zero.
 *
 * @param PKHash   hash of the primary key
 * @param newValue the after image, or {@code null} if none
 * @param oldValue the before image, or {@code null} if none
 * @throws ACIDException if appending the log record fails
 */
protected void log(int PKHash, ITupleReference newValue, ITupleReference oldValue) throws ACIDException {
    final SimpleTupleWriter tupleWriter = SimpleTupleWriter.INSTANCE;
    indexRecord.setPKHashValue(PKHash);
    indexRecord.setPKFields(primaryKeyFields);
    indexRecord.setPKValue(newValue);
    indexRecord.computeAndSetPKValueSize();
    if (newValue == null) {
        indexRecord.setNewValueSize(0);
    } else {
        indexRecord.setNewValueSize(tupleWriter.bytesRequired(newValue));
        indexRecord.setNewValue(newValue);
    }
    if (oldValue == null) {
        indexRecord.setOldValueSize(0);
    } else {
        indexRecord.setOldValueSize(tupleWriter.bytesRequired(oldValue));
        indexRecord.setOldValue(oldValue);
    }
    indexRecord.computeAndSetLogSize();
    txnSubsystem.getLogManager().log(indexRecord);
}
/**
 * Aborts the transaction identified by {@code txnId}. For write transactions an abort
 * log record is appended, the transaction's low-water-mark is secured with the
 * checkpoint manager (so its logs survive until rollback finishes), and the
 * transaction is rolled back. Cleanup (lock release, context removal, un-securing the
 * checkpoint) always runs in the finally block.
 *
 * @param txnId id of the transaction to abort
 * @throws ACIDException if rollback fails, leaving the system inconsistent
 */
@Override
public void abortTransaction(TxnId txnId) throws ACIDException {
    final ITransactionContext txnCtx = getTransactionContext(txnId);
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, false);
            txnSubsystem.getLogManager().log(logRecord);
            // Secure before rollback so checkpointing cannot delete logs rollback still needs.
            txnSubsystem.getCheckpointManager().secure(txnId);
            txnSubsystem.getRecoveryManager().rollbackTransaction(txnCtx);
            txnCtx.setTxnState(ITransactionManager.ABORTED);
        }
    } catch (HyracksDataException e) {
        String msg = "Could not complete rollback! System is in an inconsistent state";
        if (LOGGER.isErrorEnabled()) {
            LOGGER.log(Level.ERROR, msg, e);
        }
        throw new ACIDException(msg, e);
    } finally {
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        txnCtxRepository.remove(txnCtx.getTxnId());
        // Must always pair with secure(txnId) above to release the secured LSN.
        txnSubsystem.getCheckpointManager().completed(txnId);
    }
}
StoragePathUtil.getPartitionNumFromRelativePath(resourceName), lsmIndex, modCallback, metadataIndex.isPrimaryIndex()); LSMIndexUtil.checkAndSetFirstLSN((AbstractLSMIndex) lsmIndex, transactionSubsystem.getLogManager()); switch (op) { case INSERT:
/**
 * Test hook: returns the primary-index operation tracker for the resource, replacing it
 * (via reflection into {@code DatasetResource}) with a {@link TestPrimaryIndexOperationTracker}
 * on first access so tests can observe tracker activity.
 *
 * @param ctx      the NC service context
 * @param resource the index resource whose tracker is requested
 * @return the (possibly replaced) operation tracker for the resource's partition
 */
@Override
public ILSMOperationTracker getOperationTracker(INCServiceContext ctx, IResource resource) {
    try {
        INcApplicationContext appCtx = (INcApplicationContext) ctx.getApplicationContext();
        DatasetLifecycleManager dslcManager = (DatasetLifecycleManager) appCtx.getDatasetLifecycleManager();
        DatasetResource dsr = dslcManager.getDatasetLifecycle(datasetId);
        int partition = StoragePathUtil.getPartitionNumFromRelativePath(resource.getPath());
        PrimaryIndexOperationTracker opTracker =
                dslcManager.getOperationTracker(datasetId, partition, resource.getPath());
        if (!(opTracker instanceof TestPrimaryIndexOperationTracker)) {
            // Swap the real tracker for the test tracker inside the private tracker map.
            Field opTrackersField = DatasetResource.class.getDeclaredField("datasetPrimaryOpTrackers");
            opTracker = new TestPrimaryIndexOperationTracker(datasetId, partition,
                    appCtx.getTransactionSubsystem().getLogManager(), dsr.getDatasetInfo(),
                    dslcManager.getComponentIdGenerator(datasetId, partition, resource.getPath()));
            replaceMapEntry(opTrackersField, dsr, partition, opTracker);
        }
        return opTracker;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/**
 * Test helper: performs the abort path of a write transaction but deliberately stalls
 * between securing the txn's LSN with the checkpoint manager and rolling back, so the
 * driving test thread (woken via {@code t.notifyAll()}) can run checkpoint logic while
 * the abort is in flight. Cleanup in the finally block mirrors the real abort path.
 *
 * @param t            the thread object used as the stall monitor (notified, then waited on)
 * @param txnCtx       context of the transaction being aborted
 * @param txnSubsystem the transaction subsystem under test
 * @param txnId        id of the transaction being aborted
 * @throws InterruptedException  if the stall wait is interrupted
 * @throws HyracksDataException  if rollback fails
 */
private void stallAbortTxn(Thread t, ITransactionContext txnCtx, ITransactionSubsystem txnSubsystem, TxnId txnId)
        throws InterruptedException, HyracksDataException {
    try {
        if (txnCtx.isWriteTxn()) {
            LogRecord logRecord = new LogRecord();
            TransactionUtil.formJobTerminateLogRecord(txnCtx, logRecord, false);
            txnSubsystem.getLogManager().log(logRecord);
            txnSubsystem.getCheckpointManager().secure(txnId);
            // Hand control to the test thread, then stall here until it notifies us back.
            synchronized (t) {
                t.notifyAll();
                t.wait();
            }
            txnSubsystem.getRecoveryManager().rollbackTransaction(txnCtx);
            txnCtx.setTxnState(ITransactionManager.ABORTED);
        }
    } catch (ACIDException | HyracksDataException e) {
        String msg = "Could not complete rollback! System is in an inconsistent state";
        throw new ACIDException(msg, e);
    } finally {
        txnCtx.complete();
        txnSubsystem.getLockManager().releaseLocks(txnCtx);
        // Always release the secured LSN so checkpointing is not blocked forever.
        txnSubsystem.getCheckpointManager().completed(txnId);
    }
}