private void checkSize(final int size, final int compactCount) throws Exception { if (getWritingChannel() == null) { if (!checkCompact(compactCount)) { // will need to open a file either way openFile(); } } else { if (compactCount >= 0) { if (checkCompact(compactCount)) { // The file was already moved on this case, no need to check for the size. // otherwise we will also need to check for the size return; } } if (getWritingChannel().writerIndex() + size > getWritingChannel().capacity()) { openFile(); } } }
/**
 * Replays a transactional update record during compaction. If the transaction
 * is still pending or the record is live, the update is re-encoded into the
 * compacted output; otherwise it is handled as a plain (non-TX) update.
 */
@Override
public void onReadUpdateRecordTX(final long transactionID, final RecordInfo info) throws Exception {
   if (logger.isTraceEnabled()) {
      logger.trace("onReadUpdateRecordTX " + info);
   }
   final boolean transactionPending = pendingTransactions.get(transactionID) != null;
   if (!transactionPending && !containsRecord(info.id)) {
      // Neither the transaction nor the record survives; fall back to the non-TX path.
      onReadUpdateRecord(info);
      return;
   }
   final JournalTransaction tx = getNewJournalTransaction(transactionID);
   final JournalInternalRecord updateRecordTX =
      new JournalAddRecordTX(false, transactionID, info.id, info.userRecordType,
                             EncoderPersister.getInstance(), new ByteArrayEncoding(info.data));
   // Bump the compact count so repeatedly-compacted records can be detected.
   updateRecordTX.setCompactCount((short) (info.compactCount + 1));
   checkSize(updateRecordTX.getEncodeSize(), info.compactCount);
   writeEncoder(updateRecordTX);
   tx.addPositive(currentFile, info.id, updateRecordTX.getEncodeSize());
}
@Override public void onReadDeleteRecordTX(final long transactionID, final RecordInfo info) throws Exception { if (logger.isTraceEnabled()) { logger.trace("onReadDeleteRecordTX " + transactionID + " info " + info); } if (pendingTransactions.get(transactionID) != null) { JournalTransaction newTransaction = getNewJournalTransaction(transactionID); JournalInternalRecord record = new JournalDeleteRecordTX(transactionID, info.id, new ByteArrayEncoding(info.data)); checkSize(record.getEncodeSize()); writeEncoder(record); newTransaction.addNegative(currentFile, info.id); } // else.. nothing to be done }
compactor = new JournalCompactor(fileFactory, this, filesRepository, records.keysLongHashSet(), dataFilesToProcess.get(0).getFileID()); compactor.addPendingTransaction(id, pendingTransaction.getPositiveArray()); pendingTransaction.setCompacting(); }); compactor.flush(); SequentialFile controlFile = createControlFile(dataFilesToProcess, compactor.getNewDataFiles(), null); newDatafiles = localCompactor.getNewDataFiles(); localCompactor.getNewRecords().forEach((id, newRecord) -> { records.put(id, newRecord); }); localCompactor.getNewTransactions().forEach((id, newTransaction) -> newTransaction.replaceRecordProvider(this)); localCompactor.replayPendingCommands(); localCompactor.getNewTransactions().forEach((id, newTransaction) -> { if (logger.isTraceEnabled()) { logger.trace("Merging pending transaction " + newTransaction + " after compacting the journal"); compactor.flush(); } catch (Throwable ignored) {
/**
 * Replays a non-transactional add record during compaction. Records that are
 * no longer live are simply dropped; live records are re-encoded into the
 * compacted output and tracked in {@code newRecords}.
 */
@Override
public void onReadAddRecord(final RecordInfo info) throws Exception {
   if (logger.isTraceEnabled()) {
      logger.trace("Read Record " + info);
   }
   if (!containsRecord(info.id)) {
      // Record is not live any more; compaction drops it by not copying it.
      return;
   }
   final JournalInternalRecord addRecord =
      new JournalAddRecord(true, info.id, info.getUserRecordType(),
                           EncoderPersister.getInstance(), new ByteArrayEncoding(info.data));
   // Bump the compact count so repeatedly-compacted records can be detected.
   addRecord.setCompactCount((short) (info.compactCount + 1));
   checkSize(addRecord.getEncodeSize(), info.compactCount);
   writeEncoder(addRecord);
   newRecords.put(info.id, new JournalRecord(currentFile, addRecord.getEncodeSize()));
}
logger.trace("adding txID=" + this.id + " into compacting"); compactor.addCommandCommit(this, file); } else { JournalRecord posFiles = journal.getRecords().get(trUpdate.id); if (compactor != null && compactor.containsRecord(trUpdate.id)) { compactor.addCommandUpdate(trUpdate.id, trUpdate.file, trUpdate.size); } else if (posFiles == null) { posFiles = new JournalRecord(trUpdate.file, trUpdate.size); for (JournalUpdate trDelete : neg) { if (compactor != null) { compactor.addCommandDelete(trDelete.id, trDelete.file); } else { JournalRecord posFiles = journal.getRecords().remove(trDelete.id);
@Override public void onReadRollbackRecord(final long transactionID) throws Exception { if (logger.isTraceEnabled()) { logger.trace("onReadRollbackRecord " + transactionID); } if (pendingTransactions.get(transactionID) != null) { // Sanity check, this should never happen throw new IllegalStateException("Inconsistency during compacting: RollbackRecord ID = " + transactionID + " for an already rolled back transaction during compacting"); } else { JournalTransaction newTransaction = newTransactions.remove(transactionID); if (newTransaction != null) { JournalInternalRecord rollbackRecord = new JournalRollbackRecordTX(transactionID); checkSize(rollbackRecord.getEncodeSize()); writeEncoder(rollbackRecord); newTransaction.rollback(currentFile); } } }
// NOTE(review): fragment — the enclosing anonymous class and the call it is
// passed to begin before this view; the trailing closers belong to them.
@Override
public void run() {
   // Answer "is this record known?" under the journal read lock so the check
   // is consistent with concurrent appends/compaction.
   journalLock.readLock().lock();
   try {
      // Known = live in records, queued in pendingRecords, or — presumably —
      // held by an in-progress compactor run (TODO confirm compactor semantics).
      known.set(records.containsKey(id) || pendingRecords.contains(id) || (compactor != null && compactor.containsRecord(id)));
   } finally {
      journalLock.readLock().unlock();
   }
} } });
/**
 * Convenience overload of {@link #checkSize(int, int)} that skips the
 * compact-count check (passes -1).
 *
 * @param size encoded size of the record about to be written
 */
private void checkSize(final int size) throws Exception {
   checkSize(size, -1);
}
/** * The caller of this method needs to guarantee appendLock.lock before calling this method if being used outside of the lock context. * or else potFilesMap could be affected */ public void rollback(final JournalFile file) { JournalCompactor compactor = journal.getCompactor(); if (compacting && compactor != null) { compactor.addCommandRollback(this, file); } else { // Now add negs for the pos we added in each file in which there were // transactional operations // Note that we do this on rollback as we do on commit, since we need // to ensure the file containing // the rollback record doesn't get deleted before the files with the // transactional operations are deleted // Otherwise we may run into problems especially with XA where we are // just left with a prepare when the tx // has actually been rolled back for (JournalFile jf : pendingFiles) { file.incNegCount(jf); } } }
/**
 * Queues a commit of {@code liveTransaction} to be replayed after compaction,
 * and snapshots every record id the transaction (and any matching pending
 * transaction) touched.
 * <p>
 * If a delete arrives for these records while the compactor is still working,
 * the snapshot lets later deletes be accounted for instead of failing on a
 * seemingly non-existent record.
 */
public void addCommandCommit(final JournalTransaction liveTransaction, final JournalFile currentFile) {
   if (logger.isTraceEnabled()) {
      logger.trace("addCommandCommit " + liveTransaction.getId());
   }
   pendingCommands.add(new CommitCompactCommand(liveTransaction, currentFile));
   snapshotIds(liveTransaction.getPositiveArray());
   final PendingTransaction oldTransaction = pendingTransactions.get(liveTransaction.getId());
   if (oldTransaction != null) {
      snapshotIds(oldTransaction.pendingIDs);
   }
}

/** Adds every id in {@code ids} (which may be null) to the records snapshot. */
private void snapshotIds(final long[] ids) {
   if (ids != null) {
      for (long id : ids) {
         addToRecordsSnaptshot(id);
      }
   }
}
compactor.addCommandUpdate(id, usedFile, updateRecord.getEncodeSize()); } else { jrnRecord.addUpdateFile(usedFile, updateRecord.getEncodeSize());
logger.debug("Record " + id + " had been deleted already from a different call"); } else { compactor.addCommandDelete(id, usedFile);
compactor = new JournalCompactor(fileFactory, this, filesRepository, records.keysLongHashSet(), dataFilesToProcess.get(0).getFileID()); compactor.addPendingTransaction(id, pendingTransaction.getPositiveArray()); pendingTransaction.setCompacting(); }); compactor.flush(); SequentialFile controlFile = createControlFile(dataFilesToProcess, compactor.getNewDataFiles(), null); newDatafiles = localCompactor.getNewDataFiles(); localCompactor.getNewRecords().forEach((id, newRecord) -> { records.put(id, newRecord); }); localCompactor.getNewTransactions().forEach((id, newTransaction) -> newTransaction.replaceRecordProvider(this)); localCompactor.replayPendingCommands(); localCompactor.getNewTransactions().forEach((id, newTransaction) -> { if (logger.isTraceEnabled()) { logger.trace("Merging pending transaction " + newTransaction + " after compacting the journal"); compactor.flush(); } catch (Throwable ignored) {
/**
 * Replays a non-transactional add record during compaction. Records that are
 * no longer live are simply dropped; live records are re-encoded into the
 * compacted output and tracked in {@code newRecords}.
 */
@Override
public void onReadAddRecord(final RecordInfo info) throws Exception {
   if (logger.isTraceEnabled()) {
      logger.trace("Read Record " + info);
   }
   if (!containsRecord(info.id)) {
      // Record is not live any more; compaction drops it by not copying it.
      return;
   }
   final JournalInternalRecord addRecord =
      new JournalAddRecord(true, info.id, info.getUserRecordType(),
                           EncoderPersister.getInstance(), new ByteArrayEncoding(info.data));
   // Bump the compact count so repeatedly-compacted records can be detected.
   addRecord.setCompactCount((short) (info.compactCount + 1));
   checkSize(addRecord.getEncodeSize(), info.compactCount);
   writeEncoder(addRecord);
   newRecords.put(info.id, new JournalRecord(currentFile, addRecord.getEncodeSize()));
}
@Override public void onReadDeleteRecordTX(final long transactionID, final RecordInfo info) throws Exception { if (logger.isTraceEnabled()) { logger.trace("onReadDeleteRecordTX " + transactionID + " info " + info); } if (pendingTransactions.get(transactionID) != null) { JournalTransaction newTransaction = getNewJournalTransaction(transactionID); JournalInternalRecord record = new JournalDeleteRecordTX(transactionID, info.id, new ByteArrayEncoding(info.data)); checkSize(record.getEncodeSize()); writeEncoder(record); newTransaction.addNegative(currentFile, info.id); } // else.. nothing to be done }
logger.trace("adding txID=" + this.id + " into compacting"); compactor.addCommandCommit(this, file); } else { JournalRecord posFiles = journal.getRecords().get(trUpdate.id); if (compactor != null && compactor.containsRecord(trUpdate.id)) { compactor.addCommandUpdate(trUpdate.id, trUpdate.file, trUpdate.size); } else if (posFiles == null) { posFiles = new JournalRecord(trUpdate.file, trUpdate.size); for (JournalUpdate trDelete : neg) { if (compactor != null) { compactor.addCommandDelete(trDelete.id, trDelete.file); } else { JournalRecord posFiles = journal.getRecords().remove(trDelete.id);
@Override public void onReadRollbackRecord(final long transactionID) throws Exception { if (logger.isTraceEnabled()) { logger.trace("onReadRollbackRecord " + transactionID); } if (pendingTransactions.get(transactionID) != null) { // Sanity check, this should never happen throw new IllegalStateException("Inconsistency during compacting: RollbackRecord ID = " + transactionID + " for an already rolled back transaction during compacting"); } else { JournalTransaction newTransaction = newTransactions.remove(transactionID); if (newTransaction != null) { JournalInternalRecord rollbackRecord = new JournalRollbackRecordTX(transactionID); checkSize(rollbackRecord.getEncodeSize()); writeEncoder(rollbackRecord); newTransaction.rollback(currentFile); } } }
// NOTE(review): fragment — the enclosing anonymous class and the call it is
// passed to begin before this view; the trailing closers belong to them.
@Override
public void run() {
   // Answer "is this record known?" under the journal read lock so the check
   // is consistent with concurrent appends/compaction.
   journalLock.readLock().lock();
   try {
      // Known = live in records, queued in pendingRecords, or — presumably —
      // held by an in-progress compactor run (TODO confirm compactor semantics).
      known.set(records.containsKey(id) || pendingRecords.contains(id) || (compactor != null && compactor.containsRecord(id)));
   } finally {
      journalLock.readLock().unlock();
   }
} } });
/**
 * Convenience overload of {@link #checkSize(int, int)} that skips the
 * compact-count check (passes -1).
 *
 * @param size encoded size of the record about to be written
 */
private void checkSize(final int size) throws Exception {
   checkSize(size, -1);
}