/**
 * Populates the size and type fields of the given location, preferring any
 * in-flight (not yet flushed) write over reading the record header from disk.
 *
 * @param location the location to fill in; data file id and offset must be set
 * @throws IOException if the record header cannot be read from the file
 */
public void readLocationDetails(Location location) throws IOException {
    Journal.WriteCommand pending = inflightWrites.get(new Journal.WriteKey(location));
    if (pending == null) {
        // Not buffered in memory - read the record header straight off disk.
        file.seek(location.getOffset());
        location.setSize(file.readInt());
        location.setType(file.readByte());
    } else {
        // Still queued for writing; copy the details from the in-flight command.
        location.setSize(pending.location.getSize());
        location.setType(pending.location.getType());
    }
}
// NOTE(review): fragment of a getNextLocation-style method - its header and several
// closing braces fall outside this chunk, so the braces below are unbalanced here.
// Visible logic: start at offset 0 of the head data file or advance past the given
// location; roll to the next data file at EOF, stop at the optional limit, and skip
// over any recorded corrupted block range.
cur = new Location(); cur.setDataFileId(head.getDataFileId()); cur.setOffset(0); } else { if (location.getSize() == -1) { cur = new Location(location); } else { cur = new Location(location); cur.setOffset(location.getOffset() + location.getSize()); cur.setOffset(cur.getOffset() + cur.getSize()); if (dataFile.getLength() <= cur.getOffset()) { synchronized (currentDataFile) { dataFile = dataFile.getNext(); return null; } else { cur.setDataFileId(dataFile.getDataFileId().intValue()); cur.setOffset(0); if (limit != null && cur.compareTo(limit) >= 0) { LOG.trace("reached limit: {} at: {}", limit, cur); return null; Sequence corruptedRange = dataFile.corruptedBlocks.get(cur.getOffset()); if (corruptedRange != null) { cur.setSize((int) corruptedRange.range());
// NOTE(review): fragment of an equals(Object) implementation - the surrounding
// null/type checks and remaining field comparisons are outside this chunk.
return false; } else if (!location.equals(other.location)) { return false;
/**
 * Key identifying a journal record by data file id and offset, used to look
 * up in-flight writes.
 *
 * @param item the location whose data file id and offset identify the record
 */
public WriteKey(Location item) {
    file = item.getDataFileId();
    offset = item.getOffset();
    // Mix the high bits of the offset into the hash: the previous
    // (int)(file ^ offset) simply truncated them, so records at the same
    // low offset in different ranges collided heavily. Any deterministic
    // function of (file, offset) is a valid hash as long as equals compares
    // those same fields.
    hash = 31 * file + (int) (offset ^ (offset >>> 32));
}
/**
 * Reads a Location from the stream: data file id, offset and - for store
 * versions 6 and later - the record size.
 *
 * @param dataIn source stream positioned at a marshalled location
 * @return the reconstructed location
 * @throws IOException on read failure
 */
@Override
public Location readPayload(DataInput dataIn) throws IOException {
    final Location result = new Location();
    result.setDataFileId(dataIn.readInt());
    result.setOffset(dataIn.readInt());
    // The size field was only added to the marshalled form in store version 6.
    if (metadata.version >= 6) {
        result.setSize(dataIn.readInt());
    }
    return result;
}
/**
 * Reads a legacy-format Location from the stream: data file id and offset
 * only (no size field).
 *
 * @param dataIn source stream positioned at a marshalled location
 * @return the reconstructed location
 * @throws IOException on read failure
 */
public Location readPayload(DataInput dataIn) throws IOException {
    final Location result = new Location();
    result.setDataFileId(dataIn.readInt());
    result.setOffset(dataIn.readInt());
    return result;
}
/**
 * Serializes the record header (size, type) and payload of the given write
 * command into the batch buffer.
 *
 * @param write the queued write to append
 * @param buff  the batch output buffer
 * @return true if the file must be synced once the batch is written
 * @throws IOException on buffer write failure
 */
private boolean appendToBuffer(Journal.WriteCommand write, DataByteArrayOutputStream buff) throws IOException {
    buff.writeInt(write.location.getSize());
    buff.writeByte(write.location.getType());
    buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
    // Use short-circuit || (the original non-short-circuit | evaluated both
    // operands needlessly); a sync is needed when the write requested one, or
    // when completion callbacks are configured to be driven by a disk sync.
    return write.sync || (syncOnComplete && write.onComplete != null);
}
}
/**
 * Reads the payload bytes of the record at the given location, serving it
 * from an in-flight write buffer when possible, otherwise from disk.
 *
 * @param location the record position; if its size is unset the record
 *                 header is read from disk first to discover it
 * @return the record payload (header bytes excluded)
 * @throws IOException if the location is invalid or the read fails
 */
public ByteSequence readRecord(Location location) throws IOException {
    if (!location.isValid()) {
        throw new IOException("Invalid location: " + location);
    }
    // A record still queued for writing is answered straight from memory.
    Journal.WriteCommand pending = inflightWrites.get(new Journal.WriteKey(location));
    if (pending != null) {
        return pending.data;
    }
    try {
        if (location.getSize() == Location.NOT_SET) {
            // Size unknown: read the record header to discover it.
            file.seek(location.getOffset());
            location.setSize(file.readInt());
            location.setType(file.readByte());
        } else {
            // Size known: position directly past the header.
            file.seek(location.getOffset() + Journal.RECORD_HEAD_SPACE);
        }
        // Guard against a corrupt size that would read past the end of the file.
        if ((long) location.getOffset() + location.getSize() > dataFile.length) {
            throw new IOException("Invalid location size: " + location + ", size: " + location.getSize());
        }
        byte[] payload = new byte[location.getSize() - Journal.RECORD_HEAD_SPACE];
        file.readFully(payload);
        return new ByteSequence(payload, 0, payload.length);
    } catch (RuntimeException e) {
        throw new IOException("Invalid location: " + location + " : " + e, e);
    }
}
@Override public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException { // Write the packet our internal buffer. int size = data.getLength() + RECORD_HEAD_SPACE; final Location location = new Location(); location.setSize(size); location.setType(type); Journal.WriteCommand write = new Journal.WriteCommand(location, data, onComplete); location.setBatch(enqueue(write)); return location; }
// NOTE(review): fragment of recoveryCheck(DataFile) - the surrounding scan loop and
// several braces fall outside this chunk, so the braces below do not balance here.
// Visible logic: walk batch records from offset 0; on a corrupt stretch record the
// Sequence of bad bytes and skip to nextOffset, then trim the data file length,
// adjust the journal's total length, and drop a trailing corrupted block.
protected Location recoveryCheck(DataFile dataFile) throws IOException { Location location = new Location(); location.setDataFileId(dataFile.getDataFileId()); location.setOffset(0); if (size >= 0 && location.getOffset() + BATCH_CONTROL_RECORD_SIZE + size <= totalFileLength) { if (size == 0) { location.setOffset(location.getOffset() + BATCH_CONTROL_RECORD_SIZE + size); } else { Sequence sequence = new Sequence(location.getOffset(), nextOffset - 1); LOG.warn("Corrupt journal records found in '{}' between offsets: {}", dataFile.getFile(), sequence); dataFile.corruptedBlocks.add(sequence); location.setOffset(nextOffset); } else { break; dataFile.setLength(location.getOffset()); if (existingLen > dataFile.getLength()) { totalLength.addAndGet(dataFile.getLength() - existingLen); if (dataFile.corruptedBlocks.getTail().getLast() + 1 == location.getOffset()) { dataFile.setLength((int) dataFile.corruptedBlocks.removeLastSequence().getFirst());
/**
 * Marshals a Location to the stream as data file id, offset and size, in
 * that order (matching the versioned readPayload).
 *
 * @param object  the location to marshal
 * @param dataOut destination stream
 * @throws IOException on write failure
 */
@Override
public void writePayload(Location object, DataOutput dataOut) throws IOException {
    final int dataFileId = object.getDataFileId();
    final int offset = object.getOffset();
    final int size = object.getSize();
    dataOut.writeInt(dataFileId);
    dataOut.writeInt(offset);
    dataOut.writeInt(size);
}
/**
 * Adds a write command to this batch, assigning its journal position and
 * updating the batch, data file and journal length accounting.
 *
 * @param write the command to append; its location is updated in place
 * @throws IOException declared for interface compatibility
 */
public void append(Journal.WriteCommand write) throws IOException {
    writes.addLast(write);
    // The record lands at the current end of this batch within the data file.
    write.location.setDataFileId(dataFile.getDataFileId());
    write.location.setOffset(offset + size);
    final int recordSize = write.location.getSize();
    size += recordSize;
    dataFile.incrementLength(recordSize);
    journal.addToTotalLength(recordSize);
}
}
@Override public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException { checkClosed(); // Write the packet our internal buffer. int size = data.getLength() + Journal.RECORD_HEAD_SPACE; final Location location = new Location(); location.setSize(size); location.setType(type); Journal.WriteCommand write = new Journal.WriteCommand(location, data, onComplete); enqueueWrite(write); return location; }
private Location checkpointProducerAudit() throws IOException { if (metadata.producerSequenceIdTracker == null || metadata.producerSequenceIdTracker.modified()) { ByteArrayOutputStream baos = new ByteArrayOutputStream(); ObjectOutputStream oout = new ObjectOutputStream(baos); oout.writeObject(metadata.producerSequenceIdTracker); oout.flush(); oout.close(); // using completion callback allows a disk sync to be avoided when enableJournalDiskSyncs = false Location location = store(new KahaProducerAuditCommand().setAudit(new Buffer(baos.toByteArray())), nullCompletionCallback); try { location.getLatch().await(); if (location.getException().get() != null) { throw location.getException().get(); } } catch (InterruptedException e) { throw new InterruptedIOException(e.toString()); } return location; } return metadata.producerSequenceIdTrackerLocation; }
// NOTE(review): fragment - the opening of this statement and the enclosing method
// are outside the visible chunk, so this span starts mid-expression. Visible logic:
// update the location index and per-destination/per-subscription size statistics
// when a message is re-stored at a new journal location.
); sd.locationIndex.put(tx, location, id); incrementAndAddSizeToStoreStat(tx, command.getDestination(), location.getSize()); decrementAndSubSizeToStoreStat(tx, command.getDestination(), previousKeys.location.getSize()); if (enableSubscriptionStatistics && sd.ackPositions != null && location.getSize() != previousKeys.location.getSize()) { Iterator<Entry<String, SequenceSet>> iter = sd.ackPositions.iterator(tx); while (iter.hasNext()) { Entry<String, SequenceSet> e = iter.next(); if (e.getValue().contains(id)) { incrementAndAddSizeToStoreStat(key(command.getDestination()), e.getKey(), location.getSize()); decrementAndSubSizeToStoreStat(key(command.getDestination()), e.getKey(), previousKeys.location.getSize()); if(!previousKeys.location.equals(location)) { sd.locationIndex.remove(tx, previousKeys.location);
public void corruptRecoveryLocation(Location recoveryPosition) throws IOException { DataFile dataFile = getDataFile(recoveryPosition); // with corruption on recovery we have no faith in the content - slip to the next batch record or eof DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile); try { RandomAccessFile randomAccessFile = reader.getRaf().getRaf(); randomAccessFile.seek(recoveryPosition.getOffset() + 1); byte[] data = new byte[getWriteBatchSize()]; ByteSequence bs = new ByteSequence(data, 0, randomAccessFile.read(data)); int nextOffset = 0; if (findNextBatchRecord(bs, randomAccessFile) >= 0) { nextOffset = Math.toIntExact(randomAccessFile.getFilePointer() - bs.remaining()); } else { nextOffset = Math.toIntExact(randomAccessFile.length()); } Sequence sequence = new Sequence(recoveryPosition.getOffset(), nextOffset - 1); LOG.warn("Corrupt journal records found in '{}' between offsets: {}", dataFile.getFile(), sequence); // skip corruption on getNextLocation recoveryPosition.setOffset(nextOffset); recoveryPosition.setSize(-1); dataFile.corruptedBlocks.add(sequence); } catch (IOException e) { } finally { accessorPool.closeDataFileAccessor(reader); } }
// NOTE(review): fragment of a scheduler-store recovery check - the enclosing loop
// and closing braces are outside this chunk. Visible logic: remove jobs whose
// journal location is at or past the last append point, record the data file ids
// they referenced as missing, and collect corrupted-block offsets as Locations.
if (job.getLocation().compareTo(lastAppendLocation) >= 0) { if (scheduler.removeJobAtTime(tx, job.getJobId(), job.getNextTime())) { LOG.trace("Removed Job past last appened in the journal: {}", job.getJobId()); missingJournalFiles.add(job.getLocation().getDataFileId()); if (job.getLastUpdate() != null) { missingJournalFiles.add(job.getLastUpdate().getDataFileId()); int id = dataFile.getDataFileId(); for (long offset : dataFile.getCorruptedBlocks()) { corruptedLocations.add(new Location(id, (int) offset));
/**
 * Two locations are equal exactly when compareTo orders them identically.
 *
 * @param o object to compare against
 * @return true if o is a Location that compares equal to this one
 */
public boolean equals(Object o) {
    if (o instanceof Location) {
        return compareTo((Location) o) == 0;
    }
    return false;
}
/**
 * Writes a subscription command to the journal and waits for the write to
 * complete.
 *
 * @param subscription the subscription command to persist
 * @return the journal location of the written record
 * @throws IOException if the journal write fails or the wait is interrupted
 */
private Location checkpointSubscriptionCommand(KahaSubscriptionCommand subscription) throws IOException {
    ByteSequence sequence = toByteSequence(subscription);
    Location location = journal.write(sequence, nullCompletionCallback);
    try {
        location.getLatch().await();
    } catch (InterruptedException e) {
        // Restore the thread's interrupt status before translating the
        // exception (the original dropped it, hiding the interruption).
        Thread.currentThread().interrupt();
        throw new InterruptedIOException(e.toString());
    }
    return location;
}
/**
 * Combined hash over all scheduling fields, consistent with equals.
 * Long.hashCode(x) is specified as exactly (int)(x ^ (x >>> 32)), so the
 * values produced are identical to the previous hand-written folds.
 *
 * @return hash of cron entry, delay, job id, location, next time, period,
 *         repeat count and start time
 */
@Override
public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (cronEntry == null ? 0 : cronEntry.hashCode());
    result = prime * result + Long.hashCode(delay);
    result = prime * result + (jobId == null ? 0 : jobId.hashCode());
    result = prime * result + (location == null ? 0 : location.hashCode());
    result = prime * result + Long.hashCode(nextTime);
    result = prime * result + Long.hashCode(period);
    result = prime * result + repeat;
    result = prime * result + Long.hashCode(startTime);
    return result;
}