/**
 * Reads the record stored at the given journal location.
 *
 * Borrows an accessor for the location's data file from the pool, reads the
 * record, and always returns the accessor to the pool — even when the read
 * throws.
 *
 * @param location the position of the record to read
 * @return the record's payload bytes
 * @throws IOException if the underlying file read fails
 * @throws IllegalStateException if the journal is not in a readable state
 */
public ByteSequence read(Location location) throws IOException, IllegalStateException {
    DataFile dataFile = getDataFile(location);
    DataFileAccessor reader = accessorPool.openDataFileAccessor(dataFile);
    try {
        return reader.readRecord(location);
    } finally {
        accessorPool.closeDataFileAccessor(reader);
    }
}
/**
 * Overwrites the record at the given location with new data.
 *
 * The accessor is borrowed from the pool and returned in a finally block so
 * it is never leaked on failure.
 *
 * @param location the position of the record to replace
 * @param data the replacement payload
 * @param sync whether to force the update to disk before returning
 * @throws IOException if the underlying file write fails
 */
public void update(Location location, ByteSequence data, boolean sync) throws IOException {
    DataFileAccessor updater = accessorPool.openDataFileAccessor(getDataFile(location));
    try {
        updater.updateRecord(location, data, sync);
    } finally {
        accessorPool.closeDataFileAccessor(updater);
    }
}
/**
 * Construct a Store reader bound to a single data file.
 *
 * Captures the journal's in-flight write map (used to serve reads of records
 * that have not yet hit disk) and opens a random-access handle on the file.
 *
 * @param dataManager the owning journal
 * @param dataFile the data file this accessor will read
 * @throws IOException if the file cannot be opened
 */
public DataFileAccessor(Journal dataManager, DataFile dataFile) throws IOException {
    this.inflightWrites = dataManager.getInflightWrites();
    this.dataFile = dataFile;
    this.file = dataFile.openRandomAccessFile();
}
/**
 * Adds a write command to this batch.
 *
 * The command is queued, then its location is stamped with the batch's data
 * file id and the record's offset within the file; finally the batch, file,
 * and journal length accounting are all advanced by the record size.
 *
 * @param write the write command to enqueue into this batch
 * @throws IOException declared for subclass overrides that touch the buffer
 */
public void append(Journal.WriteCommand write) throws IOException {
    writes.addLast(write);
    Location loc = write.location;
    loc.setDataFileId(dataFile.getDataFileId());
    loc.setOffset(offset + size);
    int recordSize = loc.getSize();
    size += recordSize;
    dataFile.incrementLength(recordSize);
    journal.addToTotalLength(recordSize);
}
}
@Override public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException { // Write the packet our internal buffer. int size = data.getLength() + RECORD_HEAD_SPACE; final Location location = new Location(); location.setSize(size); location.setType(type); Journal.WriteCommand write = new Journal.WriteCommand(location, data, onComplete); location.setBatch(enqueue(write)); return location; }
@Override public Location storeItem(ByteSequence data, byte type, Runnable onComplete) throws IOException { checkClosed(); // Write the packet our internal buffer. int size = data.getLength() + Journal.RECORD_HEAD_SPACE; final Location location = new Location(); location.setSize(size); location.setType(type); Journal.WriteCommand write = new Journal.WriteCommand(location, data, onComplete); enqueueWrite(write); return location; }
/**
 * Returns the location of the first record in the journal, or null when the
 * journal holds no data files.
 *
 * Builds a zero-sized sentinel location at offset 0 of the oldest data file
 * and asks getNextLocation to resolve the real first record from there.
 *
 * @return the first record's location, or null if the journal is empty
 * @throws IllegalStateException if the journal is not in a readable state
 * @throws IOException if reading the data file fails
 */
public Location getFirstLocation() throws IllegalStateException, IOException {
    if (dataFiles.isEmpty()) {
        return null;
    }
    Location start = new Location();
    start.setDataFileId(dataFiles.getHead().getDataFileId());
    start.setOffset(0);
    start.setSize(0);
    return getNextLocation(start);
}
/**
 * Construct a Store writer for the given journal.
 *
 * Snapshots the journal's write configuration: the shared in-flight write
 * map, the maximum batch size, whether async writes sync on completion, and
 * whether the PERIODIC disk-sync strategy is in effect.
 *
 * @param dataManager the owning journal
 */
public DataFileAppender(Journal dataManager) {
    this.journal = dataManager;
    this.inflightWrites = dataManager.getInflightWrites();
    this.maxWriteBatchSize = dataManager.getWriteBatchSize();
    this.syncOnComplete = dataManager.isEnableAsyncDiskSync();
    this.periodicSync =
        JournalDiskSyncStrategy.PERIODIC.equals(dataManager.getJournalDiskSyncStrategy());
}
/**
 * Creates the next data file in the sequence and preallocates it to the
 * journal's configured file size.
 *
 * @return the freshly created, preallocated data file
 * @throws IOException if the file cannot be created or preallocated
 */
private DataFile newDataFile() throws IOException {
    int id = nextDataFileId++;
    DataFile next = new DataFile(getFile(id), id);
    preallocateEntireJournalDataFile(next.appendRandomAccessFile());
    return next;
}
/**
 * Returns an accessor to its per-file pool, or disposes of it outright when
 * no pool exists for that file or this accessor pool has been closed.
 *
 * @param reader the accessor being released by its borrower
 */
synchronized void closeDataFileAccessor(DataFileAccessor reader) {
    Pool pool = pools.get(reader.getDataFile().getDataFileId());
    if (pool != null && !closed) {
        pool.closeDataFileReader(reader);
    } else {
        reader.dispose();
    }
}
/**
 * Starts a new write batch at the given offset of a data file.
 *
 * Every batch begins with a batch-control record, so the batch size, the
 * data file length, and the journal's total length all start charged with
 * BATCH_CONTROL_RECORD_SIZE.
 *
 * @param dataFile the file this batch will be written to
 * @param offset the file offset at which the batch begins
 */
public WriteBatch(DataFile dataFile, int offset) {
    this.dataFile = dataFile;
    this.offset = offset;
    this.size = Journal.BATCH_CONTROL_RECORD_SIZE;
    dataFile.incrementLength(Journal.BATCH_CONTROL_RECORD_SIZE);
    journal.addToTotalLength(Journal.BATCH_CONTROL_RECORD_SIZE);
}
private Location getNextInitializedLocation(Location location) throws IOException { Location mayNotBeInitialized = journal.getNextLocation(location); if (location.getSize() == NOT_SET && mayNotBeInitialized != null && mayNotBeInitialized.getSize() != NOT_SET) { // need to init size and type to skip return journal.getNextLocation(mayNotBeInitialized); } else { return mayNotBeInitialized; } }
/**
 * Reports whether the given write still fits in this batch.
 *
 * The write is rejected when it would push the batch to the configured
 * maximum batch size, or past the journal's maximum data file length.
 *
 * @param write the candidate write command
 * @return true if the write can be appended to this batch
 */
public boolean canAppend(Journal.WriteCommand write) {
    int newSize = size + write.location.getSize();
    return newSize < maxWriteBatchSize && offset + newSize <= journal.getMaxFileLength();
}
/**
 * Creates a write batch seeded with the given write command, positioned at
 * the file's current logical end.
 *
 * @param write the first write command of the new batch
 * @param file the data file the batch targets
 * @return the new batch containing the initial write
 * @throws IOException if building the batch fails
 */
@Override
protected DataFileAppender.WriteBatch newWriteBatch(Journal.WriteCommand write, DataFile file) throws IOException {
    int startOffset = file.getLength();
    return new WriteBatch(file, startOffset, write);
}
/**
 * Builds a hash-map key identifying a record by (data file id, offset).
 *
 * @param item the location whose file id and offset identify the write
 */
public WriteKey(Location item) {
    file = item.getDataFileId();
    offset = item.getOffset();
    // Fold the offset's high word into the hash (same folding as
    // Long.hashCode) instead of casting it away: the old (int)(file ^ offset)
    // dropped the upper 32 bits of the offset entirely, so keys differing
    // only in those bits always collided.
    hash = file ^ (int) (offset ^ (offset >>> 32));
}
/**
 * Construct a Store writer that appends only to one specific data file.
 *
 * Captures the journal's shared in-flight write map and its configured
 * maximum write batch size.
 *
 * @param journal the owning journal
 * @param target the single data file this appender writes to
 */
public TargetedDataFileAppender(Journal journal, DataFile target) {
    this.journal = journal;
    this.target = target;
    this.inflightWrites = journal.getInflightWrites();
    this.maxWriteBatchSize = journal.getWriteBatchSize();
}
// Background task body: drains the queued write batches via processQueue().
// NOTE(review): this is the tail of an anonymous Runnable/Thread whose
// declaration starts outside this chunk — the trailing "};" closes it.
@Override
public void run() {
    processQueue();
}
};
/**
 * Appends a write to the batch and serializes it into the batch buffer.
 *
 * Delegates the bookkeeping (offset stamping, size accounting) to the
 * superclass, then copies the record into the in-memory buffer; if the
 * record demands it, the batch is marked as requiring a disk force.
 *
 * @param write the write command to append
 * @throws IOException if serializing into the buffer fails
 */
@Override
public void append(Journal.WriteCommand write) throws IOException {
    super.append(write);
    boolean wantsSync = appendToBuffer(write, buff);
    forceToDisk = forceToDisk || wantsSync;
}
}
/**
 * Queues a write command into this batch and assigns its on-disk position.
 *
 * The command is enqueued first, then its location receives this batch's
 * data file id and its offset within the file; the batch size, file length,
 * and journal total length are each grown by the record size.
 *
 * @param write the write command to enqueue
 * @throws IOException declared for overriding implementations
 */
public void append(Journal.WriteCommand write) throws IOException {
    writes.addLast(write);
    Location loc = write.location;
    loc.setDataFileId(dataFile.getDataFileId());
    loc.setOffset(offset + size);
    int recordSize = loc.getSize();
    size += recordSize;
    dataFile.incrementLength(recordSize);
    journal.addToTotalLength(recordSize);
}
}
/**
 * Opens a new write batch at the given file offset.
 *
 * A batch always carries a leading batch-control record, so the batch size,
 * the file's length, and the journal's running total are all initialized
 * with BATCH_CONTROL_RECORD_SIZE.
 *
 * @param dataFile the target data file
 * @param offset where in the file this batch begins
 */
public WriteBatch(DataFile dataFile, int offset) {
    this.dataFile = dataFile;
    this.offset = offset;
    this.size = Journal.BATCH_CONTROL_RECORD_SIZE;
    dataFile.incrementLength(Journal.BATCH_CONTROL_RECORD_SIZE);
    journal.addToTotalLength(Journal.BATCH_CONTROL_RECORD_SIZE);
}