/**
 * Commit data that is already flushed.
 * <p/>
 * This API is optional as the writer implements a policy for automatically syncing
 * the log records in the buffer. The buffered edits can be flushed when the buffer
 * becomes full or a certain period of time is elapsed.
 *
 * @return the highest transaction id that has been committed, or 0 when there
 *         is no cached segment writer to commit.
 * @throws IOException if the writer is closed or in an error state.
 */
@Override
public long flushAndSync() throws IOException {
    checkClosedOrInError("flushAndSync");
    LOG.debug("FlushAndSync Started");
    BKLogSegmentWriter segmentWriter = getCachedLogWriter();
    // No active segment writer means there is nothing buffered to commit.
    if (segmentWriter == null) {
        LOG.debug("FlushAndSync Completed - Nothing to Flush");
        return 0;
    }
    long committedTxId = Math.max(0L, FutureUtils.result(segmentWriter.commit()));
    LOG.debug("FlushAndSync Completed");
    return committedTxId;
}
/**
 * All data that has been written to the stream so far will be flushed.
 * New data can be still written to the stream while flush is ongoing.
 *
 * @return the highest transaction id that has been flushed, or 0 when there
 *         is no cached segment writer to flush.
 * @throws IOException if the writer is closed or in an error state.
 */
@Override
public long setReadyToFlush() throws IOException {
    checkClosedOrInError("setReadyToFlush");
    BKLogSegmentWriter segmentWriter = getCachedLogWriter();
    // Nothing to flush when no segment writer has been opened yet.
    if (segmentWriter == null) {
        return 0;
    }
    return Math.max(0L, FutureUtils.result(segmentWriter.flush()));
}
/**
 * Writes 100 records to a fresh log, completes the segment, and verifies the
 * completed-ledger znode exists for transaction range [1, 100].
 */
@Test(timeout = 60000)
public void testSimpleWrite() throws Exception {
    BKDistributedLogManager dlm = createNewDLM(conf, "distrlog-simplewrite");
    BKSyncLogWriter out = dlm.startLogSegmentNonPartitioned();
    for (long txId = 1; txId <= 100; txId++) {
        out.write(DLMTestUtil.getLogRecordInstance(txId));
    }
    // Capture the segment writer before closing so we can read its sequence number.
    BKLogSegmentWriter segmentWriter = out.getCachedLogWriter();
    out.closeAndComplete();

    BKLogWriteHandler writeHandler = dlm.createWriteHandler(true);
    assertNotNull(zkc.exists(
            writeHandler.completedLedgerZNode(1, 100, segmentWriter.getLogSegmentSequenceNumber()),
            false));
    FutureUtils.result(writeHandler.asyncClose());
}
/**
 * Writes three full log segments to {@code dlm} starting at {@code txid}; the first two
 * segments are closed and completed normally, while the last one is closed by marking
 * end-of-stream. After each segment, verifies the completed-ledger znode exists with the
 * expected transaction range (end-of-stream segments record
 * {@code DistributedLogConstants.MAX_TXID} as the last txid).
 *
 * @param dlm  log manager to write through.
 * @param txid first transaction id to write.
 * @return the next unused transaction id after all segments are written.
 * @throws Exception on any write or verification failure.
 */
private long writeAndMarkEndOfStream(DistributedLogManager dlm, long txid) throws Exception {
    for (long i = 0; i < 3; i++) {
        long start = txid;
        BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        for (long j = 1; j <= DEFAULT_SEGMENT_SIZE; j++) {
            writer.write(DLMTestUtil.getLogRecordInstance(txid++));
        }
        BKLogSegmentWriter perStreamLogWriter = writer.getCachedLogWriter();

        // The two branches previously duplicated the handler/assert/close sequence;
        // only the close call and the expected end txid actually differ.
        final long endTxId;
        if (i < 2) {
            writer.closeAndComplete();
            endTxId = txid - 1;
        } else {
            writer.markEndOfStream();
            endTxId = DistributedLogConstants.MAX_TXID;
        }

        BKLogWriteHandler blplm = ((BKDistributedLogManager) dlm).createWriteHandler(true);
        assertNotNull(zkc.exists(
                blplm.completedLedgerZNode(start, endTxId, perStreamLogWriter.getLogSegmentSequenceNumber()),
                false));
        FutureUtils.result(blplm.asyncClose());
    }
    return txid;
}
BKLogSegmentWriter perStreamLogWriter = out.getCachedLogWriter(); out.setReadyToFlush(); out.flushAndSync();
out.write(op); BKLogSegmentWriter perStreamLogWriter = out.getCachedLogWriter(); out.closeAndComplete(); BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
BKLogSegmentWriter perStreamLogWriter = writer.getCachedLogWriter();
writer.write(DLMTestUtil.getLogRecordInstance(txid++)); BKLogSegmentWriter perStreamLogWriter = writer.getCachedLogWriter(); writer.closeAndComplete(); BKLogWriteHandler blplm = dlm.createWriteHandler(true);
out.write(op); BKLogSegmentWriter perStreamLogWriter = out.getCachedLogWriter(); out.closeAndComplete(); BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);
out.write(op); BKLogSegmentWriter perStreamLogWriter = out.getCachedLogWriter(); out.closeAndComplete(); BKLogWriteHandler blplm = dlm.createWriteHandler(true);
out.write(op); BKLogSegmentWriter writer = out.getCachedLogWriter(); out.closeAndComplete(); BKLogWriteHandler blplm = ((BKDistributedLogManager) (dlm)).createWriteHandler(true);