sizeSummed += size; boolean alreadyOffloaded = e.getValue().hasOffloadContext() && e.getValue().getOffloadContext().getComplete(); if (alreadyOffloaded) { alreadyOffloadedSize += size;
hash = (37 * hash) + COMPLETE_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( getComplete());
if (info != null && info.hasOffloadContext() && info.getOffloadContext().getComplete()) { UUID uid = new UUID(info.getOffloadContext().getUidMsb(), info.getOffloadContext().getUidLsb());
.filter(e -> e.getLedgerId() == trimmedLedger).count(), 0); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().getComplete()).count(), 0); .filter(e -> e.getOffloadContext().getComplete()).count(), 1); Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); Assert.assertEquals(offloader.offloadedLedgers().size(), 1); Assert.assertTrue(offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId()));
.filter(e -> e.getOffloadContext().getComplete()).count(), 2); Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); Assert.assertTrue(ledger.getLedgersInfoAsList().get(1).getOffloadContext().getComplete());
result = result && (getComplete() == other.getComplete());
.filter(e -> e.getOffloadContext().getComplete()) .map(e -> e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers()); .filter(e -> e.getOffloadContext().getComplete()) .map(e -> e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers());
.filter(e -> e.getOffloadContext().getComplete()) .map(e -> e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers());
Assert.assertEquals(offloader.offloadedLedgers().size(), 1); Assert.assertTrue(offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId())); Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().getComplete()).count(), 1); Assert.assertEquals(firstUnoffloaded.getLedgerId(), ledger.getLedgersInfoAsList().get(1).getLedgerId()); Assert.assertEquals(firstUnoffloaded.getEntryId(), 0); Assert.assertTrue(offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(0).getLedgerId())); Assert.assertTrue(offloader.offloadedLedgers().contains(ledger.getLedgersInfoAsList().get(1).getLedgerId())); Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete()); Assert.assertTrue(ledger.getLedgersInfoAsList().get(1).getOffloadContext().getComplete()); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().getComplete()).count(), 2); Assert.assertEquals(firstUnoffloaded2.getLedgerId(), ledger.getLedgersInfoAsList().get(2).getLedgerId());
.filter(e -> e.getOffloadContext().getComplete()).count(), 0); try { ledger.offloadPrefix(p); .filter(e -> e.getOffloadContext().getComplete()).count(), 0); .filter(e -> e.getOffloadContext().getComplete()).count(), 0);
.filter(e -> e.getOffloadContext().getComplete()).count(), 0); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().hasUidMsb()).count(), 1);
.filter(e -> e.getOffloadContext().getComplete()) .map(e -> e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers());
.filter(e -> e.getOffloadContext().getComplete()).count(), 0); Assert.assertEquals(offloader.offloadedLedgers().size(), 0);
.filter(e -> e.getOffloadContext().getComplete()) .map(e -> e.getLedgerId()).collect(Collectors.toSet()), offloader.offloadedLedgers()); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().getComplete()).count(), 2); Assert.assertFalse(ledger.getLedgersInfoAsList().get(failIndex).getOffloadContext().getComplete());
/**
 * Verifies that after a cursor's mark-delete position passes an offloaded
 * ledger, the ledger is trimmed from the managed ledger's list and the
 * offloaded copy is eventually deleted from the offloader too.
 *
 * <p>Fix: removed three locals ({@code deleted}, {@code errorLedgers},
 * {@code failedOffloads}) that were declared but never read or written —
 * dead code left over from an earlier version of the test.
 */
@Test
public void testOffloadDelete() throws Exception {
    MockLedgerOffloader offloader = new MockLedgerOffloader();
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    // Zero retention so consumed ledgers become trimmable immediately.
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");

    // 15 entries at 10 entries/ledger => two ledgers.
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);

    // Offload everything up to the last confirmed entry; only the first
    // (closed) ledger is eligible, so exactly one is marked complete.
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    Assert.assertEquals(ledger.getLedgersInfoAsList().stream()
                        .filter(e -> e.getOffloadContext().getComplete()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().getComplete());

    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();

    // Consuming everything lets trimming drop the first ledger; the
    // offloaded copy must then be deleted from the offloader as well.
    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);
    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}
@Test public void testOffloadClosedManagedLedger() throws Exception { MockLedgerOffloader offloader = new MockLedgerOffloader(); ManagedLedgerConfig config = new ManagedLedgerConfig(); config.setMaxEntriesPerLedger(10); config.setMinimumRolloverTime(0, TimeUnit.SECONDS); config.setRetentionTime(10, TimeUnit.MINUTES); config.setLedgerOffloader(offloader); ManagedLedgerImpl ledger = (ManagedLedgerImpl)factory.open("my_test_ledger", config); for (int i = 0; i < 21; i++) { String content = "entry-" + i; ledger.addEntry(content.getBytes()); } Position p = ledger.getLastConfirmedEntry(); ledger.close(); try { ledger.offloadPrefix(p); Assert.fail("Should fail because ML is closed"); } catch (ManagedLedgerException.ManagedLedgerAlreadyClosedException e) { // expected } Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3); Assert.assertEquals(ledger.getLedgersInfoAsList().stream() .filter(e -> e.getOffloadContext().getComplete()).count(), 0); Assert.assertEquals(offloader.offloadedLedgers().size(), 0); }
/**
 * Offloading at the same position twice must be idempotent: the second
 * call changes neither the ledger list nor the set of offloaded ledgers.
 */
@Test
public void testOffloadSamePositionTwice() throws Exception {
    MockLedgerOffloader offloader = new MockLedgerOffloader();
    ManagedLedgerConfig conf = new ManagedLedgerConfig();
    conf.setMaxEntriesPerLedger(10);
    conf.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    conf.setRetentionTime(10, TimeUnit.MINUTES);
    conf.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", conf);

    // 25 entries at 10 entries/ledger => three ledgers.
    for (int entryIdx = 0; entryIdx < 25; entryIdx++) {
        String content = "entry-" + entryIdx;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    // First offload: the completed ledgers must match what the offloader holds.
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);
    Assert.assertEquals(ledger.getLedgersInfoAsList().stream()
                        .filter(info -> info.getOffloadContext().getComplete())
                        .map(info -> info.getLedgerId()).collect(Collectors.toSet()),
                        offloader.offloadedLedgers());

    // Second offload at the identical position: everything stays the same.
    ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);
    Assert.assertEquals(ledger.getLedgersInfoAsList().stream()
                        .filter(info -> info.getOffloadContext().getComplete())
                        .map(info -> info.getLedgerId()).collect(Collectors.toSet()),
                        offloader.offloadedLedgers());
}
// Protobuf-generated merge for OffloadContext: copy each field from `other`
// into this builder only when `other` has that field explicitly set, so unset
// fields in `other` never overwrite values already present here.
// NOTE(review): generated code — keep byte-identical to the protoc output;
// do not hand-edit logic.
public Builder mergeFrom(org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext other) {
  // Merging the default instance is a no-op.
  if (other == org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext.getDefaultInstance()) return this;
  if (other.hasUidMsb()) { setUidMsb(other.getUidMsb()); }
  if (other.hasUidLsb()) { setUidLsb(other.getUidLsb()); }
  if (other.hasComplete()) { setComplete(other.getComplete()); }
  if (other.hasBookkeeperDeleted()) { setBookkeeperDeleted(other.getBookkeeperDeleted()); }
  if (other.hasTimestamp()) { setTimestamp(other.getTimestamp()); }
  // Message-typed field: recursive merge rather than replace.
  if (other.hasDriverMetadata()) { mergeDriverMetadata(other.getDriverMetadata()); }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
/**
 * Basic offload path: after offloading up to the last confirmed entry,
 * the set of ledgers marked complete equals the set the offloader holds.
 */
@Test
public void testOffload() throws Exception {
    MockLedgerOffloader offloader = new MockLedgerOffloader();
    ManagedLedgerConfig conf = new ManagedLedgerConfig();
    conf.setMaxEntriesPerLedger(10);
    conf.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    conf.setRetentionTime(10, TimeUnit.MINUTES);
    conf.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", conf);

    // 25 entries at 10 entries/ledger => three ledgers.
    for (int entryIdx = 0; entryIdx < 25; entryIdx++) {
        String content = "entry-" + entryIdx;
        ledger.addEntry(content.getBytes());
    }
    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);

    ledger.offloadPrefix(ledger.getLastConfirmedEntry());

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 3);
    Assert.assertEquals(ledger.getLedgersInfoAsList().stream()
                        .filter(info -> info.getOffloadContext().getComplete())
                        .map(info -> info.getLedgerId()).collect(Collectors.toSet()),
                        offloader.offloadedLedgers());
}
/**
 * Returns true when an offload is complete, its BookKeeper copy has not yet
 * been deleted, and more than the configured deletion lag has elapsed since
 * the offload timestamp (per {@code clock}).
 */
private boolean isOffloadedNeedsDelete(OffloadContext offload) {
    if (!offload.getComplete() || offload.getBookkeeperDeleted()) {
        return false;
    }
    long ageMs = clock.millis() - offload.getTimestamp();
    return ageMs > config.getOffloadLedgerDeletionLagMillis();
}