/**
 * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming
 * ingestion) before being dropped.
 *
 * @param segmentsAndMetadata result of pushing or publishing
 *
 * @return a future for dropping segments
 */
ListenableFuture<SegmentsAndMetadata> dropInBackground(SegmentsAndMetadata segmentsAndMetadata)
{
  log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments());

  // Issue one asynchronous drop per segment and gather them into a single future.
  final ListenableFuture<?> dropFuture = Futures.allAsList(
      segmentsAndMetadata
          .getSegments()
          .stream()
          .map(SegmentIdWithShardSpec::fromDataSegment)
          .map(appenderator::drop)
          .collect(Collectors.toList())
  );

  return Futures.transform(
      dropFuture,
      (Function<Object, SegmentsAndMetadata>) ignored -> {
        // The driver stores its own bookkeeping metadata; hand the caller back only the portion it supplied.
        final Object driverMetadata = segmentsAndMetadata.getCommitMetadata();
        final Object callerMetadata =
            driverMetadata == null ? null : ((AppenderatorDriverMetadata) driverMetadata).getCallerMetadata();
        return new SegmentsAndMetadata(segmentsAndMetadata.getSegments(), callerMetadata);
      }
  );
}
log.info( "Publishing segments with commitMetadata[%s]: [%s]", segmentsAndMetadata.getCommitMetadata(), Joiner.on(", ").join(segmentsAndMetadata.getSegments()) ); final Object metadata = segmentsAndMetadata.getCommitMetadata(); final boolean published = publisher.publishSegments( ImmutableSet.copyOf(segmentsAndMetadata.getSegments()),
SegmentIdWithShardSpec::fromDataSegment) .collect(Collectors.toList()); final Object metadata = Preconditions.checkNotNull(segmentsAndMetadata.getCommitMetadata(), "commitMetadata");
"Handoff completed for segments %s with metadata[%s].", Lists.transform(handedOff.getSegments(), DataSegment::getId), Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata") );
"Published segments %s with metadata[%s].", publishedSegmentIds, Preconditions.checkNotNull(published.getCommitMetadata(), "commitMetadata") ); "Handoff completed for segments %s with metadata[%s]", Lists.transform(handedOff.getSegments(), DataSegment::getId), Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata") );
.get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS); Assert.assertEquals(numSegments, segmentsAndMetadata.getSegments().size()); Assert.assertEquals(numSegments * MAX_ROWS_PER_SEGMENT, segmentsAndMetadata.getCommitMetadata());
@Test
public void testSimple() throws Exception
{
  // Starting a fresh job should report no previously-committed metadata.
  Assert.assertNull(driver.startJob());

  // Append every row under the same sequence; each add must be accepted.
  for (InputRow row : ROWS) {
    Assert.assertTrue(driver.add(row, "dummy").isOk());
  }

  // Two hourly segments should be open, then pushed and dropped as a unit.
  checkSegmentStates(2, SegmentState.APPENDING);
  driver.pushAllAndClear(TIMEOUT);
  checkSegmentStates(2, SegmentState.PUSHED_AND_DROPPED);

  final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
      TIMEOUT,
      TimeUnit.MILLISECONDS
  );

  // Exactly one numbered shard per hour bucket is expected.
  final ImmutableSet<SegmentIdWithShardSpec> expectedIds = ImmutableSet.of(
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
  );
  final Set<SegmentIdWithShardSpec> actualIds = published.getSegments()
                                                         .stream()
                                                         .map(SegmentIdWithShardSpec::fromDataSegment)
                                                         .collect(Collectors.toSet());
  Assert.assertEquals(expectedIds, actualIds);

  // Batch publishing carries no commit metadata.
  Assert.assertNull(published.getCommitMetadata());
}
@Test(timeout = 60_000L)
public void testSimple() throws Exception
{
  final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();

  // A fresh job reports no previously-committed metadata.
  Assert.assertNull(driver.startJob());

  // Append each row, advancing the committer metadata to the count of rows added so far.
  for (int rowIndex = 0; rowIndex < ROWS.size(); rowIndex++) {
    committerSupplier.setMetadata(rowIndex + 1);
    Assert.assertTrue(driver.add(ROWS.get(rowIndex), "dummy", committerSupplier, false, true).isOk());
  }

  final SegmentsAndMetadata published = driver.publish(
      makeOkPublisher(),
      committerSupplier.get(),
      ImmutableList.of("dummy")
  ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);

  // Poll until the driver has released the sequence, i.e. publishing has fully completed.
  while (driver.getSegments().containsKey("dummy")) {
    Thread.sleep(100);
  }

  final SegmentsAndMetadata handedOff = driver.registerHandoff(published)
                                              .get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);

  // One numbered shard per hour bucket is expected after handoff.
  final ImmutableSet<SegmentIdWithShardSpec> expectedIds = ImmutableSet.of(
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
  );
  Assert.assertEquals(expectedIds, asIdentifiers(handedOff.getSegments()));

  // The final commit metadata is the last value set: the total row count.
  Assert.assertEquals(3, handedOff.getCommitMetadata());
}
); Assert.assertEquals(1, segmentsAndMetadata.getCommitMetadata()); ); Assert.assertEquals(i + 1, segmentsAndMetadata.getCommitMetadata()); ); Assert.assertEquals(3, segmentsAndMetadata.getCommitMetadata());
); Assert.assertEquals(3, handedoffFromSequence0.getCommitMetadata()); Assert.assertEquals(3, handedoffFromSequence1.getCommitMetadata());
false ).get(); Assert.assertEquals(ImmutableMap.of("x", "3"), (Map<String, String>) segmentsAndMetadata.getCommitMetadata()); Assert.assertEquals( IDENTIFIERS.subList(0, 2),
@Test
public void testIncrementalPush() throws Exception
{
  // A fresh job reports no previously-committed metadata.
  Assert.assertNull(driver.startJob());

  // Add one row at a time and push after every add, verifying the running segment-state counts:
  // before each push there is 1 appending segment and `rowIndex` already pushed-and-dropped; after the
  // push there are 0 appending and `rowIndex + 1` pushed-and-dropped.
  for (int rowIndex = 0; rowIndex < ROWS.size(); rowIndex++) {
    Assert.assertTrue(driver.add(ROWS.get(rowIndex), "dummy").isOk());
    checkSegmentStates(1, SegmentState.APPENDING);
    checkSegmentStates(rowIndex, SegmentState.PUSHED_AND_DROPPED);
    driver.pushAllAndClear(TIMEOUT);
    checkSegmentStates(0, SegmentState.APPENDING);
    checkSegmentStates(rowIndex + 1, SegmentState.PUSHED_AND_DROPPED);
  }

  final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
      TIMEOUT,
      TimeUnit.MILLISECONDS
  );

  // Per-row pushes in the same hour create additional numbered shards (partition numbers 0 and 1).
  final ImmutableSet<SegmentIdWithShardSpec> expectedIds = ImmutableSet.of(
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
      new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(1, 0))
  );
  final Set<SegmentIdWithShardSpec> actualIds = published.getSegments()
                                                         .stream()
                                                         .map(SegmentIdWithShardSpec::fromDataSegment)
                                                         .collect(Collectors.toSet());
  Assert.assertEquals(expectedIds, actualIds);

  // Batch publishing carries no commit metadata.
  Assert.assertNull(published.getCommitMetadata());
}
/**
 * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming
 * ingestion) before being dropped.
 *
 * @param segmentsAndMetadata result of pushing or publishing
 *
 * @return a future for dropping segments
 */
ListenableFuture<SegmentsAndMetadata> dropInBackground(SegmentsAndMetadata segmentsAndMetadata)
{
  log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments());
  // Fire off an asynchronous drop for each segment and combine them into one future that
  // completes when every per-segment drop has finished.
  final ListenableFuture<?> dropFuture = Futures.allAsList(
      segmentsAndMetadata
          .getSegments()
          .stream()
          .map(segment -> appenderator.drop(SegmentIdentifier.fromDataSegment(segment)))
          .collect(Collectors.toList())
  );
  return Futures.transform(
      dropFuture,
      (Function<Object, SegmentsAndMetadata>) x -> {
        // The stored commit metadata is driver-internal bookkeeping (AppenderatorDriverMetadata);
        // unwrap it so the returned value carries only the caller-supplied metadata, if any.
        final Object metadata = segmentsAndMetadata.getCommitMetadata();
        return new SegmentsAndMetadata(
            segmentsAndMetadata.getSegments(),
            metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
        );
      }
  );
}
log.info( "Publishing segments with commitMetadata[%s]: [%s]", segmentsAndMetadata.getCommitMetadata(), Joiner.on(", ").join(segmentsAndMetadata.getSegments()) ); final Object metadata = segmentsAndMetadata.getCommitMetadata(); final boolean published = publisher.publishSegments( ImmutableSet.copyOf(segmentsAndMetadata.getSegments()),
.map(SegmentIdentifier::fromDataSegment) .collect(Collectors.toList()); final Object metadata = Preconditions.checkNotNull(segmentsAndMetadata.getCommitMetadata(), "commitMetadata");