SegmentsAndMetadata.getCommitMetadata

How to use the getCommitMetadata method in org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata

Best Java code snippets using org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata.getCommitMetadata (Showing top 15 results out of 315)
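Across these snippets, getCommitMetadata() returns an opaque Object: it is null after a plain batch publish, an AppenderatorDriverMetadata wrapper inside the appenderator driver, or whatever value the caller's committer supplied (an Integer or a Map in the tests below). A minimal sketch of the defensive access pattern, based only on the calls shown on this page; the describeCommitMetadata helper name is made up for illustration:

// Hypothetical helper illustrating the pattern used in the snippets below:
// the commit metadata is an untyped, possibly-null Object that callers must
// null-check and, inside the appenderator driver, unwrap before use.
static String describeCommitMetadata(SegmentsAndMetadata segmentsAndMetadata)
{
  final Object metadata = segmentsAndMetadata.getCommitMetadata();
  if (metadata == null) {
    return "no commit metadata"; // e.g. the batch driver's publishAll()
  } else if (metadata instanceof AppenderatorDriverMetadata) {
    // driver-internal wrapper around the caller's original metadata
    return String.valueOf(((AppenderatorDriverMetadata) metadata).getCallerMetadata());
  } else {
    return String.valueOf(metadata); // caller-supplied object (Integer, Map, ...)
  }
}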

origin: apache/incubator-druid

/**
 * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming
 * ingestion) before being dropped.
 *
 * @param segmentsAndMetadata result of pushing or publishing
 *
 * @return a future for dropping segments
 */
ListenableFuture<SegmentsAndMetadata> dropInBackground(SegmentsAndMetadata segmentsAndMetadata)
{
 log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments());
 final ListenableFuture<?> dropFuture = Futures.allAsList(
   segmentsAndMetadata
     .getSegments()
     .stream()
     .map(segment -> appenderator.drop(SegmentIdWithShardSpec.fromDataSegment(segment)))
     .collect(Collectors.toList())
 );
 return Futures.transform(
   dropFuture,
   (Function<Object, SegmentsAndMetadata>) x -> {
    final Object metadata = segmentsAndMetadata.getCommitMetadata();
    return new SegmentsAndMetadata(
      segmentsAndMetadata.getSegments(),
      metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
    );
   }
 );
}
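The null-check-and-cast inside the transform above is the recurring idiom for reading commit metadata within the driver. The same logic as a stand-alone sketch (unwrapCallerMetadata is a hypothetical helper name, not part of Druid):

// Hypothetical helper, not part of Druid: tolerates null commit metadata and
// otherwise unwraps the caller-supplied value from AppenderatorDriverMetadata.
@Nullable
private static Object unwrapCallerMetadata(SegmentsAndMetadata segmentsAndMetadata)
{
  final Object metadata = segmentsAndMetadata.getCommitMetadata();
  return metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata();
}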
origin: apache/incubator-druid

log.info(
  "Publishing segments with commitMetadata[%s]: [%s]",
  segmentsAndMetadata.getCommitMetadata(),
  Joiner.on(", ").join(segmentsAndMetadata.getSegments())
);
 final Object metadata = segmentsAndMetadata.getCommitMetadata();
 final boolean published = publisher.publishSegments(
   ImmutableSet.copyOf(segmentsAndMetadata.getSegments()),
origin: apache/incubator-druid

                                         SegmentIdWithShardSpec::fromDataSegment)
                                       .collect(Collectors.toList());
final Object metadata = Preconditions.checkNotNull(segmentsAndMetadata.getCommitMetadata(), "commitMetadata");
origin: apache/incubator-druid

  "Handoff completed for segments %s with metadata[%s].",
  Lists.transform(handedOff.getSegments(), DataSegment::getId),
  Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata")
);
origin: apache/incubator-druid

  "Published segments %s with metadata[%s].",
  publishedSegmentIds,
  Preconditions.checkNotNull(published.getCommitMetadata(), "commitMetadata")
);
   "Handoff completed for segments %s with metadata[%s]",
   Lists.transform(handedOff.getSegments(), DataSegment::getId),
   Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata")
 );
origin: apache/incubator-druid

                           .get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
Assert.assertEquals(numSegments, segmentsAndMetadata.getSegments().size());
Assert.assertEquals(numSegments * MAX_ROWS_PER_SEGMENT, segmentsAndMetadata.getCommitMetadata());
origin: apache/incubator-druid

@Test
public void testSimple() throws Exception
{
 Assert.assertNull(driver.startJob());
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
 }
 checkSegmentStates(2, SegmentState.APPENDING);
 driver.pushAllAndClear(TIMEOUT);
 checkSegmentStates(2, SegmentState.PUSHED_AND_DROPPED);
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}
origin: apache/incubator-druid

@Test(timeout = 60_000L)
public void testSimple() throws Exception
{
 final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();
 Assert.assertNull(driver.startJob());
 for (int i = 0; i < ROWS.size(); i++) {
  committerSupplier.setMetadata(i + 1);
  Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
 }
 final SegmentsAndMetadata published = driver.publish(
   makeOkPublisher(),
   committerSupplier.get(),
   ImmutableList.of("dummy")
 ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 while (driver.getSegments().containsKey("dummy")) {
  Thread.sleep(100);
 }
 final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
                            .get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   asIdentifiers(segmentsAndMetadata.getSegments())
 );
 Assert.assertEquals(3, segmentsAndMetadata.getCommitMetadata());
}
origin: apache/incubator-druid

Assert.assertEquals(1, segmentsAndMetadata.getCommitMetadata());
Assert.assertEquals(i + 1, segmentsAndMetadata.getCommitMetadata());
Assert.assertEquals(3, segmentsAndMetadata.getCommitMetadata());
origin: apache/incubator-druid

Assert.assertEquals(3, handedoffFromSequence0.getCommitMetadata());
Assert.assertEquals(3, handedoffFromSequence1.getCommitMetadata());
origin: apache/incubator-druid

  false
).get();
Assert.assertEquals(ImmutableMap.of("x", "3"), (Map<String, String>) segmentsAndMetadata.getCommitMetadata());
Assert.assertEquals(
  IDENTIFIERS.subList(0, 2),
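As the cast to Map<String, String> above shows, the commit metadata is simply whatever object the caller's committer handed to the driver at publish time. A minimal sketch of such a committer, assuming Druid's Committer interface (which extends Runnable and exposes getMetadata()):

// Sketch only: a committer whose metadata later comes back via getCommitMetadata().
final Committer mapCommitter = new Committer()
{
  @Override
  public Object getMetadata()
  {
    return ImmutableMap.of("x", "3"); // arbitrary caller-defined metadata
  }

  @Override
  public void run()
  {
    // invoked once the data is durably persisted; e.g. checkpoint an offset here
  }
};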
origin: apache/incubator-druid

@Test
public void testIncrementalPush() throws Exception
{
 Assert.assertNull(driver.startJob());
 int i = 0;
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
  checkSegmentStates(1, SegmentState.APPENDING);
  checkSegmentStates(i, SegmentState.PUSHED_AND_DROPPED);
  driver.pushAllAndClear(TIMEOUT);
  checkSegmentStates(0, SegmentState.APPENDING);
  checkSegmentStates(++i, SegmentState.PUSHED_AND_DROPPED);
 }
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(1, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}

Popular methods of SegmentsAndMetadata

  • getSegments
  • <init>
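A short sketch tying these methods together, assuming SegmentsAndMetadata is a plain value holder whose two-argument constructor pairs a list of DataSegments with an arbitrary commit-metadata object (the inputs below are placeholders):

// Placeholder inputs for illustration only.
final List<DataSegment> pushedSegments = ImmutableList.of(); // e.g. the segments returned by a push
final Object callerMetadata = ImmutableMap.of("offset", "42");

final SegmentsAndMetadata segmentsAndMetadata = new SegmentsAndMetadata(pushedSegments, callerMetadata);

// The getters return what was passed to the constructor.
Assert.assertEquals(pushedSegments, segmentsAndMetadata.getSegments());
Assert.assertEquals(callerMetadata, segmentsAndMetadata.getCommitMetadata());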
