SegmentsAndMetadata.getSegments

How to use the getSegments method in org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata

Best Java code snippets using org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata.getSegments (Showing top 20 results out of 315)
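getSegments() returns the List<DataSegment> held by a SegmentsAndMetadata result, as produced by an appenderator push or publish. Before the snippets below, here is a minimal self-contained sketch of reading that list; constructing SegmentsAndMetadata directly with an empty list is purely illustrative, since real code receives the instance from a push or publish future:

import java.util.List;

import com.google.common.collect.ImmutableList;

import org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata;
import org.apache.druid.timeline.DataSegment;

public class GetSegmentsSketch
{
  public static void main(String[] args)
  {
    // Illustrative only: real code gets this from a push/publish step.
    SegmentsAndMetadata result = new SegmentsAndMetadata(ImmutableList.of(), null);

    // getSegments() yields the pushed/published segments.
    List<DataSegment> segments = result.getSegments();
    for (DataSegment segment : segments) {
      System.out.println(segment.getId());
    }
    System.out.println("commitMetadata: " + result.getCommitMetadata());
  }
}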

origin: apache/incubator-druid

@Override
public void onSuccess(SegmentsAndMetadata result)
{
 // Immediately publish after pushing
 for (DataSegment pushedSegment : result.getSegments()) {
  try {
   segmentPublisher.publishSegment(pushedSegment);
  }
  catch (Exception e) {
   errorHandler.apply(e);
  }
 }
 log.info("Published [%,d] sinks.", segmentsToPush.size());
}
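For context, a FutureCallback like the one above is attached to the push future with Guava's Futures.addCallback. A minimal sketch, assuming nothing from the snippet beyond the SegmentsAndMetadata type; the SettableFuture here stands in for the real push future:

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

import org.apache.druid.segment.realtime.appenderator.SegmentsAndMetadata;

public class PushCallbackSketch
{
  public static void main(String[] args)
  {
    // Stand-in for the ListenableFuture<SegmentsAndMetadata> a push returns.
    SettableFuture<SegmentsAndMetadata> pushFuture = SettableFuture.create();

    Futures.addCallback(
        pushFuture,
        new FutureCallback<SegmentsAndMetadata>()
        {
          @Override
          public void onSuccess(SegmentsAndMetadata result)
          {
            // In the snippet above, this is where each pushed segment is published.
            result.getSegments().forEach(segment -> System.out.println(segment.getId()));
          }

          @Override
          public void onFailure(Throwable t)
          {
            t.printStackTrace();
          }
        },
        MoreExecutors.directExecutor()
    );

    // Completing the future fires onSuccess via the direct executor.
    pushFuture.set(new SegmentsAndMetadata(ImmutableList.of(), null));
  }
}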
origin: apache/incubator-druid

(Function<SegmentsAndMetadata, SegmentsAndMetadata>) segmentsAndMetadata -> {
  final Set<SegmentIdWithShardSpec> pushedSegments = segmentsAndMetadata.getSegments().stream()
                                                                        .map(SegmentIdWithShardSpec::fromDataSegment)
                                                                        .collect(Collectors.toSet());
  // Sanity check: the pushed segments must match exactly what was requested.
  if (!pushedSegments.equals(Sets.newHashSet(segmentIdentifiers))) {
    log.warn(
        "Removing segments from deep storage because sanity check failed: %s", segmentsAndMetadata.getSegments()
    );
    segmentsAndMetadata.getSegments().forEach(dataSegmentKiller::killQuietly);
    // remainder of the failure handling truncated in this snippet
origin: apache/incubator-druid

/**
 * Drop segments in background. The segments should be pushed (in batch ingestion) or published (in streaming
 * ingestion) before being dropped.
 *
 * @param segmentsAndMetadata result of pushing or publishing
 *
 * @return a future for dropping segments
 */
ListenableFuture<SegmentsAndMetadata> dropInBackground(SegmentsAndMetadata segmentsAndMetadata)
{
 log.info("Dropping segments[%s]", segmentsAndMetadata.getSegments());
 final ListenableFuture<?> dropFuture = Futures.allAsList(
   segmentsAndMetadata
     .getSegments()
     .stream()
     .map(segment -> appenderator.drop(SegmentIdWithShardSpec.fromDataSegment(segment)))
     .collect(Collectors.toList())
 );
 return Futures.transform(
   dropFuture,
   (Function<Object, SegmentsAndMetadata>) x -> {
    final Object metadata = segmentsAndMetadata.getCommitMetadata();
    return new SegmentsAndMetadata(
      segmentsAndMetadata.getSegments(),
      metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
    );
   }
 );
}
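Per the javadoc, dropInBackground runs only after a successful push or publish. A hedged sketch of that ordering, where publishFuture is an assumed ListenableFuture<SegmentsAndMetadata> from the publish step rather than anything taken from this snippet:

// Illustrative ordering only: publish first, then drop the segments from the
// appenderator in the background.
final SegmentsAndMetadata published = publishFuture.get();
final SegmentsAndMetadata dropped = dropInBackground(published).get();
log.info("Dropped segments[%s]", dropped.getSegments());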
origin: apache/incubator-druid

if (segmentsAndMetadata.getSegments().isEmpty()) {
  log.info("Nothing to publish, skipping publish step.");
} else {
  log.info(
      "Publishing segments with commitMetadata[%s]: [%s]",
      segmentsAndMetadata.getCommitMetadata(),
      Joiner.on(", ").join(segmentsAndMetadata.getSegments())
  );

  try {
    final Object metadata = segmentsAndMetadata.getCommitMetadata();
    final boolean published = publisher.publishSegments(
        ImmutableSet.copyOf(segmentsAndMetadata.getSegments()),
        metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
    ).isSuccess();

    if (!published) {
      // Transaction failed: remove the pushed segments from deep storage, then
      // check whether identical segments were already published (the
      // usedSegmentChecker call is reconstructed around the truncated lines).
      segmentsAndMetadata.getSegments().forEach(dataSegmentKiller::killQuietly);
      if (usedSegmentChecker.findUsedSegments(
              segmentsAndMetadata
                  .getSegments()
                  .stream()
                  .map(SegmentIdWithShardSpec::fromDataSegment)
                  .collect(Collectors.toSet())
          ).equals(Sets.newHashSet(segmentsAndMetadata.getSegments()))) {
        log.info("Our segments really do exist, awaiting handoff.");
      } else {
        // give-up path truncated in this snippet
      }
    }
  }
  catch (Exception e) {
    log.warn(e, "Failed publish, not removing segments: %s", segmentsAndMetadata.getSegments());
    throw Throwables.propagate(e);
  }
}
origin: apache/incubator-druid

// Variable name below is illustrative; the receiver was truncated in this snippet.
final Map<SegmentIdWithShardSpec, DataSegment> segmentMap = segmentsAndMetadata
    .getSegments()
    .stream()
    .collect(Collectors.toMap(SegmentIdWithShardSpec::fromDataSegment, Function.identity()));
origin: apache/incubator-druid

final List<SegmentIdWithShardSpec> waitingSegmentIdList = segmentsAndMetadata.getSegments().stream()
                                                                             .map(SegmentIdWithShardSpec::fromDataSegment)
                                                                             .collect(Collectors.toList());
final Object metadata = segmentsAndMetadata.getCommitMetadata();

// Guard reconstructed around the truncated lines.
if (waitingSegmentIdList.isEmpty()) {
  return Futures.immediateFuture(
      new SegmentsAndMetadata(
          segmentsAndMetadata.getSegments(),
          ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
      )
  );
}
origin: apache/incubator-druid

pushedSegments.addAll(pushed.getSegments());
log.info("Pushed segments[%s]", pushed.getSegments());
origin: apache/incubator-druid

log.info(
  "Handoff completed for segments %s with metadata[%s].",
  Lists.transform(handedOff.getSegments(), DataSegment::getId),
  Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata")
);
origin: apache/incubator-druid

).get();
List<?> publishedSegmentIds = Lists.transform(published.getSegments(), DataSegment::getId);
log.info(
    "Published segments %s with metadata[%s].",
    publishedSegmentIds,
    Preconditions.checkNotNull(published.getCommitMetadata(), "commitMetadata")
);
// ... handoff registration truncated in this snippet ...
log.info(
    "Handoff completed for segments %s with metadata[%s]",
    Lists.transform(handedOff.getSegments(), DataSegment::getId),
    Preconditions.checkNotNull(handedOff.getCommitMetadata(), "commitMetadata")
);
origin: apache/incubator-druid

Assert.assertEquals(numSegments, segmentsAndMetadata.getSegments().size());
Assert.assertEquals(numSegments * MAX_ROWS_PER_SEGMENT, segmentsAndMetadata.getCommitMetadata());
origin: apache/incubator-druid

    log.info("Pushed segments[%s]", pushed.getSegments());
log.info("Pushed segments[%s]", pushed.getSegments());
   buildSegmentsMeters.getThrownAway()
 );
 log.info("Published segments: %s", Lists.transform(published.getSegments(), DataSegment::getId));
origin: apache/incubator-druid

@Test(timeout = 60_000L)
public void testSimple() throws Exception
{
 final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();
 Assert.assertNull(driver.startJob());
 for (int i = 0; i < ROWS.size(); i++) {
  committerSupplier.setMetadata(i + 1);
  Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
 }
 final SegmentsAndMetadata published = driver.publish(
   makeOkPublisher(),
   committerSupplier.get(),
   ImmutableList.of("dummy")
 ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 while (driver.getSegments().containsKey("dummy")) {
  Thread.sleep(100);
 }
 final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
                            .get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   asIdentifiers(segmentsAndMetadata.getSegments())
 );
 Assert.assertEquals(3, segmentsAndMetadata.getCommitMetadata());
}
origin: apache/incubator-druid

@Test
public void testSimple() throws Exception
{
 Assert.assertNull(driver.startJob());
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
 }
 checkSegmentStates(2, SegmentState.APPENDING);
 driver.pushAllAndClear(TIMEOUT);
 checkSegmentStates(2, SegmentState.PUSHED_AND_DROPPED);
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}
origin: apache/incubator-druid

// The enclosing Assert.assertEquals(expected, ...) and the body of the
// anonymous Function are truncated in this snippet; the Function maps each
// DataSegment to its id, equivalent to SegmentIdWithShardSpec::fromDataSegment.
sorted(
    Lists.transform(segmentsAndMetadata.getSegments(), SegmentIdWithShardSpec::fromDataSegment)
);
Assert.assertEquals(sorted(tester.getPushedSegments()), sorted(segmentsAndMetadata.getSegments()));
origin: apache/incubator-druid

Assert.assertEquals(
    ImmutableSet.of(
        new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0))
    ),
    asIdentifiers(segmentsAndMetadata.getSegments())
);
// Two further assertions on asIdentifiers(segmentsAndMetadata.getSegments())
// are truncated in this snippet.
origin: apache/incubator-druid

Assert.assertEquals(
    ImmutableSet.of(
        new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0))
    ),
    asIdentifiers(handedoffFromSequence0.getSegments())
);
Assert.assertEquals(
    ImmutableSet.of(
        new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
    ),
    asIdentifiers(handedoffFromSequence1.getSegments())
);
origin: apache/incubator-druid

@Test
public void testIncrementalPush() throws Exception
{
 Assert.assertNull(driver.startJob());
 int i = 0;
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
  checkSegmentStates(1, SegmentState.APPENDING);
  checkSegmentStates(i, SegmentState.PUSHED_AND_DROPPED);
  driver.pushAllAndClear(TIMEOUT);
  checkSegmentStates(0, SegmentState.APPENDING);
  checkSegmentStates(++i, SegmentState.PUSHED_AND_DROPPED);
 }
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(1, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}

Popular methods of SegmentsAndMetadata

  • getCommitMetadata
  • <init>
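
These pair with getSegments when rewrapping a result, as the dropInBackground snippet above does. A short sketch grounded in that snippet; segmentsAndMetadata is assumed to come from a push or publish:

// Unwrap the caller-visible metadata from the driver-level commit metadata
// and rewrap it with the same segments.
final Object metadata = segmentsAndMetadata.getCommitMetadata();
final SegmentsAndMetadata rewrapped = new SegmentsAndMetadata(
    segmentsAndMetadata.getSegments(),
    metadata == null ? null : ((AppenderatorDriverMetadata) metadata).getCallerMetadata()
);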
