AppenderatorDriverAddResult.isOk

How to use the isOk method in org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult

Best Java code snippets using org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult.isOk (Showing top 19 results out of 315)
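
Before the indexed results below, here is a minimal sketch (not Druid source) of the basic pattern they share: add rows through an appenderator driver and gate all further handling on isOk(). The BatchAppenderatorDriver parameter type and the "dummy" sequence name simply mirror the test snippets on this page; the driver and rows are assumed to be created elsewhere.

import java.io.IOException;
import java.util.List;

import org.apache.druid.data.input.InputRow;
import org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult;
import org.apache.druid.segment.realtime.appenderator.BatchAppenderatorDriver;

public class IsOkUsageSketch
{
 // Sketch only: add each row and fail fast when the driver reports a problem.
 static void addAll(BatchAppenderatorDriver driver, List<InputRow> rows) throws IOException
 {
  for (InputRow row : rows) {
   final AppenderatorDriverAddResult addResult = driver.add(row, "dummy");
   if (!addResult.isOk()) {
    // isOk() is false when the row could not be added to a segment,
    // e.g. because segment allocation failed; real tasks fail or skip here.
    throw new IllegalStateException("Could not add row with timestamp " + row.getTimestamp());
   }
  }
 }
}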

origin: apache/incubator-druid

@Test(timeout = 60_000L, expected = TimeoutException.class)
public void testHandoffTimeout() throws Exception
{
 final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();
 segmentHandoffNotifierFactory.disableHandoff();
 Assert.assertNull(driver.startJob());
 for (int i = 0; i < ROWS.size(); i++) {
  committerSupplier.setMetadata(i + 1);
  Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
 }
 final SegmentsAndMetadata published = driver.publish(
   makeOkPublisher(),
   committerSupplier.get(),
   ImmutableList.of("dummy")
 ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 while (driver.getSegments().containsKey("dummy")) {
  Thread.sleep(100);
 }
 driver.registerHandoff(published).get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
}
origin: apache/incubator-druid

);
if (addResult.isOk()) {
origin: apache/incubator-druid

Assert.assertTrue(addResult.isOk());
if (addResult.getNumRowsInSegment() > MAX_ROWS_PER_SEGMENT) {
 driver.moveSegmentOut("dummy", ImmutableList.of(addResult.getSegmentIdentifier()));
origin: apache/incubator-druid

@Test(timeout = 60_000L)
public void testSimple() throws Exception
{
 final TestCommitterSupplier<Integer> committerSupplier = new TestCommitterSupplier<>();
 Assert.assertNull(driver.startJob());
 for (int i = 0; i < ROWS.size(); i++) {
  committerSupplier.setMetadata(i + 1);
  Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
 }
 final SegmentsAndMetadata published = driver.publish(
   makeOkPublisher(),
   committerSupplier.get(),
   ImmutableList.of("dummy")
 ).get(PUBLISH_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 while (driver.getSegments().containsKey("dummy")) {
  Thread.sleep(100);
 }
 final SegmentsAndMetadata segmentsAndMetadata = driver.registerHandoff(published)
                            .get(HANDOFF_CONDITION_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   asIdentifiers(segmentsAndMetadata.getSegments())
 );
 Assert.assertEquals(3, segmentsAndMetadata.getCommitMetadata());
}
origin: apache/incubator-druid

final AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName);
if (addResult.isOk()) {
 if (addResult.isPushRequired(maxRowsPerSegment, maxTotalRows)) {
origin: apache/incubator-druid

@Test
public void testSimple() throws Exception
{
 Assert.assertNull(driver.startJob());
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
 }
 checkSegmentStates(2, SegmentState.APPENDING);
 driver.pushAllAndClear(TIMEOUT);
 checkSegmentStates(2, SegmentState.PUSHED_AND_DROPPED);
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

if (addResult.isOk()) {
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(0), "sequence_0", committerSupplier, false, true).isOk());
 Assert.assertTrue(driver.add(ROWS.get(i), "sequence_1", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

@Test
public void testIncrementalPush() throws Exception
{
 Assert.assertNull(driver.startJob());
 int i = 0;
 for (InputRow row : ROWS) {
  Assert.assertTrue(driver.add(row, "dummy").isOk());
  checkSegmentStates(1, SegmentState.APPENDING);
  checkSegmentStates(i, SegmentState.PUSHED_AND_DROPPED);
  driver.pushAllAndClear(TIMEOUT);
  checkSegmentStates(0, SegmentState.APPENDING);
  checkSegmentStates(++i, SegmentState.PUSHED_AND_DROPPED);
 }
 final SegmentsAndMetadata published = driver.publishAll(makeOkPublisher()).get(
   TIMEOUT,
   TimeUnit.MILLISECONDS
 );
 Assert.assertEquals(
   ImmutableSet.of(
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(0, 0)),
     new SegmentIdWithShardSpec(DATA_SOURCE, Intervals.of("2000T01/PT1H"), VERSION, new NumberedShardSpec(1, 0))
   ),
   published.getSegments()
        .stream()
        .map(SegmentIdWithShardSpec::fromDataSegment)
        .collect(Collectors.toSet())
 );
 Assert.assertNull(published.getCommitMetadata());
}
origin: apache/incubator-druid

Assert.assertTrue(driver.add(ROWS.get(0), "dummy", committerSupplier, false, true).isOk());
Assert.assertTrue(driver.add(ROWS.get(i), "dummy", committerSupplier, false, true).isOk());
origin: apache/incubator-druid

AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName, committerSupplier);
if (addResult.isOk()) {
 if (addResult.isPushRequired(tuningConfig)) {
  publishSegments(driver, publisher, committerSupplier, sequenceName);
origin: org.apache.druid/druid-indexing-service

final AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName);
if (addResult.isOk()) {
 if (exceedMaxRowsInSegment(targetPartitionSize, addResult.getNumRowsInSegment()) ||
   exceedMaxRowsInAppenderator(maxTotalRows, addResult.getTotalNumRowsInAppenderator())) {
origin: org.apache.druid/druid-indexing-service

if (addResult.isOk()) {
origin: org.apache.druid/druid-indexing-service

AppenderatorDriverAddResult addResult = driver.add(inputRow, sequenceName, committerSupplier);
if (addResult.isOk()) {
 if (addResult.isPushRequired(tuningConfig)) {
  publishSegments(driver, publisher, committerSupplier, sequenceName);

Popular methods of AppenderatorDriverAddResult

  • getNumRowsInSegment
  • getParseException
  • getSegmentIdentifier
  • getTotalNumRowsInAppenderator
  • isPersistRequired
  • isPushRequired
  • <init>
  • fail
  • ok
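
These methods are typically consulted together right after a driver.add(...) call. The following sketch (not Druid source) combines them roughly the way the IndexTask-style snippets above do; the threshold constants and the persist(...) stub are illustrative placeholders, not Druid defaults or APIs.

import org.apache.druid.segment.realtime.appenderator.AppenderatorDriverAddResult;

public class AddResultDecisionSketch
{
 // Illustrative limits for this sketch, not Druid defaults.
 private static final Integer MAX_ROWS_PER_SEGMENT = 1_000_000;
 private static final Long MAX_TOTAL_ROWS = 20_000_000L;

 // Returns true when the caller should push and publish its open segments.
 static boolean handle(AppenderatorDriverAddResult addResult)
 {
  if (!addResult.isOk()) {
   // The row could not be added, e.g. because no segment could be allocated.
   throw new IllegalStateException("Could not allocate a segment for the row");
  }
  if (addResult.getParseException() != null) {
   // The add carried a parse problem; tasks usually count or rethrow this
   // depending on their parse-error configuration.
   throw new RuntimeException(addResult.getParseException());
  }
  if (addResult.isPersistRequired()) {
   // The appenderator asked for an incremental persist.
   persist(addResult);
  }
  // Push when the open segment or the whole appenderator is getting large,
  // mirroring the isPushRequired(maxRowsPerSegment, maxTotalRows) usage above.
  return addResult.isPushRequired(MAX_ROWS_PER_SEGMENT, MAX_TOTAL_ROWS);
 }

 // Hypothetical stub standing in for the task-specific persist call.
 private static void persist(AppenderatorDriverAddResult addResult)
 {
  // Intentionally left empty in this sketch.
 }
}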
