Tabnine Logo
org.apache.druid.segment.realtime.appenderator
Code IndexAdd Tabnine to your IDE (free)

How to use org.apache.druid.segment.realtime.appenderator

Best Java code snippets using org.apache.druid.segment.realtime.appenderator (Showing top 20 results out of 315)

origin: apache/incubator-druid

/**
 * Convenience overload: decides whether accumulated data should be pushed, reading the
 * per-segment and total row limits from the given tuning config and delegating to the
 * two-argument overload.
 *
 * @param tuningConfig tuning config supplying maxRowsPerSegment and maxTotalRows
 *
 * @return true if a push is required
 */
public boolean isPushRequired(AppenderatorConfig tuningConfig)
{
 return isPushRequired(tuningConfig.getMaxRowsPerSegment(), tuningConfig.getMaxTotalRows());
}
origin: apache/incubator-druid

/**
 * Persist all data indexed through this driver so far. Returns a future of persisted commitMetadata.
 * <p>
 * Should be called after all data has been added through {@link #add(InputRow, String, Supplier, boolean, boolean)}.
 *
 * @param committer committer representing all data that has been added so far
 *
 * @return future containing commitMetadata persisted
 */
public ListenableFuture<Object> persistAsync(final Committer committer)
{
 // wrapCommitter() decorates the caller's committer before handing it to the
 // appenderator; presumably it folds in driver-level metadata — confirm in wrapCommitter.
 return appenderator.persistAll(wrapCommitter(committer));
}
origin: apache/incubator-druid

/**
 * Add a row to this driver under the given sequence name.
 *
 * @param row               the row to add
 * @param sequenceName      sequence to which the row belongs
 * @param committerSupplier supplier of a committer representing all data added so far
 *
 * @return result of the append
 *
 * @throws IOException propagated from the underlying append
 */
public AppenderatorDriverAddResult add(
  InputRow row,
  String sequenceName,
  final Supplier<Committer> committerSupplier
) throws IOException
{
 // Delegates to append() with fixed boolean flags (false, true); see append() for their semantics.
 return append(row, sequenceName, committerSupplier, false, true);
}
origin: apache/incubator-druid

/**
 * Registers the given segment identifier as the appending segment for its interval,
 * creating the per-interval state holder on first use, and remembers the identifier
 * as the most recently added segment.
 */
void add(SegmentIdWithShardSpec identifier)
{
  final long intervalStart = identifier.getInterval().getStartMillis();
  SegmentsOfInterval segments = intervalToSegmentStates.get(intervalStart);
  if (segments == null) {
    segments = new SegmentsOfInterval(identifier.getInterval());
    intervalToSegmentStates.put(intervalStart, segments);
  }
  segments.setAppendingSegment(SegmentWithState.newSegment(identifier));
  lastSegmentId = identifier.toString();
}
origin: apache/incubator-druid

@Before
public void setup()
{
  // Build the collaborators first, then wire them into the driver under test.
  dataSegmentKiller = createStrictMock(DataSegmentKiller.class);
  allocator = new TestSegmentAllocator(DATA_SOURCE, Granularities.HOUR);
  appenderatorTester = new AppenderatorTester(MAX_ROWS_IN_MEMORY);
  driver = new BatchAppenderatorDriver(
      appenderatorTester.getAppenderator(),
      allocator,
      new TestUsedSegmentChecker(appenderatorTester),
      dataSegmentKiller
  );
  // Strict mock replayed with no recorded expectations: any call to the killer fails the test.
  EasyMock.replay(dataSegmentKiller);
}
origin: apache/incubator-druid

@Test
public void testRestart()
{
 // startJob() should return null metadata on a fresh start, and again after the
 // driver and its underlying appenderator have been closed and the job restarted.
 Assert.assertNull(driver.startJob());
 driver.close();
 appenderatorTester.getAppenderator().close();
 Assert.assertNull(driver.startJob());
}
origin: apache/incubator-druid

/**
 * Starts this job: ensures the base persist directory exists, locks it, restores any
 * sinks persisted by a previous run, then starts executors and the flush timer.
 *
 * @return metadata recovered from disk by {@code bootstrapSinksFromDisk()}
 */
@Override
public Object startJob()
{
  // File.mkdirs() returns false both when creation fails and when the directory
  // already exists, so only treat it as an error if the directory is still absent.
  // The original code ignored the return value, deferring a confusing failure to
  // lockBasePersistDirectory().
  if (!tuningConfig.getBasePersistDirectory().mkdirs()
      && !tuningConfig.getBasePersistDirectory().isDirectory()) {
    throw new IllegalStateException(
        "Cannot create base persist directory: " + tuningConfig.getBasePersistDirectory()
    );
  }
  lockBasePersistDirectory();
  // Recover sinks persisted by a previous run before starting background work.
  final Object retVal = bootstrapSinksFromDisk();
  initializeExecutors();
  resetNextFlush();
  return retVal;
}
origin: apache/incubator-druid

@Override
public Object startJob()
{
 // NOTE(review): ordering appears deliberate — the handoff notifier is started before
 // the appenderator so handoff callbacks can be registered for anything the
 // appenderator restores during startJob(); confirm before reordering.
 handoffNotifier.start();
 Object retVal = appenderator.startJob();
 initializeExecutors();
 startPersistThread();
 // Push pending sinks bootstrapped from previous run
 mergeAndPush();
 return retVal;
}
origin: apache/incubator-druid

 @Override
 public SegmentIdWithShardSpec apply(DataSegment input)
 {
  // Converts a published DataSegment back into its identifier-with-shard-spec form.
  return SegmentIdWithShardSpec.fromDataSegment(input);
 }
}
origin: apache/incubator-druid

@Before
public void setUp()
{
 // Fresh collaborators for each test; segments are allocated at HOUR granularity.
 allocator = new TestSegmentAllocator(DATA_SOURCE, Granularities.HOUR);
 segmentHandoffNotifierFactory = new TestSegmentHandoffNotifierFactory();
 // Strict mock: any unexpected call to the killer fails the test.
 dataSegmentKiller = createStrictMock(DataSegmentKiller.class);
}
origin: apache/incubator-druid

/**
 * Same as {@link #add(SegmentIdWithShardSpec, InputRow, Supplier, boolean)}, with allowIncrementalPersists set to true
 *
 * @param identifier        segment to add the row to
 * @param row               the row to add
 * @param committerSupplier supplier of a committer representing all data added so far
 *
 * @return result of the add
 *
 * @throws IndexSizeExceededException  propagated from the four-argument overload
 * @throws SegmentNotWritableException propagated from the four-argument overload
 */
default AppenderatorAddResult add(SegmentIdWithShardSpec identifier, InputRow row, Supplier<Committer> committerSupplier)
  throws IndexSizeExceededException, SegmentNotWritableException
{
 return add(identifier, row, committerSupplier, true);
}
origin: apache/incubator-druid

 @Override
 public Appenderator build(DataSchema schema, RealtimeTuningConfig config, FireDepartmentMetrics metrics)
 {
  // Combines the per-call schema/config/metrics with the factory's injected
  // pusher, object mapper, and index tooling to build an offline appenderator.
  return Appenderators.createOffline(schema, config, metrics, dataSegmentPusher, objectMapper, indexIO, indexMerger);
 }
}
origin: apache/incubator-druid

/**
 * Test helper: returns a {@code FailableAppenderator} with push disabled,
 * for exercising push-failure handling.
 */
static Appenderator createPushFailAppenderator()
{
 return new FailableAppenderator().disablePush();
}
origin: apache/incubator-druid

/**
 * Test helper: returns a {@code FailableAppenderator} with persist disabled,
 * for exercising persist-failure handling.
 */
static Appenderator createPersistFailAppenderator()
{
 return new FailableAppenderator().disablePersist();
}
origin: apache/incubator-druid

/**
 * Test helper: returns a {@code FailableAppenderator} whose push is interrupted,
 * for exercising interruption handling during push.
 */
static Appenderator createPushInterruptAppenderator()
{
 return new FailableAppenderator().interruptPush();
}
origin: apache/incubator-druid

/**
 * Change the segment state to {@link SegmentState#APPEND_FINISHED}. The current state should be
 * {@link SegmentState#APPENDING}.
 */
public void finishAppending()
{
  final SegmentState target = SegmentState.APPEND_FINISHED;
  // Validate the APPENDING -> APPEND_FINISHED transition before committing it.
  checkStateTransition(this.state, SegmentState.APPENDING, target);
  this.state = target;
}
origin: apache/incubator-druid

/**
 * Creates a {@code SegmentWithState} in the initial {@link SegmentState#APPENDING}
 * state, with a null data segment (nothing has been pushed yet).
 */
static SegmentWithState newSegment(SegmentIdWithShardSpec segmentIdentifier)
{
 return new SegmentWithState(segmentIdentifier, SegmentState.APPENDING, null);
}
origin: apache/incubator-druid

/**
 * Static factory for a sentinel "failed add" result (all fields null/zero/false).
 */
public static AppenderatorDriverAddResult fail()
{
 return new AppenderatorDriverAddResult(null, 0, 0, false, null);
}
origin: apache/incubator-druid

/**
 * Change the segment state to {@link SegmentState#PUSHED_AND_DROPPED}. The current state should be
 * {@link SegmentState#APPENDING}. This method should be called after the segment of {@link #segmentIdentifier} is
 * completely pushed and dropped.
 *
 * @param dataSegment pushed {@link DataSegment}
 */
public void pushAndDrop(DataSegment dataSegment)
{
  final SegmentState target = SegmentState.PUSHED_AND_DROPPED;
  // Validate the transition first; only then record the pushed segment and the new state.
  checkStateTransition(this.state, SegmentState.APPENDING, target);
  this.dataSegment = dataSegment;
  this.state = target;
}
origin: apache/incubator-druid

/**
 * Creates a {@code SegmentWithState} in the given state, with a null data segment.
 */
static SegmentWithState newSegment(SegmentIdWithShardSpec segmentIdentifier, SegmentState state)
{
 return new SegmentWithState(segmentIdentifier, state, null);
}
org.apache.druid.segment.realtime.appenderator

Most used classes

  • AppenderatorDriverAddResult
    Result of BaseAppenderatorDriver#append(). It contains the identifier of the segment which the InputRow was added to.
  • SegmentIdWithShardSpec
    SegmentId with additional ShardSpec info. #equals/ #hashCode and #compareTo don't consider that additional shard info.
  • SegmentsAndMetadata
  • StreamAppenderatorDriver
    This class is specialized for streaming ingestion. In streaming ingestion, the segment lifecycle is managed by this driver.
  • Appenderator
    An Appenderator indexes data. It has some in-memory data and some persisted-on-disk data. It can serve queries on both of those.
  • BatchAppenderatorDriver,
  • UsedSegmentChecker,
  • Appenderator$AppenderatorAddResult,
  • AppenderatorFactory,
  • AppenderatorImpl,
  • AppenderatorPlumber,
  • BaseAppenderatorDriver$SegmentsForSequence,
  • Committed,
  • SegmentIdentifier,
  • SegmentWithState,
  • TransactionalSegmentPublisher,
  • AppenderatorConfig,
  • AppenderatorDriverMetadata,
  • AppenderatorTest
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimAtomGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyStudentsTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now