Tabnine Logo
HoodieActiveTimeline.filterPendingCompactionTimeline
Code Index. Add Tabnine to your IDE (free)

How to use
filterPendingCompactionTimeline
method
in
com.uber.hoodie.common.table.timeline.HoodieActiveTimeline

Best Java code snippets using com.uber.hoodie.common.table.timeline.HoodieActiveTimeline.filterPendingCompactionTimeline (Showing top 16 results out of 315)

origin: uber/hudi

/**
 * Create a file system view, as of the given timeline
 */
/**
 * Create a file system view, as of the given timeline.
 *
 * @param metaClient            meta client used to read pending compaction operations for the table
 * @param visibleActiveTimeline the timeline whose instants are visible through this view
 */
public HoodieTableFileSystemView(HoodieTableMetaClient metaClient,
  HoodieTimeline visibleActiveTimeline) {
 this.metaClient = metaClient;
 this.visibleActiveTimeline = visibleActiveTimeline;
 this.fileGroupMap = new HashMap<>();
 this.partitionToFileGroupsMap = new HashMap<>();
 // Build fileId -> pending compaction mapping. The intermediate list of pending compaction
 // instants built here previously was never read, so that extra timeline scan is removed.
 this.fileIdToPendingCompaction = ImmutableMap.copyOf(
   CompactionUtils.getAllPendingCompactionOperations(metaClient).entrySet().stream()
       .map(entry -> Pair.of(entry.getKey(), Pair.of(entry.getValue().getKey(),
           CompactionOperation.convertFromAvroRecordInstance(entry.getValue().getValue()))))
       .collect(Collectors.toMap(Pair::getKey, Pair::getValue)));
}
origin: uber/hudi

/**
 * Get all pending compaction plans along with their instants
 *
 * @param metaClient Hoodie Meta Client
 */
/**
 * Get all pending compaction plans along with their instants.
 *
 * @param metaClient Hoodie Meta Client
 * @return one (instant, plan) pair per pending compaction instant on the active timeline
 */
public static List<Pair<HoodieInstant, HoodieCompactionPlan>> getAllPendingCompactionPlans(
  HoodieTableMetaClient metaClient) {
 return metaClient.getActiveTimeline().filterPendingCompactionTimeline().getInstants()
   .map(instant -> {
    try {
     return Pair.of(instant, getCompactionPlan(metaClient, instant.getTimestamp()));
    } catch (IOException e) {
     // Surface plan-read failures as the project's unchecked exception, preserving the cause.
     throw new HoodieException(e);
    }
   }).collect(Collectors.toList());
}
origin: uber/hudi

table.getActiveTimeline().filterPendingCompactionTimeline().firstInstant();
origin: com.uber.hoodie/hoodie-client

table.getActiveTimeline().filterPendingCompactionTimeline().firstInstant();
origin: com.uber.hoodie/hoodie-client

  new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true), config, jsc);
Set<String> pendingCompactions =
  table.getActiveTimeline().filterPendingCompactionTimeline().getInstants()
    .map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
HoodieTimeline inflightCommitTimeline = table.getInflightCommitTimeline();
origin: uber/hudi

  new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true), config, jsc);
Set<String> pendingCompactions =
  table.getActiveTimeline().filterPendingCompactionTimeline().getInstants()
    .map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
HoodieTimeline inflightCommitTimeline = table.getInflightCommitTimeline();
origin: uber/hudi

HoodieInstant compactionInflightInstant =
  new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, compactionTime);
boolean isCompactionInstantInRequestedState = table.getActiveTimeline().filterPendingCompactionTimeline()
  .containsInstant(compactionRequestedInstant);
boolean isCompactionInstantInInflightState = table.getActiveTimeline().filterPendingCompactionTimeline()
  .containsInstant(compactionInflightInstant);
origin: com.uber.hoodie/hoodie-client

HoodieInstant compactionInflightInstant =
  new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, compactionTime);
boolean isCompactionInstantInRequestedState = table.getActiveTimeline().filterPendingCompactionTimeline()
  .containsInstant(compactionRequestedInstant);
boolean isCompactionInstantInInflightState = table.getActiveTimeline().filterPendingCompactionTimeline()
  .containsInstant(compactionInflightInstant);
origin: uber/hudi

/**
 * Starts a new commit at the given instant time, after rolling back any inflight
 * commit/delta-commits (when configured) and checking that no pending compaction
 * is scheduled at or after this instant time.
 *
 * @param instantTime instant time of the commit to start
 */
public void startCommitWithTime(String instantTime) {
 if (rollbackInFlight) {
  // Only rollback inflight commit/delta-commits. Do not touch compaction commits
  rollbackInflightCommits();
 }
 logger.info("Generate a new instant time " + instantTime);
 HoodieTableMetaClient client = new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath());
 // if there are pending compactions, their instantTime must not be greater than that of this instant time
 client.getActiveTimeline().filterPendingCompactionTimeline().lastInstant().ifPresent(latestPending ->
   Preconditions.checkArgument(
     HoodieTimeline.compareTimestamps(latestPending.getTimestamp(), instantTime, HoodieTimeline.LESSER),
     "Latest pending compaction instant time must be earlier "
       + "than this instant time. Latest Compaction :" + latestPending + ",  Ingesting at " + instantTime));
 HoodieTable<T> hoodieTable = HoodieTable.getHoodieTable(client, config, jsc);
 String action = hoodieTable.getMetaClient().getCommitActionType();
 hoodieTable.getActiveTimeline().createInflight(new HoodieInstant(true, action, instantTime));
}
origin: com.uber.hoodie/hoodie-client

/**
 * Starts a new commit at the given instant time.
 *
 * <p>Optionally rolls back inflight commit/delta-commits first, then validates that every
 * pending compaction instant is strictly earlier than {@code instantTime} before creating
 * the inflight instant on the active timeline.
 *
 * @param instantTime instant time of the commit to start
 */
public void startCommitWithTime(String instantTime) {
 if (rollbackInFlight) {
  // Only rollback inflight commit/delta-commits. Do not touch compaction commits
  rollbackInflightCommits();
 }
 logger.info("Generate a new instant time " + instantTime);
 HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath());
 // if there are pending compactions, their instantTime must not be greater than that of this instant time
 metaClient.getActiveTimeline().filterPendingCompactionTimeline().lastInstant().ifPresent(latestPending -> {
  Preconditions.checkArgument(
    HoodieTimeline.compareTimestamps(latestPending.getTimestamp(), instantTime, HoodieTimeline.LESSER),
    "Latest pending compaction instant time must be earlier "
      + "than this instant time. Latest Compaction :" + latestPending + ",  Ingesting at " + instantTime);
 });
 HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);
 HoodieActiveTimeline activeTimeline = table.getActiveTimeline();
 // Commit action type depends on the table type (read from the meta client).
 String commitActionType = table.getMetaClient().getCommitActionType();
 activeTimeline.createInflight(new HoodieInstant(true, commitActionType, instantTime));
}
origin: uber/hudi

/**
 * Schedules a compaction at the given instant time and asserts that it became the latest
 * pending compaction instant on the active timeline.
 *
 * @param compactionInstantTime instant time at which to schedule the compaction
 * @param client                write client used to schedule the compaction
 * @param cfg                   write config providing the table base path
 * @throws IOException if the timeline cannot be read
 */
private void scheduleCompaction(String compactionInstantTime, HoodieWriteClient client, HoodieWriteConfig cfg)
  throws IOException {
 client.scheduleCompactionAtInstant(compactionInstantTime, Optional.empty());
 HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), cfg.getBasePath());
 HoodieInstant instant = metaClient.getActiveTimeline().filterPendingCompactionTimeline().lastInstant().get();
 // JUnit assertEquals takes (message, expected, actual): the requested instant time is the
 // expected value; the original call had the arguments swapped.
 assertEquals("Last compaction instant must be the one set",
   compactionInstantTime, instant.getTimestamp());
}
origin: uber/hudi

/**
 * Transitions a requested compaction to the inflight state and asserts the transition took.
 *
 * @param compactionInstantTime instant time of the compaction to transition
 * @param client                write client (not referenced in this body; kept for call-site symmetry)
 * @param cfg                   write config providing the table base path
 * @throws IOException if the stored compaction plan cannot be read
 */
private void moveCompactionFromRequestedToInflight(String compactionInstantTime, HoodieWriteClient client,
  HoodieWriteConfig cfg) throws IOException {
 HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), cfg.getBasePath());
 HoodieInstant compactionInstant = HoodieTimeline.getCompactionRequestedInstant(compactionInstantTime);
 // NOTE(review): 'workload' is never read afterwards — presumably the deserialization itself
 // validates that the stored plan is readable (and .get() that the aux details exist).
 // Confirm that intent before removing this line.
 HoodieCompactionPlan workload = AvroUtils.deserializeCompactionPlan(
   metaClient.getActiveTimeline().getInstantAuxiliaryDetails(compactionInstant).get());
 metaClient.getActiveTimeline().transitionCompactionRequestedToInflight(compactionInstant);
 // Reload the timeline to observe the state change, then locate the instant by timestamp.
 HoodieInstant instant = metaClient.getActiveTimeline().reload().filterPendingCompactionTimeline().getInstants()
   .filter(in -> in.getTimestamp().equals(compactionInstantTime)).findAny().get();
 assertTrue("Instant must be marked inflight", instant.isInflight());
}
origin: com.uber.hoodie/hoodie-client

/**
 * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time
 * @param compactionInstantTime   Compaction Instant Time
 * @return
 * @throws IOException
 */
/**
 * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
 *
 * <p>If a previous attempt left the instant inflight, it is rolled back (removing any parquet
 * files that attempt produced) and the table state is refreshed before running the compaction.
 *
 * @param compactionInstantTime   Compaction Instant Time
 * @param autoCommit              whether the compaction result is committed automatically
 * @return write statuses produced by running the compaction
 * @throws IOException on timeline read failures
 * @throws IllegalStateException if no compaction request exists at the given instant time
 */
private JavaRDD<WriteStatus> compact(String compactionInstantTime, boolean autoCommit) throws IOException {
 // Create a Hoodie table which encapsulated the commits and files visible
 HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(),
   config.getBasePath(), true);
 HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);
 HoodieTimeline pendingCompactionTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline();
 HoodieInstant inflightInstant = HoodieTimeline.getCompactionInflightInstant(compactionInstantTime);
 if (pendingCompactionTimeline.containsInstant(inflightInstant)) {
  //inflight compaction - Needs to rollback first deleting new parquet files before we run compaction.
  rollbackInflightCompaction(inflightInstant, table);
  // refresh table: metaClient, table and the pending timeline are rebuilt after the rollback
  metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true);
  table = HoodieTable.getHoodieTable(metaClient, config, jsc);
  pendingCompactionTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline();
 }
 HoodieInstant instant = HoodieTimeline.getCompactionRequestedInstant(compactionInstantTime);
 if (pendingCompactionTimeline.containsInstant(instant)) {
  return runCompaction(instant, metaClient.getActiveTimeline(), autoCommit);
 } else {
  throw new IllegalStateException("No Compaction request available at " + compactionInstantTime
    + " to run compaction");
 }
}
origin: uber/hudi

/**
 * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time
 * @param compactionInstantTime   Compaction Instant Time
 * @return
 * @throws IOException
 */
/**
 * Ensures the compaction instant is in the expected state and runs the compaction workload
 * stored at that instant time.
 *
 * @param compactionInstantTime Compaction Instant Time
 * @param autoCommit            whether the compaction result is committed automatically
 * @return write statuses produced by running the compaction
 * @throws IOException on timeline read failures
 */
private JavaRDD<WriteStatus> compact(String compactionInstantTime, boolean autoCommit) throws IOException {
 // Build a table view encapsulating the currently visible commits and files.
 HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(),
   config.getBasePath(), true);
 HoodieTable<T> table = HoodieTable.getHoodieTable(metaClient, config, jsc);
 HoodieTimeline pendingTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline();
 HoodieInstant inflight = HoodieTimeline.getCompactionInflightInstant(compactionInstantTime);
 if (pendingTimeline.containsInstant(inflight)) {
  // An earlier attempt left this compaction inflight: roll it back (deleting the parquet files
  // it produced) before retrying, then refresh the table state.
  rollbackInflightCompaction(inflight, table);
  metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), config.getBasePath(), true);
  table = HoodieTable.getHoodieTable(metaClient, config, jsc);
  pendingTimeline = metaClient.getActiveTimeline().filterPendingCompactionTimeline();
 }
 HoodieInstant requested = HoodieTimeline.getCompactionRequestedInstant(compactionInstantTime);
 if (!pendingTimeline.containsInstant(requested)) {
  throw new IllegalStateException("No Compaction request available at " + compactionInstantTime
    + " to run compaction");
 }
 return runCompaction(requested, metaClient.getActiveTimeline(), autoCommit);
}
origin: uber/hudi

metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), cfg.getBasePath());
HoodieInstant pendingCompactionInstant =
  metaClient.getActiveTimeline().filterPendingCompactionTimeline().firstInstant().get();
assertTrue("Pending Compaction instant has expected instant time",
  pendingCompactionInstant.getTimestamp().equals(compactionInstantTime));
origin: uber/hudi

  metaClient.getActiveTimeline().filterPendingCompactionTimeline().firstInstant().get();
assertTrue("Pending Compaction instant has expected instant time",
  pendingCompactionInstant.getTimestamp().equals(compactionInstantTime));
  metaClient.getActiveTimeline().filterPendingCompactionTimeline().firstInstant().get();
assertTrue("Pending Compaction instant has expected instant time",
  pendingCompactionInstant.getTimestamp().equals(compactionInstantTime));
com.uber.hoodie.common.table.timeline.HoodieActiveTimeline.filterPendingCompactionTimeline

Popular methods of HoodieActiveTimeline

  • getCommitTimeline
  • getCommitsTimeline
  • getCommitsAndCompactionTimeline
  • getInstantDetails
  • getTimelineOfActions
  • createNewCommitTime
  • getInstantAuxiliaryDetails
  • reload
  • <init>
  • createInflight
  • getDeltaCommitTimeline
  • lastInstant
  • getDeltaCommitTimeline,
  • lastInstant,
  • saveAsComplete,
  • saveToCompactionRequested,
  • transitionCompactionRequestedToInflight,
  • deleteCompactionRequested,
  • deleteInflight,
  • filterInflightsExcludingCompaction,
  • getAllCommitsTimeline

Popular in Java

  • Creating JSON documents from java classes using gson
  • putExtra (Intent)
  • startActivity (Activity)
  • onCreateOptionsMenu (Activity)
  • HttpServer (com.sun.net.httpserver)
    This class implements a simple HTTP server. A HttpServer is bound to an IP address and port number a
  • Enumeration (java.util)
    A legacy iteration interface.New code should use Iterator instead. Iterator replaces the enumeration
  • BlockingQueue (java.util.concurrent)
    A java.util.Queue that additionally supports operations that wait for the queue to become non-empty
  • HttpServlet (javax.servlet.http)
    Provides an abstract class to be subclassed to create an HTTP servlet suitable for a Web site. A sub
  • Project (org.apache.tools.ant)
    Central representation of an Ant project. This class defines an Ant project with all of its targets,
  • DateTimeFormat (org.joda.time.format)
    Factory that creates instances of DateTimeFormatter from patterns and styles. Datetime formatting i
  • Top plugins for WebStorm
Tabnine Logo
  • Products

Search for Java code · Search for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now