AcidOutputFormat.getRecordUpdater

How to use the getRecordUpdater method in org.apache.hadoop.hive.ql.io.AcidOutputFormat

Best Java code snippets using org.apache.hadoop.hive.ql.io.AcidOutputFormat.getRecordUpdater (Showing top 10 results out of 315)

origin: apache/hive

@Before
public void injectMocks() throws IOException {
 // Stub the output format so any getRecordUpdater(PATH, ...) call hands back the mock updater.
 when(mockOutputFormat.getRecordUpdater(eq(PATH), any(Options.class))).thenReturn(mockRecordUpdater);
 mutator = new MutatorImpl(configuration, RECORD_ID_COLUMN, mockObjectInspector, mockOutputFormat, WRITE_ID,
   PATH, BUCKET_ID);
}
origin: apache/hive

private RecordUpdater createRecordUpdater(int bucketId, Long minWriteId, Long maxWriteID)
    throws IOException, SerializationError {
 try {
  // Initialize table properties from the table parameters. This is required because the table
  // may define certain table parameters that may be required while writing. The table parameter
  // 'transactional_properties' is one such example.
  Properties tblProperties = new Properties();
  tblProperties.putAll(tbl.getParameters());
  return outf.getRecordUpdater(partitionPath,
      new AcidOutputFormat.Options(conf)
          .inspector(getSerde().getObjectInspector())
          .bucket(bucketId)
          .tableProperties(tblProperties)
          .minimumWriteId(minWriteId)
          .maximumWriteId(maxWriteID)
          .statementId(-1)
          .finalDestination(partitionPath));
 } catch (SerDeException e) {
  throw new SerializationError("Failed to get object inspector from Serde "
      + getSerde().getClass().getName(), e);
 }
}
origin: apache/hive

protected RecordUpdater createRecordUpdater(final Path partitionPath, int bucketId, Long minWriteId,
 Long maxWriteID)
 throws IOException {
 // Initialize table properties from the table parameters. This is required because the table
 // may define certain table parameters that may be required while writing. The table parameter
 // 'transactional_properties' is one such example.
 Properties tblProperties = new Properties();
 tblProperties.putAll(table.getParameters());
 return acidOutputFormat.getRecordUpdater(partitionPath,
  new AcidOutputFormat.Options(conf)
   .filesystem(fs)
   .inspector(outputRowObjectInspector)
   .bucket(bucketId)
   .tableProperties(tblProperties)
   .minimumWriteId(minWriteId)
   .maximumWriteId(maxWriteID)
   .statementId(statementId)
   .finalDestination(partitionPath));
}
origin: apache/hive

@Test
public void testCreatesRecordReader() throws IOException {
 verify(mockOutputFormat).getRecordUpdater(eq(PATH), captureOptions.capture());
 Options options = captureOptions.getValue();
 assertThat(options.getBucketId(), is(BUCKET_ID));
 assertThat(options.getConfiguration(), is((Configuration) configuration));
 assertThat(options.getInspector(), is(mockObjectInspector));
 assertThat(options.getRecordIdColumn(), is(RECORD_ID_COLUMN));
 assertThat(options.getMinimumWriteId(), is(WRITE_ID));
 assertThat(options.getMaximumWriteId(), is(WRITE_ID));
}
origin: apache/hive

protected RecordUpdater createRecordUpdater(AcidOutputFormat<?, ?> outputFormat) throws IOException {
 // Recover the writer (bucket) id that BucketCodec packed into bucketProperty.
 int bucketId = BucketCodec
  .determineVersion(bucketProperty).decodeWriterId(bucketProperty);
 return outputFormat.getRecordUpdater(
   partitionPath,
   new AcidOutputFormat.Options(configuration)
     .inspector(objectInspector)
     .bucket(bucketId)
     .minimumWriteId(writeId)
     .maximumWriteId(writeId)
     .recordIdColumn(recordIdColumn)
     .finalDestination(partitionPath)
     .statementId(-1));
}
origin: apache/hive

private static RecordUpdater getRecordUpdater(JobConf jc,
                       AcidOutputFormat<?, ?> acidOutputFormat,
                       int bucket,
                       ObjectInspector inspector,
                       Properties tableProp,
                       Path outPath,
                       Reporter reporter,
                       int rowIdColNum,
                       FileSinkDesc conf) throws IOException {
 return acidOutputFormat.getRecordUpdater(outPath, new AcidOutputFormat.Options(jc)
   .isCompressed(conf.getCompressed())
   .tableProperties(tableProp)
   .reporter(reporter)
   .writingBase(conf.getInsertOverwrite())
   .minimumWriteId(conf.getTableWriteId())
   .maximumWriteId(conf.getTableWriteId())
   .bucket(bucket)
   .inspector(inspector)
   .recordIdColumn(rowIdColNum)
   .statementId(conf.getStatementId())
   .finalDestination(conf.getDestPath()));
}
origin: apache/drill

private static RecordUpdater getRecordUpdater(JobConf jc,
                       AcidOutputFormat<?, ?> acidOutputFormat,
                       int bucket,
                       ObjectInspector inspector,
                       Properties tableProp,
                       Path outPath,
                       Reporter reporter,
                       int rowIdColNum,
                       FileSinkDesc conf) throws IOException {
 return acidOutputFormat.getRecordUpdater(outPath, new AcidOutputFormat.Options(jc)
   .isCompressed(conf.getCompressed())
   .tableProperties(tableProp)
   .reporter(reporter)
   .writingBase(false)
   .minimumTransactionId(conf.getTransactionId())
   .maximumTransactionId(conf.getTransactionId())
   .bucket(bucket)
   .inspector(inspector)
   .recordIdColumn(rowIdColNum)
   .statementId(conf.getStatementId())
   .finalDestination(conf.getDestPath()));
}
origin: org.spark-project.hive.hcatalog/hive-hcatalog-streaming

private RecordUpdater createRecordUpdater(int bucketId, Long minTxnId, Long maxTxnID)
    throws IOException, SerializationError {
 try {
  return outf.getRecordUpdater(partitionPath,
      new AcidOutputFormat.Options(conf)
          .inspector(getSerde().getObjectInspector())
          .bucket(bucketId)
          .minimumTransactionId(minTxnId)
          .maximumTransactionId(maxTxnID));
 } catch (SerDeException e) {
  throw new SerializationError("Failed to get object inspector from Serde "
      + getSerde().getClass().getName(), e);
 }
}
origin: org.apache.hive/hive-streaming

protected RecordUpdater createRecordUpdater(final Path partitionPath, int bucketId, Long minWriteId,
 Long maxWriteID)
 throws IOException {
 // Initialize table properties from the table parameters. This is required because the table
 // may define certain table parameters that may be required while writing. The table parameter
 // 'transactional_properties' is one such example.
 Properties tblProperties = new Properties();
 tblProperties.putAll(table.getParameters());
 return acidOutputFormat.getRecordUpdater(partitionPath,
  new AcidOutputFormat.Options(conf)
   .filesystem(fs)
   .inspector(outputRowObjectInspector)
   .bucket(bucketId)
   .tableProperties(tblProperties)
   .minimumWriteId(minWriteId)
   .maximumWriteId(maxWriteID)
   .statementId(-1)
   .finalDestination(partitionPath));
}
origin: com.facebook.presto.hive/hive-apache

private static RecordUpdater getRecordUpdater(JobConf jc,
                       AcidOutputFormat<?, ?> acidOutputFormat,
                       boolean isCompressed,
                       long txnId,
                       int bucket,
                       ObjectInspector inspector,
                       Properties tableProp,
                       Path outPath,
                       Reporter reporter,
                       int rowIdColNum) throws IOException {
 return acidOutputFormat.getRecordUpdater(outPath, new AcidOutputFormat.Options(jc)
   .isCompressed(isCompressed)
   .tableProperties(tableProp)
   .reporter(reporter)
   .writingBase(false)
   .minimumTransactionId(txnId)
   .maximumTransactionId(txnId)
   .bucket(bucket)
   .inspector(inspector)
   .recordIdColumn(rowIdColNum));
}
org.apache.hadoop.hive.ql.io.AcidOutputFormat.getRecordUpdater

Javadoc

Create a RecordUpdater for inserting, updating, or deleting records.
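
The snippets above share one pattern: build an AcidOutputFormat.Options from the job configuration, chain the writer settings onto it, and pass it together with the target path. The following minimal sketch distills that pattern. It assumes ORC as the ACID file format (OrcOutputFormat implements AcidOutputFormat); the partition path, object inspector, bucket id, and write id are caller-supplied placeholders rather than values taken from any snippet above.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
import org.apache.hadoop.hive.ql.io.RecordUpdater;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

public class GetRecordUpdaterSketch {

 static RecordUpdater openUpdater(Configuration conf, Path partitionPath,
   ObjectInspector inspector, int bucketId, long writeId) throws IOException {
  AcidOutputFormat<?, ?> outputFormat = new OrcOutputFormat();
  return outputFormat.getRecordUpdater(partitionPath,
    new AcidOutputFormat.Options(conf)
      .inspector(inspector)      // row layout of the records to be written
      .bucket(bucketId)          // which bucket file this writer owns
      .minimumWriteId(writeId)   // single-transaction delta, so min == max
      .maximumWriteId(writeId)
      .statementId(-1)           // -1: no per-statement suffix on the delta directory
      .finalDestination(partitionPath));
 }
}

The returned RecordUpdater exposes insert, update, and delete operations and must be closed (close(false) on success, close(true) to abort) so the delta file is finalized.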

Popular methods of AcidOutputFormat

  • getRawRecordWriter
    Create a raw writer for ACID events. This is only intended for the compactor.
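
    Since getRawRecordWriter appears on this page only by name, here is a minimal sketch of a compactor-style call in the same Options style; the FileSinkOperator.RecordWriter return type follows the AcidOutputFormat interface, and using writingBase(true) for a compacted base file is an assumption on our part.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

public class GetRawRecordWriterSketch {

 // Sketch only: all parameters are placeholders a real compactor would derive
 // from the compaction request. writingBase(true) marks the output as a base
 // file rather than a delta; the write-id range spans the deltas being merged.
 static FileSinkOperator.RecordWriter openRawWriter(AcidOutputFormat<?, ?> outputFormat,
   Configuration conf, Path partitionPath, ObjectInspector inspector, int bucketId,
   long minWriteId, long maxWriteId) throws IOException {
  return outputFormat.getRawRecordWriter(partitionPath,
    new AcidOutputFormat.Options(conf)
      .inspector(inspector)
      .bucket(bucketId)
      .minimumWriteId(minWriteId)
      .maximumWriteId(maxWriteId)
      .writingBase(true));
 }
}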
