HCatAddPartitionDesc$Builder

How to use HCatAddPartitionDesc$Builder in org.apache.hive.hcatalog.api
Best Java code snippets using org.apache.hive.hcatalog.api.HCatAddPartitionDesc$Builder

origin: apache/hive

/**
 * Creates the builder for specifying attributes.
 *
 * @param dbName the db name
 * @param tableName the table name
 * @param location the location
 * @param partSpec the part spec
 * @return the builder
 * @throws HCatException
 */
@Deprecated // @deprecated in favour of {@link HCatAddPartitionDesc.#create(HCatPartition)}. To be removed in Hive 0.16.
public static Builder create(String dbName,
               String tableName,
               String location,
               Map<String, String> partSpec
          ) throws HCatException {
 LOG.error("Unsupported! HCatAddPartitionDesc requires HCatTable to be specified explicitly.");
 return new Builder(dbName, tableName, location, partSpec);
}
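
This overload is deprecated: it logs an error because the builder is given no HCatTable to resolve the partition against, and it is slated for removal. A rough before/after sketch of the supported replacement path, assuming client, dbName, tableName, location and partSpec are already in scope:

// Deprecated path: the builder never learns the table's schema.
HCatAddPartitionDesc oldStyle =
    HCatAddPartitionDesc.create(dbName, tableName, location, partSpec).build();

// Supported path: look up the HCatTable first, then describe the partition against it.
HCatTable table = client.getTable(dbName, tableName);
HCatAddPartitionDesc newStyle =
    HCatAddPartitionDesc.create(new HCatPartition(table, partSpec, location)).build();
client.addPartition(newStyle);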
origin: apache/hive

// Truncated in the original snippet; reconstructed from the pattern used elsewhere on this page (the receiver variable name is assumed).
client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(createdTable, partitionSpec, makePartLocation(createdTable, partitionSpec))).build());
origin: apache/oozie

public void addPartition(String db, String table, String partitionSpec, String location) throws Exception {
  String[] parts = partitionSpec.split(HCatURI.PARTITION_SEPARATOR);
  Map<String, String> partitions = new HashMap<String, String>();
  for (String part : parts) {
    String[] split = part.split("=");
    partitions.put(split[0], split[1]);
  }
  HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(db, table, location, partitions).build();
  hcatClient.addPartition(addPtn);
  assertNotNull(hcatClient.getPartition(db, table, partitions));
}
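
A hedged usage sketch for the helper above, assuming Oozie's HCatURI.PARTITION_SEPARATOR is the ';' character; the database, table and location values are illustrative only:

// The helper splits "year=2012;month=08" into {year=2012, month=08}, registers the
// partition via HCatAddPartitionDesc.create(...).build(), and asserts it is retrievable.
addPartition("default", "clicks", "year=2012;month=08", "hdfs:///data/clicks/2012/08");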
origin: apache/hive

HCatPartition ptn1 = (new HCatPartition(table2Created, ptnDesc1,
  makePartLocation(table2Created, ptnDesc1)));
sourceMetastore.addPartition(HCatAddPartitionDesc.create(ptn1).build());
HCatPartition ptn = (new HCatPartition(table2Created, ptnDesc,
  makePartLocation(table2Created, ptnDesc)));
sourceMetastore.addPartition(HCatAddPartitionDesc.create(ptn).build());
sourceMetastore.dropPartitions(dbName, tblName2, ptnDesc, true);
origin: apache/hive

HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1,
  makePartLocation(sourceTable, partitionSpec_1));
sourceMetaStore().addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build());
assertEquals("Unexpected number of partitions. ",
  1, sourceMetaStore().getPartitions(dbName, tableName).size());
HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2,
  makePartLocation(sourceTable, partitionSpec_2));
sourceMetaStore().addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build());
addPartitionDescs.add(HCatAddPartitionDesc.create(partition).build());
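
The trailing addPartitionDescs.add(...) line above comes from a variant that collects several descriptors before registering them; HCatClient also provides a batch addPartitions(List<HCatAddPartitionDesc>) method. A minimal sketch, assuming client, table and a collection of partition specs are in scope (makePartLocation is the test helper used elsewhere on this page):

List<HCatAddPartitionDesc> addPartitionDescs = new ArrayList<>();
for (Map<String, String> spec : partitionSpecs) {
  HCatPartition partition = new HCatPartition(table, spec, makePartLocation(table, spec));
  addPartitionDescs.add(HCatAddPartitionDesc.create(partition).build());
}
// Registers all collected partitions in a single metastore call.
client.addPartitions(addPartitionDescs);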
origin: apache/hive

client.addPartition(HCatAddPartitionDesc.create(ptnToAdd).build());
client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
origin: apache/hive

firstPtn.put("country", "usa");
HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(new HCatPartition(table, firstPtn, null)).build();
client.addPartition(addPtn);
// The create(...) calls below are truncated in the original snippet; the leading
// "HCatAddPartitionDesc.create(dbName," (db-name variable assumed) is restored here.
HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName,
  tableName, null, secondPtn).build();
client.addPartition(addPtn2);
HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName,
  tableName, null, thirdPtn).build();
client.addPartition(addPtn3);
origin: apache/hive

HCatPartition ptnToAdd1 = (new HCatPartition(tableCreated, ptnDesc1,
  TestHCatClient.makePartLocation(tableCreated,ptnDesc1))).parameters(props1);
client.addPartition(HCatAddPartitionDesc.create(ptnToAdd1).build());
HCatPartition ptnToAdd2 = (new HCatPartition(tableCreated, ptnDesc2,
  TestHCatClient.makePartLocation(tableCreated,ptnDesc2))).parameters(props2);
client.addPartition(HCatAddPartitionDesc.create(ptnToAdd2).build());
origin: apache/hive

partitionSpec.put("dt", "2011_12_31");
client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec,
  makePartLocation(table, partitionSpec))).build());
partitionSpec.put("grid", "AB");
partitionSpec.put("dt", "2012_01_01");
client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec,
  makePartLocation(table, partitionSpec))).build());
partitionSpec.put("dt", "2012_01_01");
partitionSpec.put("grid", "OB");
client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec,
  makePartLocation(table, partitionSpec))).build());
partitionSpec.put("dt", "2012_01_01");
partitionSpec.put("grid", "XB");
client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec,
  makePartLocation(table, partitionSpec))).build());
origin: apache/hive

/**
 * Constructs a Builder instance, using an HCatPartition object.
 * @param partition An HCatPartition instance.
 * @return A Builder object that can build an appropriate HCatAddPartitionDesc.
 * @throws HCatException
 */
public static Builder create(HCatPartition partition) throws HCatException {
 return new Builder(partition);
}
org.apache.hive.hcatalog.api.HCatAddPartitionDesc$Builder

Javadoc

Builder class for constructing an HCatAddPartitionDesc instance.

Most used methods

  • <init>
  • build
    Builds the HCatAddPartitionDesc.
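
Putting the pieces together, a minimal self-contained sketch of the builder's intended use; the database and table names are placeholders, and the calls mirror the snippets above:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.HCatTable;

public class AddPartitionExample {
  public static void main(String[] args) throws Exception {
    // Connect using the Hive/HCatalog configuration on the classpath.
    HCatClient client = HCatClient.create(new Configuration());
    try {
      // Look up the partitioned table the new partition belongs to.
      HCatTable table = client.getTable("default", "my_partitioned_table");

      // Values for every partition column of the table.
      Map<String, String> partitionSpec = new HashMap<>();
      partitionSpec.put("dt", "2012_01_01");

      // Passing null lets the metastore pick the default partition location.
      HCatPartition partition = new HCatPartition(table, partitionSpec, null);
      client.addPartition(HCatAddPartitionDesc.create(partition).build());

      // Sanity check: the partition is now visible through the client.
      HCatPartition registered = client.getPartition("default", "my_partitioned_table", partitionSpec);
      System.out.println("Added partition at: " + registered.getLocation());
    } finally {
      client.close();
    }
  }
}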
