// TestNG injects the hadoop1-specific connection/config values listed below into setup().
@Parameters({
        "hive.hadoop1.metastoreHost",
        "hive.hadoop1.metastorePort",
        "hive.hadoop1.databaseName",
        "hive.hadoop1.s3.awsAccessKey",
        "hive.hadoop1.s3.awsSecretKey",
        "hive.hadoop1.s3.writableBucket",
})
@BeforeClass
@Override
public void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    // Delegates to the shared base-class setup; this subclass only supplies the hadoop1 parameter names.
    super.setup(host, port, databaseName, awsAccessKey, awsSecretKey, writableBucket);
}
}
/**
 * Creates {@code temporaryCreateTable} once per Hive storage format and
 * verifies creation succeeds, always dropping the table afterwards so a
 * failure with one format does not pollute the next iteration.
 */
@Test
public void testTableCreation()
        throws Exception
{
    for (HiveStorageFormat format : HiveStorageFormat.values()) {
        try {
            doCreateTable(temporaryCreateTable, format, "presto_test");
        }
        finally {
            // Always clean up, even when doCreateTable throws.
            dropTable(temporaryCreateTable);
        }
    }
}
/**
 * Reads every split of the S3-backed test table through the page source
 * provider and checks that the sum of the {@code t_bigint} column matches
 * the known fixture total (78300). Also asserts the layout resolves to
 * exactly one partition.
 */
@Test
public void testGetRecordsS3()
        throws Exception
{
    HiveTransactionHandle transaction = new HiveTransactionHandle();
    HiveMetadata metadata = metadataFactory.create();

    ConnectorTableHandle tableHandle = getTableHandle(metadata, tableS3);
    List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());
    Map<String, Integer> columnIndex = indexColumns(columnHandles);

    // Resolve the (single) table layout with no constraint.
    List<ConnectorTableLayoutResult> layoutResults = metadata.getTableLayouts(
            SESSION,
            tableHandle,
            new Constraint<>(TupleDomain.all(), bindings -> true),
            Optional.empty());
    HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(layoutResults).getTableLayout().getHandle();
    assertEquals(layoutHandle.getPartitions().get().size(), 1);

    ConnectorSplitSource splitSource = splitManager.getSplits(transaction, SESSION, layoutHandle);

    // Accumulate t_bigint across every row of every split.
    long total = 0;
    for (ConnectorSplit split : getAllSplits(splitSource)) {
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction, SESSION, split, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(SESSION, pageSource, getTypes(columnHandles));
            for (MaterializedRow row : result) {
                total += (Long) row.getField(columnIndex.get("t_bigint"));
            }
        }
    }
    assertEquals(total, 78300);
}
// Resolve the table and collect its column handles for the scan.
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(SESSION, tableHandle).values());
// Re-read the metadata and verify owner and column definitions survived the round trip.
// NOTE(review): tableMetadata, layoutHandle, transaction, tableOwner, and columns are
// declared earlier in the enclosing method, outside this visible fragment.
tableMetadata = metadata.getTableMetadata(SESSION, getTableHandle(metadata, tableName));
assertEquals(tableMetadata.getOwner(), tableOwner);
assertEquals(tableMetadata.getColumns(), columns);
// Expect exactly one partition, which should yield exactly one split.
assertEquals(layoutHandle.getPartitions().get().size(), 1);
ConnectorSplitSource splitSource = splitManager.getSplits(transaction, SESSION, layoutHandle);
ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
// Initialize the Hive test fixtures for the configured database.
// NOTE(review): fragment of a larger setup method; setupHive is defined outside this view.
setupHive(databaseName);
// TestNG injects the cdh5-specific connection/config values listed below into setup().
@Parameters({
        "hive.cdh5.metastoreHost",
        "hive.cdh5.metastorePort",
        "hive.cdh5.databaseName",
        "hive.cdh5.s3.awsAccessKey",
        "hive.cdh5.s3.awsSecretKey",
        "hive.cdh5.s3.writableBucket",
})
@BeforeClass
@Override
public void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    // Delegates to the shared base-class setup; this subclass only supplies the cdh5 parameter names.
    super.setup(host, port, databaseName, awsAccessKey, awsSecretKey, writableBucket);
}
}
// TestNG injects the cdh4-specific connection/config values listed below into setup().
@Parameters({
        "hive.cdh4.metastoreHost",
        "hive.cdh4.metastorePort",
        "hive.cdh4.databaseName",
        "hive.cdh4.s3.awsAccessKey",
        "hive.cdh4.s3.awsSecretKey",
        "hive.cdh4.s3.writableBucket",
})
@BeforeClass
@Override
public void setup(String host, int port, String databaseName, String awsAccessKey, String awsSecretKey, String writableBucket)
{
    // Delegates to the shared base-class setup; this subclass only supplies the cdh4 parameter names.
    super.setup(host, port, databaseName, awsAccessKey, awsSecretKey, writableBucket);
}
}