/**
 * Constructs the special "unpartitioned" partition: no partition key bytes,
 * an unconstrained tuple domain, and indexed-column predicate pushdown disabled.
 */
private CassandraPartition()
{
    this.partitionId = UNPARTITIONED_ID;
    this.tupleDomain = TupleDomain.all();
    this.key = null;
    this.indexedColumnPredicatePushdown = false;
}
@Override protected ExtractionResult visitExpression(Expression node, Boolean complement) { // If we don't know how to process this node, the default response is to say that the TupleDomain is "all" return new ExtractionResult(TupleDomain.all(), complementIfNecessary(node, complement)); }
/**
 * Builds a page source over the requested TPCH columns for the given split,
 * backed by a record set scaled to the split's schema name.
 */
private static <T extends TpchEntity> ConnectorPageSource createPageSource(TpchTable<T> table, List<String> columnNames, SplitInfo splitInfo)
{
    // Resolve each requested column name against the table's column metadata.
    List<TpchColumn<T>> columns = columnNames.stream()
            .map(name -> table.getColumn(name))
            .collect(toList());
    return new RecordPageSource(createTpchRecordSet(
            table,
            columns,
            schemaNameToScaleFactor(splitInfo.getSchemaName()),
            splitInfo.getPartNumber(),
            splitInfo.getTotalParts(),
            TupleDomain.all()));
}
/**
 * Extracting a domain from an empty fixed-value map must yield the
 * unconstrained "all" TupleDomain.
 */
@Test
public void testEmptySingleValuesMapToDomain()
{
    assertEquals(
            TupleDomain.fromFixedValues(ImmutableMap.of()),
            TupleDomain.all());
}
/**
 * Fetches all splits for the given table with no predicate pushdown, asserts
 * there is exactly one, and returns it as a HiveSplit.
 */
protected HiveSplit getHiveSplit(ConnectorTableHandle tableHandle)
{
    List<ConnectorSplit> splits = getAllSplits(tableHandle, TupleDomain.all());
    assertEquals(splits.size(), 1);
    // Safe after the size assertion above.
    return (HiveSplit) splits.get(0);
}
/**
 * Extracting fixed values from the unconstrained "all" TupleDomain must
 * produce an empty map (no column is pinned to a single value).
 */
@Test
public void testExtractFixedValuesFromAll()
{
    assertEquals(
            ImmutableMap.of(),
            TupleDomain.extractFixedValues(TupleDomain.all()).get());
}
/**
 * The "all" TupleDomain reports isAll() and equals both a domain whose only
 * column constraint is Domain.all and a domain with no column constraints.
 */
@Test
public void testAll()
{
    assertTrue(TupleDomain.all().isAll());

    TupleDomain<ColumnHandle> allOnSingleColumn = TupleDomain.withColumnDomains(ImmutableMap.of(
            A, Domain.all(BIGINT)));
    assertEquals(TupleDomain.<ColumnHandle>all(), allOnSingleColumn);

    TupleDomain<ColumnHandle> noColumnDomains = TupleDomain.withColumnDomains(ImmutableMap.<ColumnHandle, Domain>of());
    assertEquals(TupleDomain.<ColumnHandle>all(), noColumnDomains);
}
/**
 * A boolean literal extracts completely: after applying any complement, TRUE
 * maps to the unconstrained "all" domain and FALSE to the empty "none" domain,
 * leaving TRUE_LITERAL as the trivially satisfied remaining expression.
 */
@Override
protected ExtractionResult visitBooleanLiteral(BooleanLiteral node, Boolean complement)
{
    // XOR flips the literal exactly when the complement flag is set.
    boolean effectiveValue = node.getValue() ^ complement;
    if (effectiveValue) {
        return new ExtractionResult(TupleDomain.all(), TRUE_LITERAL);
    }
    return new ExtractionResult(TupleDomain.none(), TRUE_LITERAL);
}
@Test public void testAddresses() { // split uses "example" scheme so no addresses are available and is not remotely accessible assertEquals(split.getAddresses(), ImmutableList.of()); assertEquals(split.isRemotelyAccessible(), true); JdbcSplit jdbcSplit = new JdbcSplit("connectorId", "catalog", "schemaName", "tableName", TupleDomain.all(), Optional.empty()); assertEquals(jdbcSplit.getAddresses(), ImmutableList.of()); }
/**
 * With no projected columns, the builder must emit the constant-literal query
 * {@code SELECT ' ' FROM S3Object s}.
 */
@Test
public void testEmptyColumns()
{
    IonSqlQueryBuilder queryBuilder = new IonSqlQueryBuilder(new TypeRegistry());
    String generatedSql = queryBuilder.buildSql(ImmutableList.of(), TupleDomain.all());
    assertEquals("SELECT ' ' FROM S3Object s", generatedSql);
}
/**
 * Reading a table whose partition declares a column type ('string') that
 * conflicts with the table schema's type ('double') must fail with a
 * descriptive PrestoException rather than returning data.
 */
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = ".*The column 't_data' in table '.*\\.presto_test_partition_schema_change' is declared as type 'double', but partition 'ds=2012-12-29' declared column 't_data' as type 'string'.")
public void testPartitionSchemaMismatch()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tablePartitionSchemaChange);
        readTable(transaction, tableHandle, ImmutableList.of(dsColumn), newSession(), TupleDomain.all(), OptionalInt.empty(), Optional.empty());
    }
}
/**
 * Reading with an invalid column handle must fail with a RuntimeException
 * whose message names the offending column.
 */
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*" + INVALID_COLUMN + ".*")
public void testGetRecordsInvalidColumn()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableUnpartitioned);
        readTable(transaction, tableHandle, ImmutableList.of(invalidColumnHandle), newSession(), TupleDomain.all(), OptionalInt.empty(), Optional.empty());
    }
}
/**
 * A MongoSplit must survive a JSON round trip: schema/table name, tuple
 * domain, and addresses all come back equal to the original's values.
 */
@Test
public void testJsonRoundTrip()
{
    MongoSplit expected = new MongoSplit(
            new SchemaTableName("schema1", "table1"),
            TupleDomain.all(),
            ImmutableList.of());

    MongoSplit actual = codec.fromJson(codec.toJson(expected));

    assertEquals(actual.getSchemaTableName(), expected.getSchemaTableName());
    assertEquals(actual.getTupleDomain(), TupleDomain.<ColumnHandle>all());
    assertEquals(actual.getAddresses(), ImmutableList.of());
}
}
@Test public void testMapInt32WithoutOriginalTypeToPrestoInteger() { // int32 primitive should default to Presto integer if original type metadata isn't available PrimitiveType intType = new PrimitiveType(OPTIONAL, PrimitiveTypeName.INT32, "int_col"); ColumnDescriptor columnDescriptor = new ColumnDescriptor(new String[]{"int_col"}, PrimitiveTypeName.INT32, 0, 1); RichColumnDescriptor intColumn = new RichColumnDescriptor(columnDescriptor, intType); assertEquals(getPrestoType(TupleDomain.all(), intColumn), INTEGER); }
/**
 * An INT32 primitive explicitly annotated with OriginalType.INT_32 must map
 * to the Presto INTEGER type.
 */
@Test
public void testMapInt32ToPrestoInteger()
{
    PrimitiveType parquetIntType = new PrimitiveType(OPTIONAL, PrimitiveTypeName.INT32, "int_col", OriginalType.INT_32);
    ColumnDescriptor descriptor = new ColumnDescriptor(new String[] {"int_col"}, PrimitiveTypeName.INT32, 0, 1);
    RichColumnDescriptor richDescriptor = new RichColumnDescriptor(descriptor, parquetIntType);
    assertEquals(getPrestoType(TupleDomain.all(), richDescriptor), INTEGER);
}
/**
 * Convenience overload: builds a scan of the built-in testing table with no
 * layout and unconstrained current/enforced predicates.
 */
public TableScanNode tableScan(List<Symbol> symbols, Map<Symbol, ColumnHandle> assignments)
{
    ConnectorId connectorId = new ConnectorId("testConnector");
    TableHandle tableHandle = new TableHandle(connectorId, new TestingTableHandle());
    return tableScan(tableHandle, symbols, assignments, Optional.empty(), TupleDomain.all(), TupleDomain.all());
}
/**
 * A split source over offline partitions must surface a RuntimeException
 * promptly — both while draining splits and when polling isFinished —
 * instead of hanging.
 */
@Test
public void testNoHangIfPartitionIsOffline()
        throws Exception
{
    BackgroundHiveSplitLoader splitLoader = backgroundHiveSplitLoaderOfflinePartitions();
    HiveSplitSource splitSource = hiveSplitSource(splitLoader, TupleDomain.all());
    splitLoader.start(splitSource);

    assertThrows(RuntimeException.class, () -> drain(splitSource));
    assertThrows(RuntimeException.class, splitSource::isFinished);
}
/**
 * Resolves the single JDBC split for the given schema/table with no predicate
 * pushdown; getOnlyElement fails if the first batch (up to 1000 splits) does
 * not contain exactly one split.
 */
public JdbcSplit getSplit(String schemaName, String tableName)
{
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);
    JdbcTableHandle tableHandle = jdbcClient.getTableHandle(schemaTableName);
    JdbcTableLayoutHandle layoutHandle = new JdbcTableLayoutHandle(tableHandle, TupleDomain.all());
    ConnectorSplitSource splitSource = jdbcClient.getSplits(layoutHandle);
    return (JdbcSplit) getOnlyElement(getFutureValue(splitSource.getNextBatch(NOT_PARTITIONED, 1000)).getSplits());
}