/**
 * Translate hive table types into jdbc table types.
 *
 * @param hivetabletype the Hive metastore table type string, may be null
 * @return the corresponding JDBC table type, the input unchanged if it is not
 *         a recognized Hive type, or null for null input
 */
public static String toJdbcTableType(String hivetabletype) {
  if (hivetabletype == null) {
    return null;
  }
  if (hivetabletype.equals(TableType.MANAGED_TABLE.toString())) {
    return "TABLE";
  }
  if (hivetabletype.equals(TableType.VIRTUAL_VIEW.toString())) {
    return "VIEW";
  }
  if (hivetabletype.equals(TableType.EXTERNAL_TABLE.toString())) {
    return "EXTERNAL TABLE";
  }
  if (hivetabletype.equals(TableType.MATERIALIZED_VIEW.toString())) {
    return "MATERIALIZED VIEW";
  }
  // Unknown types pass through unchanged.
  return hivetabletype;
}
/**
 * Checks whether the given table is a materialized view.
 *
 * @param table the table to inspect, may be null
 * @return true only when the table is non-null and its type is MATERIALIZED_VIEW
 */
public static boolean isMaterializedViewTable(Table table) {
  return table != null
      && TableType.MATERIALIZED_VIEW.toString().equals(table.getTableType());
}
/**
 * Checks whether the given table is a virtual view.
 *
 * @param table the table to inspect, may be null
 * @return true only when the table is non-null and its type is VIRTUAL_VIEW
 */
public static boolean isView(Table table) {
  return table != null
      && TableType.VIRTUAL_VIEW.toString().equals(table.getTableType());
}
private void validateTableType(Table tbl) { // If the table has property EXTERNAL set, update table type // accordingly String tableType = tbl.getTableType(); boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL")); if (TableType.MANAGED_TABLE.toString().equals(tableType)) { if (isExternal) { tableType = TableType.EXTERNAL_TABLE.toString(); } } if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) { if (!isExternal) { tableType = TableType.MANAGED_TABLE.toString(); } } tbl.setTableType(tableType); }
/**
 * Sets the underlying Thrift table's type, stored as the string form of the
 * given {@link TableType} enum constant.
 *
 * @param tableType the metastore table type to record
 */
public void setTableType(TableType tableType) { tTable.setTableType(tableType.toString()); }
/**
 * Sets the underlying Thrift table's type, stored as the string form of the
 * given {@link TableType} enum constant.
 *
 * @param tableType the metastore table type to record
 */
public void setTableType(TableType tableType) { tTable.setTableType(tableType.toString()); }
/**
 * Maps the underlying Hive metastore table type onto the JDBC table type.
 *
 * @return {@code TableType.VIEW} for virtual views, {@code TableType.TABLE}
 *         for every other (or unset) metastore type
 */
@JsonIgnore
public TableType getJdbcTableType() {
  // Compare with the constant on the left so a null metastore table type
  // falls through to TABLE instead of throwing NullPointerException.
  if (org.apache.hadoop.hive.metastore.TableType.VIRTUAL_VIEW.toString()
      .equals(table.getTable().getTableType())) {
    return TableType.VIEW;
  }
  return TableType.TABLE;
}
/**
 * Determines whether the given table is a virtual view.
 *
 * @param t the table to inspect
 * @return TRUE/FALSE when the table type is set, or null when the type is
 *         unknown (tri-state on purpose)
 */
private static Boolean isViewTable(Table t) {
  if (!t.isSetTableType()) {
    // No recorded type: the answer is unknown, not false.
    return null;
  }
  return t.getTableType().equals(TableType.VIRTUAL_VIEW.toString());
}
/**
 * Gets the names of all materialized views in the given database, using a
 * direct SQL query against the metastore backing tables (TBLS joined to DBS,
 * filtered on TBL_TYPE = MATERIALIZED_VIEW).
 *
 * @param dbName Metastore database name
 * @return list of materialized view names in that database
 * @throws MetaException if the underlying query fails
 */
// NOTE(review): the JDO Query is not closed in this method — confirm that
// executeWithArray or the surrounding query-tracking machinery releases it.
public List<String> getMaterializedViewsForRewriting(String dbName) throws MetaException { String queryText = "SELECT " + TBLS + ".\"TBL_NAME\"" + " FROM " + TBLS + " " + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " WHERE " + DBS + ".\"NAME\" = ? AND " + TBLS + ".\"TBL_TYPE\" = ? " ; List<String> pms = new ArrayList<String>(); pms.add(dbName); pms.add(TableType.MATERIALIZED_VIEW.toString()); Query<?> queryParams = pm.newQuery("javax.jdo.query.SQL", queryText); return executeWithArray( queryParams, pms.toArray(), queryText); }
/**
 * Builds the bidirectional mapping between Hive metastore table types and the
 * "classic" client-facing table type names: managed and external tables both
 * surface as TABLE; virtual and materialized views keep distinct names.
 */
public ClassicTableTypeMapping () {
  hiveToClientMap.put(TableType.MANAGED_TABLE.name(), ClassicTableTypes.TABLE.name());
  hiveToClientMap.put(TableType.EXTERNAL_TABLE.name(), ClassicTableTypes.TABLE.name());
  hiveToClientMap.put(TableType.VIRTUAL_VIEW.name(), ClassicTableTypes.VIEW.name());
  // Use name() consistently with the other entries: toString() is only
  // equivalent while the enums do not override it.
  hiveToClientMap.put(TableType.MATERIALIZED_VIEW.name(),
      ClassicTableTypes.MATERIALIZED_VIEW.name());
  clientToHiveMap.putAll(ClassicTableTypes.TABLE.name(), Arrays.asList(
      TableType.MANAGED_TABLE.name(), TableType.EXTERNAL_TABLE.name()));
  clientToHiveMap.put(ClassicTableTypes.VIEW.name(), TableType.VIRTUAL_VIEW.name());
  clientToHiveMap.put(ClassicTableTypes.MATERIALIZED_VIEW.name(),
      TableType.MATERIALIZED_VIEW.name());
}
/**
 * Lists the names of tables of the given type matching the pattern in the
 * specified catalog/database, applying the client-side filter hook if enabled.
 *
 * @param catName      catalog name
 * @param dbName       database name
 * @param tablePattern pattern the table names must match
 * @param tableType    required table type
 * @return filtered list of matching table names
 * @throws TException on metastore communication failure
 */
@Override
public List<String> getTables(String catName, String dbName, String tablePattern, TableType tableType)
    throws TException {
  String catalogQualifiedDb = prependCatalogToDbName(catName, dbName, conf);
  List<String> unfiltered =
      client.get_tables_by_type(catalogQualifiedDb, tablePattern, tableType.toString());
  return FilterUtils.filterTableNamesIfEnabled(
      isClientFilterEnabled, filterHook, catName, dbName, unfiltered);
}
/**
 * Checks via direct SQL whether the named table is a virtual view.
 * Looks up TBL_TYPE for the (catalog, database, table) triple and compares it
 * to VIRTUAL_VIEW; a missing table yields false.
 *
 * @param catName catalog name
 * @param dbName  database name
 * @param tblName table name
 * @return true only when the table exists and its type is VIRTUAL_VIEW
 * @throws MetaException if the query fails
 */
// The query is always closed in the finally block, whether or not it ran.
private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException { Query query = null; try { String queryText = "select \"TBL_TYPE\" from " + TBLS + "" + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?"; Object[] params = new Object[] { tblName, dbName, catName }; query = pm.newQuery("javax.jdo.query.SQL", queryText); query.setUnique(true); Object result = executeWithArray(query, params, queryText); return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString()); } finally { if (query != null) { query.closeAll(); } } }
/**
 * Lists the normalized names of cached tables in the given catalog/database
 * whose name matches the pattern and whose type equals the requested type.
 * Performed under the cache read lock.
 *
 * @param catName   catalog name
 * @param dbName    database name
 * @param pattern   table-name pattern to match
 * @param tableType required table type
 * @return matching, normalized table names (possibly empty, never null)
 */
public List<String> listCachedTableNames(String catName, String dbName, String pattern, TableType tableType) {
  List<String> tableNames = new ArrayList<>();
  // Hoist the loop-invariant type string out of the cache scan.
  String requestedType = tableType.toString();
  // Acquire the lock outside try: if lock() itself fails, unlock() must not run.
  cacheLock.readLock().lock();
  try {
    for (TableWrapper wrapper : tableCache.values()) {
      if (wrapper.sameDatabase(catName, dbName)
          && CacheUtils.matches(wrapper.getTable().getTableName(), pattern)
          && wrapper.getTable().getTableType().equals(requestedType)) {
        tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName()));
      }
    }
  } finally {
    cacheLock.readLock().unlock();
  }
  return tableNames;
}
@Override public void preCreateTable(Table table) throws MetaException { if (!table.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { throw new MetaException(KAFKA_STORAGE_HANDLER + " supports only " + TableType.EXTERNAL_TABLE); } Arrays.stream(KafkaTableProperties.values()) .filter(KafkaTableProperties::isMandatory) .forEach(key -> Preconditions.checkNotNull(table.getParameters().get(key.getName()), "Set Table property " + key.getName())); // Put all the default at the pre create. Arrays.stream(KafkaTableProperties.values()).forEach((key) -> { if (table.getParameters().get(key.getName()) == null) { table.putToParameters(key.getName(), key.getDefaultValue()); } }); }
/**
 * Deletes every file under an unpartitioned managed table's storage location.
 * The deletion itself is scheduled as an exclusive operation.
 *
 * @param session      connector session used to build the HDFS context
 * @param databaseName database containing the table
 * @param tableName    table to truncate
 * @throws TableNotFoundException   if the table does not exist
 * @throws PrestoException          if the table is not managed or files could not be deleted
 * @throws IllegalArgumentException if the table is partitioned
 */
public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) {
  checkReadable();
  SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName);
  Table table = getTable(databaseName, tableName)
      .orElseThrow(() -> new TableNotFoundException(schemaTableName));
  if (!table.getTableType().equals(MANAGED_TABLE.toString())) {
    throw new PrestoException(NOT_SUPPORTED, "Cannot delete from non-managed Hive table");
  }
  if (!table.getPartitionColumns().isEmpty()) {
    throw new IllegalArgumentException("Table is partitioned");
  }
  Path path = new Path(table.getStorage().getLocation());
  HdfsContext context = new HdfsContext(session, databaseName, tableName);
  setExclusive((delegate, hdfsEnvironment) -> {
    RecursiveDeleteResult recursiveDeleteResult =
        recursiveDeleteFiles(hdfsEnvironment, context, path, ImmutableList.of(""), false);
    if (!recursiveDeleteResult.getNotDeletedEligibleItems().isEmpty()) {
      throw new PrestoException(HIVE_FILESYSTEM_ERROR, format(
          "Error deleting from unpartitioned table %s. These items can not be deleted: %s",
          schemaTableName,
          recursiveDeleteResult.getNotDeletedEligibleItems()));
    }
  });
}
/**
 * Wraps a metastore API Table, inferring a table type when none was recorded.
 * The input table is deep-copied first, so the caller's instance is never mutated.
 *
 * @param apiTable the Thrift table to wrap
 */
public TableWrapper(org.apache.hadoop.hive.metastore.api.Table apiTable) {
  org.apache.hadoop.hive.metastore.api.Table wrapperApiTable = apiTable.deepCopy();
  if (wrapperApiTable.getTableType() == null){
    // TableType specified was null, we need to figure out what type it was.
    if (MetaStoreUtils.isExternalTable(wrapperApiTable)){
      wrapperApiTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else if (MetaStoreUtils.isMaterializedViewTable(wrapperApiTable)) {
      wrapperApiTable.setTableType(TableType.MATERIALIZED_VIEW.toString());
    } else if ((wrapperApiTable.getSd() == null) || (wrapperApiTable.getSd().getLocation() == null)) {
      // No storage descriptor or location: treat as a virtual view.
      wrapperApiTable.setTableType(TableType.VIRTUAL_VIEW.toString());
    } else {
      // Has a physical location and is neither external nor a materialized view.
      wrapperApiTable.setTableType(TableType.MANAGED_TABLE.toString());
    }
  }
  initialize(wrapperApiTable);
}
}
/**
 * Verifies that the given table accepts writes: unless writes to non-managed
 * tables are enabled, only MANAGED_TABLE is writable; then delegates the
 * remaining protect-mode and storage checks.
 *
 * @param table                            table to check
 * @param writesToNonManagedTablesEnabled  whether non-managed tables may be written
 * @throws PrestoException if the table is non-managed and such writes are disabled
 */
public static void checkTableIsWritable(Table table, boolean writesToNonManagedTablesEnabled) {
  // Short-circuit kept: the table type is only consulted when the flag is off.
  if (!writesToNonManagedTablesEnabled
      && !table.getTableType().equals(MANAGED_TABLE.toString())) {
    throw new PrestoException(NOT_SUPPORTED, "Cannot write to non-managed Hive table");
  }
  SchemaTableName schemaTableName =
      new SchemaTableName(table.getDatabaseName(), table.getTableName());
  checkWritable(
      schemaTableName,
      Optional.empty(),
      getProtectMode(table),
      table.getParameters(),
      table.getStorage());
}
/**
 * Builds a simple managed test table in the default catalog with the given
 * columns and partition columns, backed by a dummy file:/tmp storage descriptor.
 *
 * @param dbName   database the table belongs to
 * @param tblName  table name
 * @param tblOwner table owner
 * @param cols     data columns
 * @param ptnCols  partition columns
 * @return the constructed test table
 */
private Table createTestTbl(String dbName, String tblName, String tblOwner,
    List<FieldSchema> cols, List<FieldSchema> ptnCols) {
  SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>());
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output",
      false, 0, serdeInfo, null, null, new HashMap<>());
  sd.setStoredAsSubDirectories(false);
  Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols,
      new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString());
  tbl.setCatName(DEFAULT_CATALOG_NAME);
  return tbl;
}
/** {@inheritDoc} */
@Override
public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
  try {
    List<String> unfiltered =
        client.get_tables_by_type(dbname, tablePattern, tableType.toString());
    return filterHook.filterTableNames(null, dbname, unfiltered);
  } catch (Exception e) {
    // Logs and rethrows as MetaException, so the return below is unreachable
    // in practice; it only satisfies the compiler.
    MetaStoreUtils.logAndThrowMetaException(e);
  }
  return null;
}
/**
 * Builds a fixed test table ("test_dbname"."test_table") with the given
 * partition columns and optional bucket property, a single string data column,
 * ORC serde storage, and MANAGED_TABLE type.
 *
 * @param partitionColumns partition columns for the test table
 * @param bucketProperty   optional bucketing configuration
 * @return the constructed test table
 */
private static Table table(
    List<Column> partitionColumns,
    Optional<HiveBucketProperty> bucketProperty)
{
  Table.Builder tableBuilder = Table.builder();
  tableBuilder.getStorageBuilder()
      .setStorageFormat(
          StorageFormat.create(
              "com.facebook.hive.orc.OrcSerde",
              "org.apache.hadoop.hive.ql.io.RCFileInputFormat",
              // NOTE(review): the output format is the *Input*Format class —
              // confirm whether RCFileOutputFormat was intended or the test
              // deliberately doesn't exercise the output format.
              "org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
      .setLocation("hdfs://VOL1:9000/db_name/table_name")
      .setSkewed(false)
      .setBucketProperty(bucketProperty);
  return tableBuilder
      .setDatabaseName("test_dbname")
      .setOwner("testOwner")
      .setTableName("test_table")
      .setTableType(TableType.MANAGED_TABLE.toString())
      .setDataColumns(ImmutableList.of(new Column("col1", HIVE_STRING, Optional.empty())))
      .setParameters(ImmutableMap.of())
      .setPartitionColumns(partitionColumns)
      .build();
}