/**
 * Verifies that the schema version recorded in the database is compatible with
 * the version the Hive binaries expect.
 *
 * @param hiveSchemaVersion version expected by this Hive release
 * @param dbSchemaVersion version recorded in the metastore database
 * @throws HiveMetaException if the two versions are not compatible
 */
protected void assertCompatibleVersion(String hiveSchemaVersion, String dbSchemaVersion)
    throws HiveMetaException {
  boolean compatible = metaStoreSchemaInfo.isVersionCompatible(hiveSchemaVersion, dbSchemaVersion);
  if (compatible) {
    return;
  }
  throw new HiveMetaException("Metastore schema version is not compatible. Hive Version: "
      + hiveSchemaVersion + ", Database Schema Version: " + dbSchemaVersion);
}
/**
 * Looks up the numeric id (DB_ID) of a database in the metastore.
 *
 * @param stmt open JDBC statement to run the lookup with
 * @param db name of the database to resolve
 * @param catalog catalog the database belongs to
 * @return the DB_ID of the matching row
 * @throws SQLException on JDBC errors
 * @throws HiveMetaException if no such database exists
 */
private long getDbId(Statement stmt, String db, String catalog) throws SQLException, HiveMetaException {
  String query = String.format(schemaTool.quote(DB_ID_QUERY), db, catalog);
  LOG.debug("Going to run " + query);
  try (ResultSet rs = stmt.executeQuery(query)) {
    if (!rs.next()) {
      // Report the database we were actually asked to resolve.  The previous code
      // always printed the 'fromDb' field, so a missing *target* database was
      // misreported as the source database.
      throw new HiveMetaException("Unable to find database " + db);
    }
    return rs.getLong(1);
  }
}
/**
 * Rewrites the catalog/database reference in one metastore table for rows that
 * point at the Hive table being moved.
 *
 * @param stmt open JDBC statement to run the update with
 * @param tableName metastore table to update
 * @param tableColumnName column in that table holding the Hive table name
 * @param fromCat source catalog
 * @param toCat target catalog
 * @param fromDb source database
 * @param toDb target database
 * @param hiveTblName name of the Hive table being moved
 * @throws HiveMetaException if more than one row was changed
 * @throws SQLException on JDBC errors
 */
private void updateDbNameForTable(Statement stmt, String tableName, String tableColumnName,
    String fromCat, String toCat, String fromDb, String toDb, String hiveTblName)
    throws HiveMetaException, SQLException {
  String update = String.format(schemaTool.quote(UPDATE_DB_NAME_STMT), tableName, toCat, toDb,
      fromCat, fromDb, tableColumnName, hiveTblName);
  LOG.debug("Going to run " + update);
  int rowCount = stmt.executeUpdate(update);
  // Zero rows is tolerated here (the metastore table may simply have no entry
  // for this Hive table); anything beyond one row is an error.
  if (rowCount < 0 || rowCount > 1) {
    throw new HiveMetaException("Failed to properly update the " + tableName
        + " table. Expected to update 1 row but instead updated " + rowCount);
  }
}
}
@Override public String getCreateUserScript() throws HiveMetaException { String createScript = CREATE_USER_PREFIX + "." + dbType + SQL_FILE_EXTENSION; // check if the file exists if (!(new File(getMetaStoreScriptDir() + File.separatorChar + createScript).exists())) { throw new HiveMetaException("Unable to find create user file, expected: " + createScript); } return createScript; }
/**
 * Verifies the metastore database is reachable.  Opening the privileged
 * connection is itself the test; it is then released immediately.
 *
 * @throws HiveMetaException if the connection cannot be opened or closed
 */
protected void testConnectionToMetastore() throws HiveMetaException {
  Connection probe = getConnectionToMetastore(true);
  try {
    probe.close();
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to close metastore connection", e);
  }
}
/**
 * Rewrites the catalog name in one metastore table for rows belonging to the
 * given database.
 *
 * @param stmt open JDBC statement to run the update with
 * @param tableName metastore table to update
 * @param catColName column holding the catalog name
 * @param dbColName column holding the database name
 * @param fromCatName catalog name to replace
 * @param toCatName replacement catalog name
 * @param dbName database whose rows are being moved
 * @param zeroUpdatesOk whether it is acceptable for no rows to match
 * @throws HiveMetaException if an unexpected number of rows was changed
 * @throws SQLException on JDBC errors
 */
private void updateCatalogNameInTable(Statement stmt, String tableName, String catColName,
    String dbColName, String fromCatName, String toCatName, String dbName, boolean zeroUpdatesOk)
    throws HiveMetaException, SQLException {
  String update = String.format(schemaTool.quote(UPDATE_CATALOG_NAME_STMT), tableName, catColName,
      toCatName, catColName, fromCatName, dbColName, dbName);
  LOG.debug("Going to run " + update);
  int rowCount = stmt.executeUpdate(update);
  // Exactly one row must change, unless the caller explicitly allows zero.
  boolean acceptable = rowCount == 1 || (zeroUpdatesOk && rowCount == 0);
  if (!acceptable) {
    throw new HiveMetaException("Failed to properly update the " + tableName
        + " table. Expected to update 1 row but instead updated " + rowCount);
  }
}
}
private void loadAllUpgradeScripts(String dbType) throws HiveMetaException { // load upgrade order for the given dbType List<String> upgradeOrderList = new ArrayList<>(); String upgradeListFile = getMetaStoreScriptDir() + File.separator + VERSION_UPGRADE_LIST + "." + dbType; try (FileReader fr = new FileReader(upgradeListFile); BufferedReader bfReader = new BufferedReader(fr)) { String currSchemaVersion; while ((currSchemaVersion = bfReader.readLine()) != null) { upgradeOrderList.add(currSchemaVersion.trim()); } } catch (FileNotFoundException e) { throw new HiveMetaException("File " + upgradeListFile + "not found ", e); } catch (IOException e) { throw new HiveMetaException("Error reading " + upgradeListFile, e); } hiveSchemaVersions = upgradeOrderList.toArray(new String[0]); }
private int getNextCatalogId(Statement stmt) throws SQLException, HiveMetaException { String query = schemaTool.quote(NEXT_CATALOG_ID_QUERY); LOG.debug("Going to run " + query); try (ResultSet rs = stmt.executeQuery(query)) { if (!rs.next()) { throw new HiveMetaException("No catalogs found, have you upgraded the database?"); } int nextId = rs.getInt(1) + 1; // We need to stay out of the way of any sequences used by the underlying database. // Otherwise the next time the client tries to add a catalog we'll get an error. // There should never be billions of catalogs, so we'll shift our sequence number up // there to avoid clashes. int floor = 1 << 30; return Math.max(nextId, floor); } }
/**
 * Scans for TBLS rows whose SD_ID is NULL, which indicates corrupt table
 * metadata, and reports each offending table.
 *
 * @param conn open metastore connection
 * @return true when no NULL SD_ID rows were found
 * @throws HiveMetaException on JDBC errors while scanning
 */
@VisibleForTesting
boolean validateColumnNullValues(Connection conn) throws HiveMetaException {
  System.out.println("Validating columns for incorrect NULL values.");
  boolean noNullsFound = true;
  String queryColumnNullValues = schemaTool.quote(QUERY_COLUMN_NULL_VALUES);
  try (Statement stmt = conn.createStatement();
       ResultSet res = stmt.executeQuery(queryColumnNullValues)) {
    // Every row returned by the query is a violation.
    while (res.next()) {
      long badTableId = res.getLong("TBL_ID");
      String badTableName = res.getString("TBL_NAME");
      String badTableType = res.getString("TBL_TYPE");
      noNullsFound = false;
      System.err.println("SD_ID in TBLS should not be NULL for Table Name=" + badTableName
          + ", Table ID=" + badTableId + ", Table Type=" + badTableType);
    }
    if (noNullsFound) {
      System.out.println("[SUCCESS]\n");
    } else {
      System.out.println("[FAIL]\n");
    }
    return noNullsFound;
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to validate columns for incorrect NULL values", e);
  }
}
}
/**
 * Executes the create-user script against Oracle one statement at a time,
 * since SqlLine cannot be used here.  Each line of the script is expected to
 * be a single SQL statement terminated by ';'.
 *
 * @param createFile script file produced by {@code subUserAndPassword}
 * @throws HiveMetaException if the script cannot be read, the connection
 *         fails, or a statement fails to execute
 */
private void oracleCreateUserHack(File createFile) throws HiveMetaException {
  LOG.debug("Found oracle, hacking our way through it rather than using SqlLine");
  try (BufferedReader reader = new BufferedReader(new FileReader(createFile));
       Connection conn = schemaTool.getConnectionToMetastore(false);
       Statement stmt = conn.createStatement()) {
    reader.lines().forEach(s -> {
      assert s.charAt(s.length() - 1) == ';';
      try {
        // Strip the trailing ';' — Oracle's JDBC driver rejects it.
        stmt.execute(s.substring(0, s.length() - 1));
      } catch (SQLException e) {
        // Log the exact statement we attempted; the old code trimmed one
        // character too many (length()-2) and dropped the statement's last char.
        LOG.error("statement <" + s.substring(0, s.length() - 1) + "> failed", e);
        throw new RuntimeException(e);
      }
    });
  } catch (IOException e) {
    LOG.error("Caught IOException trying to read modified create user script "
        + createFile.getAbsolutePath(), e);
    throw new HiveMetaException(e);
  } catch (HiveMetaException e) {
    LOG.error("Failed to connect to RDBMS", e);
    throw e;
  } catch (SQLException e) {
    // Previously this was logged and swallowed, so the tool could report
    // success after a failed user creation; surface it to the caller instead.
    LOG.error("Got SQLException", e);
    throw new HiveMetaException("Failed to run create user script", e);
  }
}
}
private void updateTableId(Statement stmt) throws SQLException, HiveMetaException { // Find the old database id long oldDbId = getDbId(stmt, fromDb, fromCat); // Find the new database id long newDbId = getDbId(stmt, toDb, toCat); String update = String.format(schemaTool.quote(UPDATE_TABLE_ID_STMT), newDbId, oldDbId, tableName); LOG.debug("Going to run " + update); int numUpdated = stmt.executeUpdate(update); if (numUpdated != 1) { throw new HiveMetaException( "Failed to properly update TBLS table. Expected to update " + "1 row but instead updated " + numUpdated); } }
/**
 * Validates that every database location stored in the metastore points at one
 * of the accepted default servers.
 *
 * @param conn open metastore connection
 * @param defaultServers filesystem roots considered valid
 * @return true when every database location is valid
 * @throws HiveMetaException on JDBC errors while scanning
 */
private boolean checkMetaStoreDBLocation(Connection conn, URI[] defaultServers) throws HiveMetaException {
  String dbLocQuery = schemaTool.quote(QUERY_DB_LOCATION);
  int badLocationCount = 0;
  try (Statement stmt = conn.createStatement();
       ResultSet res = stmt.executeQuery(dbLocQuery)) {
    while (res.next()) {
      String location = res.getString(3);
      String dbName = getNameOrID(res, 2, 1);
      if (!checkLocation("Database " + dbName, location, defaultServers)) {
        badLocationCount++;
      }
    }
  } catch (SQLException e) {
    throw new HiveMetaException("Failed to get DB Location Info.", e);
  }
  return badLocationCount == 0;
}
/*** * Get the name of the script to initialize the schema for given version * @param toVersion Target version. If it's null, then the current server version is used * @return * @throws HiveMetaException */ @Override public String generateInitFileName(String toVersion) throws HiveMetaException { if (toVersion == null) { toVersion = getHiveSchemaVersion(); } String initScriptName = INIT_FILE_PREFIX + toVersion + "." + dbType + SQL_FILE_EXTENSION; // check if the file exists if (!(new File(getMetaStoreScriptDir() + File.separatorChar + initScriptName).exists())) { throw new HiveMetaException("Unknown version specified for initialization: " + toVersion); } return initScriptName; }
private void ensureFromVersion() throws HiveMetaException { if (fromVersion != null) { return; } // If null, then read from the metastore MetaStoreConnectionInfo connectionInfo = schemaTool.getConnectionInfo(false); fromVersion = schemaTool.getMetaStoreSchemaInfo().getMetaStoreSchemaVersion(connectionInfo); if (fromVersion == null || fromVersion.isEmpty()) { throw new HiveMetaException("Schema version not stored in the metastore. " + "Metastore schema is too old or corrupt. Try specifying the version manually"); } System.out.println("Upgrading from the version " + fromVersion); }
/**
 * Reads the schema version recorded in the metastore's VERSION table.
 *
 * @param connectionInfo connection parameters for the metastore database
 * @return the single recorded schema version
 * @throws HiveMetaException if no version row exists, multiple rows exist, or
 *         the query fails
 */
@Override
public String getMetaStoreSchemaVersion(MetaStoreConnectionInfo connectionInfo) throws HiveMetaException {
  boolean needsQuotedIdentifier = HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType(),
      connectionInfo.getMetaDbType(), false).needsQuotedIdentifier();
  String versionQuery;
  if (needsQuotedIdentifier) {
    versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t";
  } else {
    versionQuery = "select t.SCHEMA_VERSION from VERSION t";
  }
  // Hive-on-Hive metastores keep the VERSION table in the SYS schema.
  String schema = HiveSchemaHelper.DB_HIVE.equals(connectionInfo.getDbType()) ? "SYS" : null;
  try (Connection metastoreDbConnection = HiveSchemaHelper.getConnectionToMetastore(connectionInfo, schema);
       Statement stmt = metastoreDbConnection.createStatement();
       // ResultSet is now closed via try-with-resources instead of being
       // left to the Statement's implicit cleanup.
       ResultSet res = stmt.executeQuery(versionQuery)) {
    if (!res.next()) {
      throw new HiveMetaException("Could not find version info in metastore VERSION table.");
    }
    String currentSchemaVersion = res.getString(1);
    if (res.next()) {
      throw new HiveMetaException("Multiple versions were found in metastore.");
    }
    return currentSchemaVersion;
  } catch (SQLException e) {
    // Preserve the SQLException as the cause instead of flattening it to a
    // message string and discarding the stack trace.
    throw new HiveMetaException("Failed to get schema version, Cause:" + e.getMessage(), e);
  }
}
}
/**
 * Runs all metastore validators and reports an aggregate pass/fail result.
 *
 * @throws HiveMetaException if any validator fails or the connection cannot
 *         be closed
 */
@Override
void execute() throws HiveMetaException {
  System.out.println("Starting metastore validation\n");
  Connection conn = schemaTool.getConnectionToMetastore(false);
  boolean allPassed = true;
  try {
    // Each validator prints its own diagnostics; we only aggregate the verdicts.
    allPassed &= validateSchemaVersions();
    allPassed &= validateSequences(conn);
    allPassed &= validateSchemaTables(conn);
    allPassed &= validateLocations(conn, schemaTool.getValidationServers());
    allPassed &= validateColumnNullValues(conn);
  } finally {
    if (conn != null) {
      try {
        conn.close();
      } catch (SQLException e) {
        throw new HiveMetaException("Failed to close metastore connection", e);
      }
    }
  }
  System.out.print("Done with metastore validation: ");
  if (allPassed) {
    System.out.println("[SUCCESS]");
  } else {
    System.out.println("[FAIL]");
    throw new HiveMetaException("Validation failed");
  }
}
/**
 * Moves the table's metastore records from the source catalog/database to the
 * target, in a single transaction: the TBLS row is repointed first, then every
 * auxiliary table that references the table by name is rewritten, and only
 * then is the transaction committed.  Any SQL failure triggers a rollback in
 * the finally block (success stays false until commit returns).
 *
 * @throws HiveMetaException if any of the updates fail
 */
@Override
void execute() throws HiveMetaException {
  Connection conn = schemaTool.getConnectionToMetastore(true);
  boolean success = false;
  try {
    // All updates must land atomically, so disable auto-commit for the batch.
    conn.setAutoCommit(false);
    try (Statement stmt = conn.createStatement()) {
      updateTableId(stmt);
      updateDbNameForTable(stmt, "TAB_COL_STATS", "TABLE_NAME", fromCat, toCat, fromDb, toDb, tableName);
      updateDbNameForTable(stmt, "PART_COL_STATS", "TABLE_NAME", fromCat, toCat, fromDb, toDb, tableName);
      updateDbNameForTable(stmt, "PARTITION_EVENTS", "TBL_NAME", fromCat, toCat, fromDb, toDb, tableName);
      updateDbNameForTable(stmt, "NOTIFICATION_LOG", "TBL_NAME", fromCat, toCat, fromDb, toDb, tableName);
      conn.commit();
      // Only mark success after commit, so the finally block knows not to roll back.
      success = true;
    }
  } catch (SQLException se) {
    throw new HiveMetaException("Failed to move table", se);
  } finally {
    try {
      if (!success) {
        conn.rollback();
      }
    } catch (SQLException e) {
      // Not really much we can do here.
      LOG.error("Failed to rollback, everything will probably go bad from here.");
    }
  }
}
/**
 * Creates the metastore database user by running the (credential-substituted)
 * create-user script.  Oracle gets special statement-by-statement handling.
 *
 * @throws HiveMetaException if the connection test, script preparation, or
 *         script execution fails
 */
@Override
void execute() throws HiveMetaException {
  schemaTool.testConnectionToMetastore();
  System.out.println("Starting user creation");
  String scriptDir = schemaTool.getMetaStoreSchemaInfo().getMetaStoreScriptDir();
  String protoCreateFile = schemaTool.getMetaStoreSchemaInfo().getCreateUserScript();
  try {
    File createFile = subUserAndPassword(scriptDir, protoCreateFile);
    System.out.println("Creation script " + createFile.getAbsolutePath());
    if (schemaTool.isDryRun()) {
      // Dry run: report the script path but execute nothing.
      return;
    }
    if ("oracle".equals(schemaTool.getDbType())) {
      oracleCreateUserHack(createFile);
    } else {
      schemaTool.execSql(createFile.getParent(), createFile.getName());
    }
    System.out.println("User creation completed");
  } catch (IOException e) {
    throw new HiveMetaException("User creation FAILED!" + " Metastore unusable !!", e);
  }
}
/**
 * Initializes the metastore schema at the resolved target version by running
 * the matching init script, then (optionally) verifies the recorded version.
 *
 * @throws HiveMetaException if the connection test, script resolution, or
 *         script execution fails
 */
@Override
void execute() throws HiveMetaException {
  ensureToVersion();
  schemaTool.testConnectionToMetastore();
  System.out.println("Starting metastore schema initialization to " + toVersion);
  String scriptDir = schemaTool.getMetaStoreSchemaInfo().getMetaStoreScriptDir();
  String scriptFile = schemaTool.getMetaStoreSchemaInfo().generateInitFileName(toVersion);
  try {
    System.out.println("Initialization script " + scriptFile);
    // A dry run stops after announcing which script would be executed.
    if (!schemaTool.isDryRun()) {
      schemaTool.execSql(scriptDir, scriptFile);
      System.out.println("Initialization script completed");
    }
  } catch (IOException e) {
    throw new HiveMetaException("Schema initialization FAILED! Metastore state would be inconsistent!", e);
  }
  if (validate) {
    schemaTool.verifySchemaVersion();
  }
}
}
@Override void execute() throws HiveMetaException { ensureFromVersion(); if (schemaTool.getMetaStoreSchemaInfo().getHiveSchemaVersion().equals(fromVersion)) { System.out.println("No schema upgrade required from version " + fromVersion); return; } // Find the list of scripts to execute for this upgrade List<String> upgradeScripts = schemaTool.getMetaStoreSchemaInfo().getUpgradeScripts(fromVersion); schemaTool.testConnectionToMetastore(); System.out.println("Starting upgrade metastore schema from version " + fromVersion + " to " + schemaTool.getMetaStoreSchemaInfo().getHiveSchemaVersion()); String scriptDir = schemaTool.getMetaStoreSchemaInfo().getMetaStoreScriptDir(); try { for (String scriptFile : upgradeScripts) { System.out.println("Upgrade script " + scriptFile); if (!schemaTool.isDryRun()) { runPreUpgrade(scriptDir, scriptFile); schemaTool.execSql(scriptDir, scriptFile); System.out.println("Completed " + scriptFile); } } } catch (IOException e) { throw new HiveMetaException("Upgrade FAILED! Metastore state would be inconsistent !!", e); } // Revalidated the new version after upgrade schemaTool.verifySchemaVersion(); }