private void checkQFileTestHack() {
  boolean hackOn = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ||
      MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST);
  if (hackOn) {
    LOG.info("Hacking in canned values for transaction manager");
    // Set up the transaction/locking db in the derby metastore
    TxnDbUtil.setConfValues(conf);
    try {
      TxnDbUtil.prepDb(conf);
    } catch (Exception e) {
      // We may have already created the tables and thus don't need to redo it.
      if (e.getMessage() != null && !e.getMessage().contains("already exists")) {
        throw new RuntimeException("Unable to set up transaction database for" +
            " testing: " + e.getMessage(), e);
      }
    }
  }
}
public TestHiveWriter() throws Exception {
  port = 9083;
  metaStoreURI = null;
  int callTimeoutPoolSize = 1;
  callTimeoutPool = Executors.newFixedThreadPool(callTimeoutPoolSize,
      new ThreadFactoryBuilder().setNameFormat("hiveWriterTest").build());

  // 1) Start metastore
  conf = HiveSetupUtil.getHiveConf();
  TxnDbUtil.setConfValues(conf);
  if (metaStoreURI != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI);
  }
}
public TestHiveBolt() throws Exception {
  //metaStoreURI = "jdbc:derby:;databaseName=" + System.getProperty("java.io.tmpdir") + "metastore_db;create=true";
  metaStoreURI = null;
  conf = HiveSetupUtil.getHiveConf();
  TxnDbUtil.setConfValues(conf);
  if (metaStoreURI != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI);
  }
}
public void prepareTransactionDatabase(HiveConf conf) throws Exception {
  // Point the transaction tables at the embedded Derby instance,
  // wipe any leftover state, then create the ACID schema fresh.
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.cleanDb(conf);
  TxnDbUtil.prepDb(conf);
}
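A minimal sketch of how a JUnit 4 test could drive the helper above; the class name and field wiring here are hypothetical, only TxnDbUtil and the @Before/@After lifecycle come from the snippets in this section, and imports are elided as in the surrounding snippets.

public class TxnDbSetupSketch {
  private HiveConf conf;

  @Before
  public void setUp() throws Exception {
    conf = new HiveConf();
    // setConfValues + cleanDb + prepDb, as in the helper above
    // (assumed to be inlined here or inherited from a shared base class)
    prepareTransactionDatabase(conf);
  }

  @After
  public void tearDown() throws Exception {
    TxnDbUtil.cleanDb(conf); // leave no state behind for the next test
  }
}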
@Before
public void setup() throws Exception {
  conf = new HiveConf();
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.cleanDb(conf);
  ms = new HiveMetaStoreClient(conf);
  txnHandler = TxnUtils.getTxnStore(conf);
  tmpdir = new File(Files.createTempDirectory("compactor_test_table_").toString());
}
public TestCompactionTxnHandler() throws Exception {
  TxnDbUtil.setConfValues(conf);
  // Set config so that TxnUtils.buildQueryWithINClauseStrings() will
  // produce multiple queries
  conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH, 1);
  conf.setIntVar(HiveConf.ConfVars.METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE, 10);
  tearDown();
}
public TestDbTxnManager2() throws Exception {
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
  TxnDbUtil.setConfValues(conf);
  conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
}
@Before
public void setUp() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.prepDb(conf);
}
@Before
public void setUp() throws Exception {
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.prepDb(conf);
  client = new HiveMetaStoreClient(conf);
  // Open a plain JDBC connection to the metastore's backing database
  // so the test can inspect the transaction tables directly.
  String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
  conn = DriverManager.getConnection(connectionStr);
}
public TestStreaming() throws Exception {
  partitionVals = new ArrayList<String>(2);
  partitionVals.add(PART1_CONTINENT);
  partitionVals.add(PART1_COUNTRY);

  partitionVals2 = new ArrayList<String>(1);
  partitionVals2.add(PART1_COUNTRY);

  conf = new HiveConf(this.getClass());
  conf.set("fs.raw.impl", RawFileSystem.class.getName());
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  TxnDbUtil.setConfValues(conf);
  conf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true);
  conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
  dbFolder.create();

  //1) Start from a clean slate (metastore)
  TxnDbUtil.cleanDb(conf);
  TxnDbUtil.prepDb(conf);

  //2) obtain metastore clients
  msClient = new HiveMetaStoreClient(conf);
}
public TestDbTxnManager() throws Exception {
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  TxnDbUtil.setConfValues(conf);
  SessionState.start(conf);
  ctx = new Context(conf);
  tearDown();
}
public TestStreaming() throws Exception {
  partitionVals = new ArrayList<String>(2);
  partitionVals.add(PART1_CONTINENT);
  partitionVals.add(PART1_COUNTRY);

  partitionVals2 = new ArrayList<String>(1);
  partitionVals2.add(PART1_COUNTRY);

  conf = new HiveConf(this.getClass());
  conf.set("fs.raw.impl", RawFileSystem.class.getName());
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  TxnDbUtil.setConfValues(conf);
  if (metaStoreURI != null) {
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreURI);
  }
  conf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true);
  conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
  dbFolder.create();

  //1) Start from a clean slate (metastore)
  TxnDbUtil.cleanDb(conf);
  TxnDbUtil.prepDb(conf);

  //2) obtain metastore clients
  msClient = new HiveMetaStoreClient(conf);
}
private void startThread(char type, boolean stopAfterOne, AtomicBoolean looped) throws Exception {
  TxnDbUtil.setConfValues(conf);
  CompactorThread t = null;
  switch (type) {
    case 'i': t = new Initiator(); break;
    case 'w': t = new Worker(); break;
    case 'c': t = new Cleaner(); break;
    default: throw new RuntimeException("Huh? Unknown thread type.");
  }
  t.setThreadId((int) t.getId());
  t.setConf(conf);
  stop.set(stopAfterOne);
  t.init(stop, looped);
  if (stopAfterOne) {
    t.run();
  } else {
    t.start();
  }
}
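A hypothetical call site for the helper above, run one compaction Worker pass synchronously, then start a Cleaner in the background and wait for it to complete a cycle; the assumption that init's looped flag is set once per loop iteration is carried over from the snippet, not confirmed by it.

AtomicBoolean looped = new AtomicBoolean(false);
startThread('w', true, looped);   // 'w' => Worker, run() once in this thread and return
startThread('c', false, looped);  // 'c' => Cleaner, start() as a background thread
while (!looped.get()) {
  Thread.sleep(100);              // crude wait until the Cleaner signals one full cycle
}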
public TestTxnHandler() throws Exception {
  TxnDbUtil.setConfValues(conf);
  // Bump this class's logger to DEBUG; note the local Log4j2
  // Configuration deliberately shadows the HiveConf field of the same name.
  LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
  Configuration conf = ctx.getConfiguration();
  conf.getLoggerConfig(CLASS_NAME).setLevel(Level.DEBUG);
  ctx.updateLoggers(conf);
  tearDown();
}
public TestStreamingDynamicPartitioning() throws Exception {
  conf = new HiveConf(this.getClass());
  conf.set("fs.raw.impl", RawFileSystem.class.getName());
  conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  TxnDbUtil.setConfValues(conf);
  conf.setBoolVar(HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true);
  conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
  conf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
  dbFolder.create();
  loc1 = dbFolder.newFolder(dbName + ".db").toString();

  //1) Start from a clean slate (metastore)
  TxnDbUtil.cleanDb(conf);
  TxnDbUtil.prepDb(conf);

  //2) obtain metastore clients
  msClient = new HiveMetaStoreClient(conf);
}
@Before
public void setUp() throws Exception {
  conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, "None");
  TxnDbUtil.setConfValues(conf);
  try {
    TxnDbUtil.prepDb(conf);
  } catch (SQLException e) {
    // Usually this means we've already created the tables, so clean them and then try again
    tearDown();
    TxnDbUtil.prepDb(conf);
  }
  txnHandler = TxnUtils.getTxnStore(conf);
}
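Once prepDb has succeeded, the TxnStore obtained above can be exercised directly; a minimal sketch using the Hive metastore thrift types, with placeholder user/host strings:

OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(1, "testuser", "localhost"));
long txnId = resp.getTxn_ids().get(0);  // the single txn we just opened
txnHandler.abortTxn(new AbortTxnRequest(txnId));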
hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
TxnDbUtil.setConfValues(hiveConf);
@Before
public void setUp() throws Exception {
  conf = MetastoreConf.newMetastoreConf();
  conf.setClass(MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS.getVarname(),
      MsckPartitionExpressionProxy.class, PartitionExpressionProxy.class);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  conf.setBoolean(MetastoreConf.ConfVars.MULTITHREADED.getVarname(), false);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
  TxnDbUtil.setConfValues(conf);
  TxnDbUtil.prepDb(conf);
  client = new HiveMetaStoreClient(conf);
}
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
hiveConf.setBoolean("mapred.input.dir.recursive", true);
TxnDbUtil.setConfValues(hiveConf);
TxnDbUtil.prepDb(hiveConf);
File f = new File(getWarehouseDir());
@SuppressWarnings("deprecation") @Before public void setUp() throws Exception { this.hiveConf = new HiveConf(TestStatsUpdaterThread.class); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getTestDataDir()); hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); // hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, true); hiveConf.set(MetastoreConf.ConfVars.STATS_AUTO_UPDATE.getVarname(), "all"); TxnDbUtil.setConfValues(hiveConf); TxnDbUtil.prepDb(hiveConf); File f = new File(getTestDataDir()); if (f.exists()) { FileUtil.fullyDelete(f); } if (!(new File(getTestDataDir()).mkdirs())) { throw new RuntimeException("Could not create " + getTestDataDir()); } this.ss = DriverUtils.setUpSessionState(hiveConf, "hive", true); cleanUp(); }