@Override public int compareTo(CompactionInfo o) { return getFullPartitionName().compareTo(o.getFullPartitionName()); } public String toString() {
/** * loads object from a row in Select * from COMPACTION_QUEUE * @param rs ResultSet after call to rs.next() * @throws SQLException */ static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLException { CompactionInfo fullCi = new CompactionInfo(); fullCi.id = rs.getLong(1); fullCi.dbname = rs.getString(2); fullCi.tableName = rs.getString(3); fullCi.partName = rs.getString(4); fullCi.state = rs.getString(5).charAt(0);//cq_state fullCi.type = TxnHandler.dbCompactionType2ThriftType(rs.getString(6).charAt(0)); fullCi.properties = rs.getString(7); fullCi.workerId = rs.getString(8); fullCi.start = rs.getLong(9); fullCi.runAs = rs.getString(10); fullCi.highestWriteId = rs.getLong(11); fullCi.metaInfo = rs.getBytes(12); fullCi.hadoopJobId = rs.getString(13); return fullCi; } static void insertIntoCompletedCompactions(PreparedStatement pStmt, CompactionInfo ci, long endTime) throws SQLException {
if(!ci.isMajorCompaction()) { return; CommandProcessorResponse cpr = d.run(sb.toString()); if (cpr.getResponseCode() != 0) { throw new IOException("Could not update stats for table " + ci.getFullTableName() + (ci.partName == null ? "" : "/" + ci.partName) + " due to: " + cpr); throw new IOException("Could not update stats for table " + ci.getFullTableName() + (ci.partName == null ? "" : "/" + ci.partName) + " due to: " + cnre.getMessage());
CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0)); if(!ci.getFullPartitionName().equals(lastCompactedEntity)) { lastCompactedEntity = ci.getFullPartitionName(); rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), getFailedCompactionRetention(),
private void clean(CompactionInfo ci) throws MetaException { LOG.info("Starting cleaning for " + ci.getFullPartitionName()); try { Table t = resolveTable(ci); if (t == null) { LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped"); txnHandler.markCleaned(ci); return; if (p == null) { LOG.info("Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped"); txnHandler.markCleaned(ci); removeFiles(location, txnList); } else { LOG.info("Cleaning as user " + ci.runAs + " for " + ci.getFullPartitionName()); UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, UserGroupInformation.getLoginUser()); } catch (IOException exception) { LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + ci.getFullPartitionName(), exception);
t1 = resolveTable(ci); if (t1 == null) { LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped and moving on."); txnHandler.markCleaned(ci); p = resolvePartition(ci); if (p == null && ci.partName != null) { LOG.info("Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped and moving on."); txnHandler.markCleaned(ci); final boolean isMajor = ci.isMajorCompaction(); final ValidTxnList txns = TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); final StringBuilder jobName = new StringBuilder(name); jobName.append("-compactor-"); jobName.append(ci.getFullPartitionName()); ci.getFullPartitionName()); } catch (IOException exception) { LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + ci.getFullPartitionName(), exception);
final CompactionInfo ci = CompactionInfo.optionalCompactionInfoStructToInfo( msc.findNextCompact(workerName)); LOG.debug("Processing compaction request " + ci); t1 = resolveTable(ci); if (t1 == null) { LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped and moving on."); msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); continue; msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); continue; p = resolvePartition(ci); if (p == null && ci.partName != null) { LOG.info("Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped and moving on."); msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); continue; msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); continue; msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); continue; msc.updateCompactorState(CompactionInfo.compactionInfoToStruct(ci), compactorTxnId); final StringBuilder jobName = new StringBuilder(workerName);
LOG.warn(parsedDeltas.size() + " delta files found for " + ci.getFullPartitionName() + " located at " + sd.getLocation() + "! This is likely a sign of misconfiguration, " + "especially if this message repeats. Check that compaction is running properly. Check for any " + if (ci.isMajorCompaction()) {
private StatsUpdater(CompactionInfo ci, List<String> columnListForStats, HiveConf conf, String userName) { this.conf = conf; this.userName = userName; this.ci = ci; if(!ci.isMajorCompaction() || columnListForStats == null || columnListForStats.isEmpty()) { columnList = Collections.emptyList(); return; } columnList = columnListForStats; }
/**
 * Find the table being compacted.
 *
 * @param ci compaction info returned from the compaction queue
 * @return metastore table
 * @throws org.apache.hadoop.hive.metastore.api.MetaException if the table cannot be found.
 */
protected Table resolveTable(CompactionInfo ci) throws MetaException {
    try {
        return rs.getTable(ci.dbname, ci.tableName);
    } catch (MetaException e) {
        // Pass the exception object itself so the stack trace is logged;
        // the previous code appended only e.getMessage(), losing the trace.
        LOG.error("Unable to find table " + ci.getFullTableName(), e);
        throw e;
    }
}
public static OptionalCompactionInfoStruct compactionInfoToOptionalStruct(CompactionInfo ci) { CompactionInfoStruct cis = compactionInfoToStruct(ci); OptionalCompactionInfoStruct ocis = new OptionalCompactionInfoStruct(); if (cis != null) { ocis.setCi(cis); } return ocis; } public static CompactionInfo optionalCompactionInfoStructToInfo(OptionalCompactionInfoStruct ocis) {
/**
 * Thrift endpoint: hands out the next compaction-queue entry for the given
 * worker, wrapped as an optional struct (empty when nothing is pending).
 *
 * @param workerId id of the worker asking for work
 * @return optional struct holding the next compaction to run, if any
 * @throws MetaException on metastore access failure
 */
@Override
public OptionalCompactionInfoStruct find_next_compact(String workerId) throws MetaException {
    final CompactionInfo next = getTxnHandler().findNextToCompact(workerId);
    return CompactionInfo.compactionInfoToOptionalStruct(next);
}
"checking to see if we should compact any of them"); for (CompactionInfo ci : potentials) { LOG.info("Checking to see if we should compact " + ci.getFullPartitionName()); try { Table t = resolveTable(ci); if (t == null) { LOG.info("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " + "table or has been dropped and moving on."); continue; LOG.debug("Skipping entry for " + ci.getFullTableName() + " as it is from dynamic" + " partitioning"); continue; ci.getFullPartitionName() + " so we will not initiate another compaction"); continue; LOG.warn("Will not initiate compaction for " + ci.getFullPartitionName() + " since last " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " attempts to compact it failed."); txnHandler.markFailed(ci); LOG.info("Can't find partition " + ci.getFullPartitionName() + ", assuming it has been dropped and moving on."); continue;
t1 = resolveTable(ci); if (t1 == null) { LOG.info("Unable to find table " + ci.getFullTableName() + ", assuming it was dropped and moving on."); txnHandler.markCleaned(ci); p = resolvePartition(ci); if (p == null && ci.partName != null) { LOG.info("Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped and moving on."); txnHandler.markCleaned(ci); final boolean isMajor = ci.isMajorCompaction(); final ValidTxnList txns = CompactionTxnHandler.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); final StringBuffer jobName = new StringBuffer(name); jobName.append("-compactor-"); jobName.append(ci.getFullPartitionName()); ci.getFullPartitionName()); LOG.error("Caught exception while trying to compact " + ci.getFullPartitionName() + ". Marking clean to avoid repeated failures, " + StringUtils.stringifyException(e)); txnHandler.markCleaned(ci);
LOG.warn(parsedDeltas.size() + " delta files found for " + ci.getFullPartitionName() + " located at " + sd.getLocation() + "! This is likely a sign of misconfiguration, " + "especially if this message repeats. Check that compaction is running properly. Check for any " + if (ci.isMajorCompaction()) {
private StatsUpdater(CompactionInfo ci, List<String> columnListForStats, HiveConf conf, String userName) { this.conf = new HiveConf(conf); //so that Driver doesn't think it's arleady in a transaction this.conf.unset(ValidTxnList.VALID_TXNS_KEY); this.userName = userName; this.ci = ci; if (!ci.isMajorCompaction() || columnListForStats == null || columnListForStats.isEmpty()) { columnList = Collections.emptyList(); return; } columnList = columnListForStats; }
/**
 * Find the table being compacted, in the default catalog.
 *
 * @param ci compaction info returned from the compaction queue
 * @return metastore table
 * @throws MetaException if the table cannot be found.
 */
@Override Table resolveTable(CompactionInfo ci) throws MetaException {
    try {
        return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName);
    } catch (MetaException e) {
        // Pass the exception object itself so the stack trace is logged;
        // the previous code appended only e.getMessage(), losing the trace.
        LOG.error("Unable to find table " + ci.getFullTableName(), e);
        throw e;
    }
}
"checking to see if we should compact any of them"); for (CompactionInfo ci : potentials) { LOG.info("Checking to see if we should compact " + ci.getFullPartitionName()); try { Table t = resolveTable(ci); if (t == null) { LOG.info("Can't find table " + ci.getFullTableName() + ", assuming it's a temp " + "table or has been dropped and moving on."); continue; LOG.debug("Skipping entry for " + ci.getFullTableName() + " as it is from dynamic" + " partitioning"); continue; ci.getFullPartitionName() + " so we will not initiate another compaction"); continue; LOG.warn("Will not initiate compaction for " + ci.getFullPartitionName() + " since last " + HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD + " attempts to compact it failed."); txnHandler.markFailed(ci); LOG.info("Can't find partition " + ci.getFullPartitionName() + ", assuming it has been dropped and moving on."); continue;
/**
 * Hash code derived solely from the full partition name — the same key
 * compareTo orders by — so equal entities hash identically.
 */
@Override
public int hashCode() {
    final int prime = 31;
    // Value-identical to the classic seed-17/multiplier-31 accumulation.
    return prime * 17 + this.getFullPartitionName().hashCode();
}
rs = stmt.executeQuery(s); while (rs.next()) { CompactionInfo info = new CompactionInfo(); info.dbname = rs.getString(1); info.tableName = rs.getString(2); rs = stmt.executeQuery(s); while (rs.next()) { CompactionInfo info = new CompactionInfo(); info.dbname = rs.getString(1); info.tableName = rs.getString(2);