sb.append("Possible error:\n " + e.getError() + "\n\n"); sb.append("Solution:\n " + e.getSolution() + "\n");
@Override
public ErrorAndSolution getErrorAndSolution() {
  ErrorAndSolution es = null;
  if (getQueryMatches()) {
    Map<String, List<String>> rll = getRegexToLogLines();
    if (rll.get(EXCEPTION_REGEX).size() > 0 && rll.get(SPLIT_REGEX).size() > 0) {
      // There should only be a single split line...
      String splitLogLine = rll.get(SPLIT_REGEX).get(0);
      // Extract only 'split: hdfs://...'
      Pattern p = Pattern.compile(SPLIT_REGEX, Pattern.CASE_INSENSITIVE);
      Matcher m = p.matcher(splitLogLine);
      if (m.find()) {
        // group(1) holds just the split path, without the 'split: ' prefix.
        String splitStr = m.group(1);
        es = new ErrorAndSolution(
            "Data file " + splitStr + " is corrupted.",
            "Replace the file, e.g. by re-running the query that produced the "
                + "source table / partition.");
      }
    }
  }
  reset();
  return es;
}
}
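// Why group(1) matters above: the first capture group is assumed to hold just
// the split path, so group(1) yields 'hdfs://...' while group() would include
// the 'split: ' prefix. A self-contained sketch with a made-up SPLIT_REGEX and
// log line (the real constant lives in the heuristic class):
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SplitRegexDemo {
  // Hypothetical pattern for illustration only.
  private static final String SPLIT_REGEX = "split:\\s*(hdfs://\\S+)";

  public static void main(String[] args) {
    String logLine =
        "INFO mapred.MapTask: Processing split: hdfs://nn:8020/warehouse/t/part-00000";
    Matcher m = Pattern.compile(SPLIT_REGEX, Pattern.CASE_INSENSITIVE).matcher(logLine);
    if (m.find()) {
      System.out.println(m.group());  // "split: hdfs://nn:8020/..." (full match)
      System.out.println(m.group(1)); // "hdfs://nn:8020/..." (path only)
    }
  }
}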
sb.append("Possible error:\n " + e.getError() + "\n\n"); sb.append("Solution:\n " + e.getSolution() + "\n");
String errorCode = m2.group();
es = new ErrorAndSolution(
    "A user-supplied transform script has exited with error code "
        + errorCode + " instead of 0.",
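// A sketch of how m2 above might be produced: first locate the failure line,
// then extract just the numeric exit code from it. Both regex constants here
// are assumptions; the test below feeds a "Script failed with code <n>" line.
Pattern linePattern = Pattern.compile("Script failed with code [0-9]+");
Pattern codePattern = Pattern.compile("[0-9]+");
Matcher m1 = linePattern.matcher("prefix Script failed with code 7874 suffix");
if (m1.find()) {
  Matcher m2 = codePattern.matcher(m1.group());
  if (m2.find()) {
    System.out.println(m2.group()); // prints "7874"
  }
}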
@Test
public void testScriptErrorHeuristic() throws Exception {
  JobConf jobConf = new JobConf();
  HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
  final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);

  String errorCode = "7874"; // example code
  String content = "line a\nlineb\n"
      + "Script failed with code " + errorCode + " line c\nlineD\n";
  File logFile = writeTestLog("1", content);
  taskLogProcessor.addTaskAttemptLogUrl(logFile.toURI().toURL().toString());

  List<ErrorAndSolution> errList = taskLogProcessor.getErrors();
  assertEquals(1, errList.size());

  final ErrorAndSolution eas = errList.get(0);
  String error = eas.getError();
  assertNotNull(error);
  // check that the error code is present in the error description:
  assertTrue(error.contains(errorCode));
  String solution = eas.getSolution();
  assertNotNull(solution);
  assertTrue(solution.length() > 0);
}
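// A minimal sketch of the writeTestLog(id, content) helper the tests rely on,
// assuming it simply writes the content to a temp file and returns it; the
// real helper in the test class may differ in temp-dir handling, naming, or
// encoding. (Assumes java.io.File, java.io.FileWriter, java.io.IOException.)
private File writeTestLog(String id, String content) throws IOException {
  File logFile = File.createTempFile("task-log-" + id + "-", ".txt");
  logFile.deleteOnExit();
  try (FileWriter writer = new FileWriter(logFile)) {
    writer.write(content);
  }
  return logFile;
}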
@Test
public void testMapAggrMemErrorHeuristic() throws Exception {
  JobConf jobConf = new JobConf();
  HiveConf.setQueryString(jobConf, "select * from foo group by moo;");
  final TaskLogProcessor taskLogProcessor = new TaskLogProcessor(jobConf);

  Throwable oome = new OutOfMemoryError("java heap space");
  File log1File = writeTestLog("1", toString(oome));
  taskLogProcessor.addTaskAttemptLogUrl(log1File.toURI().toURL().toString());

  List<ErrorAndSolution> errList = taskLogProcessor.getErrors();
  assertEquals(1, errList.size());

  final ErrorAndSolution eas = errList.get(0);
  String error = eas.getError();
  assertNotNull(error);
  // check that the error description mentions memory:
  assertTrue(error.contains("memory"));
  String solution = eas.getSolution();
  assertNotNull(solution);
  assertTrue(solution.length() > 0);
  String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString();
  assertTrue(solution.contains(confName));
}
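// A sketch of the toString(Throwable) helper assumed above: it renders the
// full stack trace as a String so the OutOfMemoryError can be written to a
// test log file. Uses only java.io.StringWriter and java.io.PrintWriter.
private String toString(Throwable t) {
  StringWriter sw = new StringWriter();
  t.printStackTrace(new PrintWriter(sw, true));
  return sw.toString();
}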
@Override
public ErrorAndSolution getErrorAndSolution() {
  ErrorAndSolution es = null;
  if (getQueryMatches() && configMatches) {
    List<String> matchingLines = getRegexToLogLines().get(OUT_OF_MEMORY_REGEX);
    if (matchingLines.size() > 0) {
      String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString();
      float confValue = HiveConf.getFloatVar(getConf(),
          HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
      es = new ErrorAndSolution(
          "Out of memory due to hash maps used in map-side aggregation.",
          "Currently " + confName + " is set to " + confValue + ". "
              + "Try setting it to a lower value, e.g. "
              + "'set " + confName + " = " + confValue / 2 + ";'");
    }
  }
  reset();
  return es;
}
}
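// A worked example of the suggested remediation string, assuming
// HIVEMAPAGGRHASHMEMORY maps to hive.map.aggr.hash.percentmemory with a
// current value of 0.5 (both assumptions; check your HiveConf):
float confValue = 0.5f; // assumed current value
System.out.println("set hive.map.aggr.hash.percentmemory = " + confValue / 2 + ";");
// prints: set hive.map.aggr.hash.percentmemory = 0.25;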
sb.append("Possible error:\n " + e.getError() + "\n\n"); sb.append("Solution:\n " + e.getSolution() + "\n");
@Override public ErrorAndSolution getErrorAndSolution() { ErrorAndSolution es = null; if(getQueryMatches() && configMatches) { List<String> matchingLines = getRegexToLogLines().get(OUT_OF_MEMORY_REGEX); if (matchingLines.size() > 0) { String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString(); float confValue = HiveConf.getFloatVar(getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); es = new ErrorAndSolution( "Out of memory due to hash maps used in map-side aggregation.", "Currently " + confName + " is set to " + confValue + ". " + "Try setting it to a lower value. i.e " + "'set " + confName + " = " + confValue/2 + ";'"); } } reset(); return es; } }
sb.append("Possible error:\n " + e.getError() + "\n\n"); sb.append("Solution:\n " + e.getSolution() + "\n");