// NOTE(review): fragment of a larger method (enclosing signature not visible; the
// `if (canMeasureCPUTime) {` opened here is closed outside this snippet).
// Builds a cost-sensitive Evaluation, scores a fresh copy of the template
// classifier on `test`, then packs summary metrics into `result[]`.
// Uses the deprecated `new Double(...)` boxing — presumably legacy Weka code;
// Double.valueOf(...) would be preferred if this file is ever modernised.
Evaluation eval = new Evaluation(train, costMatrix); m_Classifier = AbstractClassifier.makeCopy(m_Template); eval.evaluateModel(m_Classifier, test); if (canMeasureCPUTime) { testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime; m_result = eval.toSummaryString(); result[current++] = new Double(eval.numInstances()); result[current++] = new Double(eval.correct()); result[current++] = new Double(eval.incorrect()); result[current++] = new Double(eval.unclassified()); result[current++] = new Double(eval.pctCorrect()); result[current++] = new Double(eval.pctIncorrect()); result[current++] = new Double(eval.pctUnclassified()); result[current++] = new Double(eval.totalCost()); result[current++] = new Double(eval.avgCost()); result[current++] = new Double(eval.meanAbsoluteError()); result[current++] = new Double(eval.rootMeanSquaredError()); result[current++] = new Double(eval.relativeAbsoluteError()); result[current++] = new Double(eval.rootRelativeSquaredError()); result[current++] = new Double(eval.SFPriorEntropy()); result[current++] = new Double(eval.SFSchemeEntropy()); result[current++] = new Double(eval.SFEntropyGain()); result[current++] = new Double(eval.SFMeanPriorEntropy());
// NOTE(review): fragment — `classifier`, `dataSet`, `result` and `this.numFolds`
// are declared outside this snippet. Runs stratified cross-validation with a
// fixed seed (Random(1), so results are reproducible) and appends the summary
// plus per-class detail strings to a string accumulator.
Evaluation evaluation = new Evaluation(dataSet.getData()); evaluation.crossValidateModel(classifier, dataSet.getData(), this.numFolds, new Random(1)); result.append("evaluation summary:"); result.append("\n"); result.append(evaluation.toSummaryString()); result.append("detailed accuracy:"); result.append("\n"); result.append(evaluation.toClassDetailsString());
// NOTE(review): fragment — the switch is not closed here and the enclosing
// method is not visible. Maps a metric id constant to the matching Evaluation
// accessor. The EVALUATION_COMBINED arm returns a composite score
// (1 - |CC|) + RRSE + RAE; smaller is presumably better — confirm against the
// caller's optimisation direction. MATTHEWS_CC hard-codes class index 0.
switch (id) { case DefaultEvaluationMetrics.EVALUATION_CC: return m_Evaluation.correlationCoefficient(); case DefaultEvaluationMetrics.EVALUATION_MATTHEWS_CC: return m_Evaluation.matthewsCorrelationCoefficient(0); case DefaultEvaluationMetrics.EVALUATION_RMSE: return m_Evaluation.rootMeanSquaredError(); case DefaultEvaluationMetrics.EVALUATION_RRSE: return m_Evaluation.rootRelativeSquaredError(); case DefaultEvaluationMetrics.EVALUATION_MAE: return m_Evaluation.meanAbsoluteError(); case DefaultEvaluationMetrics.EVALUATION_RAE: return m_Evaluation.relativeAbsoluteError(); case DefaultEvaluationMetrics.EVALUATION_COMBINED: return (1 - StrictMath.abs(m_Evaluation.correlationCoefficient()) + m_Evaluation.rootRelativeSquaredError() + m_Evaluation.relativeAbsoluteError()); case DefaultEvaluationMetrics.EVALUATION_ACC: return m_Evaluation.pctCorrect(); case DefaultEvaluationMetrics.EVALUATION_KAPPA: return m_Evaluation.kappa(); case DefaultEvaluationMetrics.EVALUATION_PRECISION: return m_Evaluation.precision(classLabel); case DefaultEvaluationMetrics.EVALUATION_WEIGHTED_PRECISION: return m_Evaluation.weightedPrecision(); case DefaultEvaluationMetrics.EVALUATION_RECALL: return m_Evaluation.recall(classLabel); case DefaultEvaluationMetrics.EVALUATION_WEIGHTED_RECALL: return m_Evaluation.weightedRecall(); case DefaultEvaluationMetrics.EVALUATION_AUC: return m_Evaluation.areaUnderROC(classLabel);
/**
 * Measures how far this model's probability estimates are from the truth on
 * the given data, expressed as the mean absolute error.
 *
 * @param data the instances to score the current model against
 * @return the mean absolute error of the predictions
 * @throws Exception if the evaluation cannot be performed
 */
protected double getMeanAbsoluteError(Instances data) throws Exception {
  final Evaluation evaluator = new Evaluation(data);
  evaluator.evaluateModel(this, data);
  return evaluator.meanAbsoluteError();
}
/**
 * Computes the fraction of the given instances that the current model
 * misclassifies.
 *
 * @param data the instances to score the current model against
 * @return the misclassification error rate
 * @throws Exception if the evaluation cannot be performed
 */
protected double getErrorRate(Instances data) throws Exception {
  final Evaluation evaluator = new Evaluation(data);
  evaluator.evaluateModel(this, data);
  return evaluator.errorRate();
}
/**
 * Scores an already-trained single-label classifier against a test set.
 * The training set is used only to initialise the evaluation (class priors);
 * the classifier itself is not rebuilt here.
 *
 * @param cl a single-label classifier, trained beforehand
 * @param trainData the data the evaluation's priors are derived from
 * @param testData the instances the classifier is evaluated on
 * @return the populated {@code Evaluation} holding all computed statistics
 * @throws Exception if the evaluation fails
 */
public static Evaluation getEvaluationSinglelabel(Classifier cl, Instances trainData, Instances testData) throws Exception {
  final Evaluation result = new Evaluation(trainData);
  result.evaluateModel(cl, testData);
  return result;
}
// NOTE(review): fragment of a larger method (the `if (canMeasureCPUTime) {`
// opened here closes outside this snippet). Nominal-class variant of the
// result-packing routine: evaluates a fresh copy of the template classifier,
// captures per-instance predictions, and fills `result[]` with accuracy,
// kappa, error and entropy statistics. Uses deprecated `new Double(...)`
// boxing — legacy style kept byte-identical here.
Evaluation eval = new Evaluation(train); m_Classifier = AbstractClassifier.makeCopy(m_Template); double[] predictions; predictions = eval.evaluateModel(m_Classifier, test); if (canMeasureCPUTime) { testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime; m_result = eval.toSummaryString(); result[current++] = new Double(eval.numInstances()); result[current++] = new Double(eval.correct()); result[current++] = new Double(eval.incorrect()); result[current++] = new Double(eval.unclassified()); result[current++] = new Double(eval.pctCorrect()); result[current++] = new Double(eval.pctIncorrect()); result[current++] = new Double(eval.pctUnclassified()); result[current++] = new Double(eval.kappa()); result[current++] = new Double(eval.meanAbsoluteError()); result[current++] = new Double(eval.rootMeanSquaredError()); result[current++] = new Double(eval.relativeAbsoluteError()); result[current++] = new Double(eval.rootRelativeSquaredError()); result[current++] = new Double(eval.SFPriorEntropy()); result[current++] = new Double(eval.SFSchemeEntropy()); result[current++] = new Double(eval.SFEntropyGain()); result[current++] = new Double(eval.SFMeanPriorEntropy()); result[current++] = new Double(eval.SFMeanSchemeEntropy());
// NOTE(review): heavily truncated fragment — several `case` labels and the
// matching `if (...)` headers for the AUC/PRC arms are missing, so the braces
// here do not balance. The intent appears to be: cross-validate the base
// classifier and record one of several error measures per repetition,
// depending on an evaluation-mode constant; F-measure/ROC/PRC arms switch
// between weighted and per-class (m_IRClassVal) variants. Do not edit this
// snippet without the surrounding switch — TODO locate the full method.
m_Evaluation = new Evaluation(trainCopy); m_Evaluation.crossValidateModel(m_BaseClassifier, trainCopy, m_folds, Rnd); repError[i] = m_Evaluation.errorRate(); break; case EVAL_ACCURACY: repError[i] = m_Evaluation.errorRate(); break; case EVAL_RMSE: repError[i] = m_Evaluation.rootMeanSquaredError(); break; case EVAL_MAE: repError[i] = m_Evaluation.meanAbsoluteError(); break; case EVAL_FMEASURE: if (m_IRClassVal < 0) { repError[i] = m_Evaluation.weightedFMeasure(); } else { repError[i] = m_Evaluation.fMeasure(m_IRClassVal); repError[i] = m_Evaluation.weightedAreaUnderROC(); } else { repError[i] = m_Evaluation.areaUnderROC(m_IRClassVal); repError[i] = m_Evaluation.weightedAreaUnderPRC(); } else { repError[i] = m_Evaluation.areaUnderPRC(m_IRClassVal);
// NOTE(review): fragment of a larger method — the trailing
// `new Double(eval.coverageOfTestCasesByPredictedRegions())` is an orphan
// expression whose assignment target was cut off, and the `if` block is not
// closed here. Numeric-class variant of the result packer: records error and
// entropy statistics plus the correlation coefficient. CPU time is measured
// via ThreadMXBean user time for the current thread.
long thID = Thread.currentThread().getId(); long CPUStartTime = -1, trainCPUTimeElapsed = -1, testCPUTimeElapsed = -1, trainTimeStart, trainTimeElapsed, testTimeStart, testTimeElapsed; Evaluation eval = new Evaluation(train); m_Classifier = AbstractClassifier.makeCopy(m_Template); eval.evaluateModel(m_Classifier, test); if (canMeasureCPUTime) { testCPUTimeElapsed = thMonitor.getThreadUserTime(thID) - CPUStartTime; m_result = eval.toSummaryString(); result[current++] = new Double(eval.numInstances()); result[current++] = new Double(eval.meanAbsoluteError()); result[current++] = new Double(eval.rootMeanSquaredError()); result[current++] = new Double(eval.relativeAbsoluteError()); result[current++] = new Double(eval.rootRelativeSquaredError()); result[current++] = new Double(eval.correlationCoefficient()); result[current++] = new Double(eval.unclassified()); result[current++] = new Double(eval.pctUnclassified()); result[current++] = new Double(eval.SFPriorEntropy()); result[current++] = new Double(eval.SFSchemeEntropy()); result[current++] = new Double(eval.SFEntropyGain()); result[current++] = new Double(eval.SFMeanPriorEntropy()); result[current++] = new Double(eval.SFMeanSchemeEntropy()); result[current++] = new Double(eval.SFMeanEntropyGain()); new Double(eval.coverageOfTestCasesByPredictedRegions());
// NOTE(review): fragment — the `try` matching the trailing `catch` is outside
// this snippet, as is the brace closed before it. Evaluates a classifier on a
// held-out test set and logs summary/per-class/confusion-matrix reports via
// ImageJ's IJ.log; only the summary is gated on `verbose`, the detail and
// matrix are always logged — confirm that asymmetry is intentional.
final Evaluation evaluation = new Evaluation(testData); evaluation.evaluateModel(classifier, testData); if(verbose) IJ.log(evaluation.toSummaryString("\n=== Test data evaluation ===\n", false)); IJ.log(evaluation.toClassDetailsString() + "\n"); IJ.log(evaluation.toMatrixString()); error = evaluation.errorRate(); } catch (Exception e) {
// NOTE(review): fragment — `filteredClassifier`, `train` and `test` are
// declared in the unseen enclosing scope. Trains the filtered classifier,
// scores it on the test split and prints the summary plus confusion matrix
// to stdout. The surrounding method presumably declares `throws Exception`,
// since evaluateModel/toMatrixString are checked here — verify.
filteredClassifier.buildClassifier(train); Evaluation eval = new Evaluation(train); eval.evaluateModel(filteredClassifier, test); System.out.println(eval.toSummaryString()); System.out.println(eval.toMatrixString());
// NOTE(review): heavily truncated fragment of an incremental/streaming
// evaluation routine — several if/else headers are missing, so branches here
// do not pair up. Maintains a full-history Evaluation (m_eval) and a
// sliding-window Evaluation (m_windowEval) with its own instance/prediction
// deques; old instances appear to be removed from the window by re-evaluating
// them with negated weight (setWeight(-weight)) — TODO confirm against the
// full source. Primary measure is accuracy (1 - errorRate) of whichever
// evaluation applies.
m_eval = new Evaluation(ce.getStructure()); m_eval.useNoPriors(); m_windowEval = new Evaluation(ce.getStructure()); m_windowEval.useNoPriors(); m_windowedPreds = new LinkedList<double[]>(); m_eval.evaluateModelOnceAndRecordPrediction(dist, inst); } else { m_eval.evaluateModelOnce(dist, inst); m_windowEval.evaluateModelOnce(dist, inst); m_window.addFirst(inst); m_windowedPreds.addFirst(dist); m_windowEval.evaluateModelOnce(oldDist, oldest); oldest.setWeight(-oldest.weight()); m_dataPoint[1] = m_windowEval.rootMeanSquaredError(); m_dataPoint[2] = m_windowEval.kappa(); } else { m_dataPoint[1] = m_eval.rootMeanSquaredError(); m_dataPoint[2] = m_eval.kappa(); if (!inst.isMissing(inst.classIndex())) { if (m_windowSize > 0) { primaryMeasure = 1.0 - m_windowEval.errorRate(); } else { primaryMeasure = 1.0 - m_eval.errorRate();
// NOTE(review): fragment — evaluates `classifier` on its own training data
// (resubstitution estimate, so these metrics will be optimistic) and extracts
// weighted TP/TN rates, precision, recall and F1. The hand-rolled `accuracy`
// is correct()/(correct()+incorrect()) as a fraction; note it ignores
// unclassified instances, unlike Evaluation.pctCorrect().
eTest = new Evaluation(trainingData); eTest.evaluateModel(classifier, trainingData); double tp = eTest.weightedTruePositiveRate(); double tn = eTest.weightedTrueNegativeRate(); double prec = eTest.weightedPrecision(); double rec = eTest.weightedRecall(); double f1 = eTest.weightedFMeasure(); double accuracy = (eTest.correct()) / (eTest.incorrect() + eTest.correct());
// NOTE(review): fragment of a cross-validation loop body — `train`,
// `copiedClassifier`, `j` and the loop itself are outside this snippet, and
// the debug print is cut off mid-statement. Builds the classifier on the
// training fold, sets priors from the same fold, and scores fold `j`'s test
// split; `error` is the fold's misclassification rate.
Evaluation evaluation = new Evaluation(trainData); Instances test = trainData.testCV(m_NumFolds, j); copiedClassifier.buildClassifier(train); evaluation.setPriors(train); evaluation.evaluateModel(copiedClassifier, test); double error = evaluation.errorRate(); if (m_Debug) { System.err.println("Cross-validated error rate: "
// NOTE(review): truncated fragment of a scheme-repeatability check (looks like
// Weka's CheckClassifier-style tests): two evaluations of the same classifier
// built twice on train1 are compared for equality, with full summary reports
// printed on mismatch. The braces do not balance here — the `try` for the
// catch block and the closing of the `if` bodies are outside this snippet.
evaluation1A = new Evaluation(train1); evaluation1B = new Evaluation(train1); evaluation2 = new Evaluation(train2); } catch (Exception ex) { throw new Error("Error setting up for tests: " + ex.getMessage()); if (!evaluation1A.equals(evaluation1B)) { if (m_Debug) { println("\n=== Full report ===\n" + evaluation1A.toSummaryString("\nFirst buildClassifier()", true) + "\n\n"); println(evaluation1B.toSummaryString("\nSecond buildClassifier()", true) + "\n\n");
/** * Traverses the tree and installs linear models at each node. This method * must be called if pruning is not to be performed. * * @throws Exception if an error occurs */ public void installLinearModels() throws Exception { Evaluation nodeModelEval; if (m_isLeaf) { buildLinearModel(m_indices); } else { if (m_left != null) { m_left.installLinearModels(); } if (m_right != null) { m_right.installLinearModels(); } buildLinearModel(m_indices); } nodeModelEval = new Evaluation(m_instances); nodeModelEval.evaluateModel(m_nodeModel, m_instances); m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError(); // save space if (!m_saveInstances) { m_instances = new Instances(m_instances, 0); } }
// NOTE(review): fragment — `trainingSet`, `copies`, `j` and `test` come from
// the unseen enclosing method, and the `return` implies this is the tail of
// that method. Returns the absolute number of misclassified test instances
// (a double, since Evaluation.incorrect() sums instance weights).
Evaluation eval = new Evaluation(trainingSet); eval.evaluateModel(copies[j], test); return eval.incorrect();
// NOTE(review): heavily truncated fragment (appears to be from Weka's
// ClassifierPanel "re-evaluate on test set" path) — a call's argument list
// starting at `trainHeader != null ? ...` has lost its enclosing call, so
// this does not parse standalone. Sets up a cost-sensitive Evaluation with
// no priors over the user-supplied test structure, appends summary /
// per-class / confusion-matrix reports to the output buffer, and stores the
// predictions and class attribute for later visualisation.
m_Log.logMessage("Re-evaluating classifier (" + name + ") on test set"); eval = new Evaluation(userTestStructure, costMatrix); eval.setMetricsToDisplay(m_selectedEvalMetrics); trainHeader != null ? trainHeader : userTestStructure, costMatrix, plotInstances, classificationOutput, false); eval.useNoPriors(); plotInstances.setUp(); outBuff.append(eval.toSummaryString(outputEntropy) + "\n"); outBuff.append(eval.toClassDetailsString() + "\n"); outBuff.append(eval.toMatrixString() + "\n"); vv.add(grph); if ((eval != null) && (eval.predictions() != null)) { vv.add(eval.predictions()); vv.add(userTestStructure.classAttribute());
// NOTE(review): truncated fragment of a switch over evaluation modes (case 3
// = "test on training"; the other case labels and the switch header/footer
// are missing, so the repeated `eval = new Evaluation(...)` blocks here
// belong to different, unseen cases). Each arm constructs a cost-sensitive
// Evaluation over the appropriate dataset, wires it to the plotting /
// classification-output helpers via setupEval, and restricts the displayed
// metrics to the user's selection.
case 3: // Test on training m_Log.statusMessage("Evaluating on training data..."); eval = new Evaluation(inst, costMatrix); setupEval(eval, classifier, inst, costMatrix, plotInstances, classificationOutput, false); eval.setMetricsToDisplay(m_selectedEvalMetrics); eval = new Evaluation(inst, costMatrix); setupEval(eval, classifier, inst, costMatrix, plotInstances, classificationOutput, false); eval.setMetricsToDisplay(m_selectedEvalMetrics); setupEval(eval, classifier, train, costMatrix, plotInstances, classificationOutput, true); eval.setMetricsToDisplay(m_selectedEvalMetrics); eval = new Evaluation(train, costMatrix); setupEval(eval, classifier, train, costMatrix, plotInstances, classificationOutput, false); eval.setMetricsToDisplay(m_selectedEvalMetrics); eval = new Evaluation(inst, costMatrix); classificationOutput, false); plotInstances.setInstances(userTestStructure); eval.setMetricsToDisplay(m_selectedEvalMetrics);
// NOTE(review): fragment — begins mid-expression (`.read(...)` has lost its
// receiver, presumably a serialized-Evaluation loader) and the loop over
// `label` / the `results`, `predictedLabelsList`, `confusionMatrix`
// declarations are outside this snippet. Loads a saved evaluation and copies
// aggregate plus per-label metrics into a results map; per-label lookups
// translate the label string to its class-attribute value index.
// FMEASURE is computed but, in the visible code, never put into `results`
// (only recall and precision are) — TODO confirm this is not an omission.
.read(evaluationFile.getAbsolutePath()); results.put(CORRECT, eval.correct()); results.put(INCORRECT, eval.incorrect()); results.put(PCT_CORRECT, eval.pctCorrect()); results.put(PCT_INCORRECT, eval.pctIncorrect()); results.put(PCT_UNCLASSIFIED, eval.pctUnclassified()); results.put(WGT_FMEASURE, eval.weightedFMeasure()); results.put(WGT_PRECISION, eval.weightedPrecision()); results.put(WGT_RECALL, eval.weightedRecall()); double recall = eval.recall(eval.getHeader() .attribute(eval.getHeader().classIndex()).indexOfValue(label)); double precision = eval.precision(eval.getHeader() .attribute(eval.getHeader().classIndex()).indexOfValue(label)); double fmeasure = eval.fMeasure(eval.getHeader() .attribute(eval.getHeader().classIndex()).indexOfValue(label)); results.put(RECALL + "_" + label, recall); results.put(PRECISION + "_" + label, precision); predictedLabelsList.add(label); confusionMatrix = eval.confusionMatrix();