/**
 * Computes the precision for the given class, i.e. the fraction of instances
 * predicted as "positive" that are truly positive:
 *
 * <pre>
 *          correctly classified positives
 *          ------------------------------
 *           total predicted as positive
 * </pre>
 *
 * The computation itself is delegated.
 *
 * @param classIndex the index of the class treated as "positive"
 * @return the precision for that class
 */
public double precision(int classIndex) {
  // Pure pass-through; all bookkeeping lives in the delegate.
  return m_delegate.precision(classIndex);
}
/**
 * Returns the precision with respect to one particular class, defined as
 *
 * <pre>
 *          correctly classified positives
 *          ------------------------------
 *           total predicted as positive
 * </pre>
 *
 * where "positive" means the class at {@code classIndex}.
 *
 * @param classIndex the index of the class to consider as "positive"
 * @return the precision
 */
public double precision(int classIndex) {
  final double result = m_delegate.precision(classIndex);
  return result;
}
/** * Calculates the weighted (by class size) precision. * * @return the weighted precision. */ public double weightedPrecision() { double[] classCounts = new double[m_NumClasses]; double classCountSum = 0; for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { classCounts[i] += m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double precisionTotal = 0; for (int i = 0; i < m_NumClasses; i++) { double temp = precision(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 precisionTotal += (temp * classCounts[i]); } } return precisionTotal / classCountSum; }
/** * Calculates the weighted (by class size) precision. * * @return the weighted precision. */ public double weightedPrecision() { double[] classCounts = new double[m_NumClasses]; double classCountSum = 0; for (int i = 0; i < m_NumClasses; i++) { for (int j = 0; j < m_NumClasses; j++) { classCounts[i] += m_ConfusionMatrix[i][j]; } classCountSum += classCounts[i]; } double precisionTotal = 0; for (int i = 0; i < m_NumClasses; i++) { double temp = precision(i); if (classCounts[i] > 0) { // If temp is NaN, we want the sum to also be NaN if count > 0 precisionTotal += (temp * classCounts[i]); } } return precisionTotal / classCountSum; }
/**
 * Computes the F-Measure (harmonic mean of precision and recall) with respect
 * to a particular class:
 *
 * <pre>
 *          2 * recall * precision
 *          ----------------------
 *            recall + precision
 * </pre>
 *
 * Returns zero when both precision and recall are zero, avoiding a 0/0
 * division.
 *
 * @param classIndex the index of the class to consider as "positive"
 * @return the F-Measure
 */
public double fMeasure(int classIndex) {
  final double p = precision(classIndex);
  final double r = recall(classIndex);
  return (p == 0 && r == 0) ? 0 : 2 * p * r / (p + r);
}
/**
 * Calculates the F-Measure for the class at {@code classIndex}, defined as
 *
 * <pre>
 *          2 * recall * precision
 *          ----------------------
 *            recall + precision
 * </pre>
 *
 * By convention the result is zero when precision and recall are both zero
 * (the formula would otherwise yield NaN).
 *
 * @param classIndex the index of the class to consider as "positive"
 * @return the F-Measure
 */
public double fMeasure(int classIndex) {
  final double prec = precision(classIndex);
  final double rec = recall(classIndex);
  if (prec != 0 || rec != 0) {
    return 2 * prec * rec / (prec + rec);
  }
  return 0;
}
vals[offset++] = eval.truePositiveRate(i); vals[offset++] = eval.falseNegativeRate(i); vals[offset++] = eval.precision(i); vals[offset++] = eval.recall(i); vals[offset++] = eval.fMeasure(i);
.weightedFalsePositiveRate(); case 19: return hasValIndex ? m_eval.precision(classValIndex[0]) : m_eval .weightedPrecision(); case 20:
.weightedFalsePositiveRate(); case 19: return hasValIndex ? m_eval.precision(classValIndex[0]) : m_eval .weightedPrecision(); case 20: