// Fragment: records the Variance-Ratio-Criteria score in measurement group `g`, then returns it.
// NOTE(review): the trailing arguments presumably are (min, max, expected, lower-is-better) —
// confirm against the MeasurementGroup.addMeasure overload used by this file.
g.addMeasure("Variance Ratio Criteria", vrc, 0., 1., 0., false); return vrc;
// Fragment: registers mean distance, sum of squares, and RMSD (all flagged lower-is-better,
// unbounded above), attaches the evaluation result `ev` under `c` in the hierarchy, returns ssq.
// NOTE(review): this copy uses Math.sqrt while the near-identical snippets below use
// FastMath.sqrt — worth unifying; confirm which sqrt the surrounding file imports.
g.addMeasure("Mean distance", sum / div, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("Sum of Squares", ssq, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("RMSD", Math.sqrt(ssq / div), 0., Double.POSITIVE_INFINITY, true); db.getHierarchy().add(c, ev); return ssq;
// Fragment: looks up (or creates) the "Evaluation measures" group and records the ROC AUC
// under ROCAUC_LABEL, guarded by hasMeasure so it is only added once.
// NOTE(review): this line contains the identical statement sequence twice, redeclaring `g`,
// and both copies leave a '{' unbalanced — almost certainly a concatenation/clone artifact;
// only one copy belongs in any single source file. Reconcile against the original file.
MeasurementGroup g = ev.findOrCreateGroup("Evaluation measures"); if(!g.hasMeasure(ROCAUC_LABEL)) { g.addMeasure(ROCAUC_LABEL, rocres.auc, 0., 1., false); MeasurementGroup g = ev.findOrCreateGroup("Evaluation measures"); if(!g.hasMeasure(ROCAUC_LABEL)) { g.addMeasure(ROCAUC_LABEL, rocres.auc, 0., 1., false);
// Fragment: outlier-ranking evaluation — ROC AUC (guarded by hasMeasure), Average Precision,
// R-Precision, Maximum F1, then DCG and NDCG with their expected values as the reference point,
// followed by chance-adjusted variants, each normalized as (value - expected) / (1 - expected).
// Note: "Adjusted DCG" stores adjndcg computed from NDCG; since dcg/maxdcg == ndcg, the adjusted
// DCG and adjusted NDCG coincide mathematically — presumably intentional, but confirm the label.
// NOTE(review): `adjauc`, `avep`, `rate`, `pos`, `size`, `test`, `or`, `g` are declared outside
// this fragment, and the '{' opened after hasMeasure("ROC AUC") is unbalanced here.
double rocauc = ROCEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); if(!g.hasMeasure("ROC AUC")) { g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); double maxdcg = DCGEvaluation.maximum(pos); double dcg = DCGEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("DCG", dcg, 0., maxdcg, DCGEvaluation.STATIC.expected(pos, size), false); double ndcg = NDCGEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("NDCG", ndcg, 0., 1., NDCGEvaluation.STATIC.expected(pos, size), false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false); double endcg = NDCGEvaluation.STATIC.expected(pos, size); double adjndcg = (ndcg - endcg) / (1. - endcg); g.addMeasure("Adjusted DCG", adjndcg, 0., 1., 0., false);
// Fragment: registers mean distance, sum of squares, and RMSD (all flagged lower-is-better,
// unbounded above) using FastMath.sqrt, attaches `ev` under `c` in the hierarchy, returns ssq.
g.addMeasure("Mean distance", sum / div, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("Sum of Squares", ssq, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("RMSD", FastMath.sqrt(ssq / div), 0., Double.POSITIVE_INFINITY, true); db.getHierarchy().add(c, ev); return ssq;
// Fragment (duplicate of the snippet on the earlier line): outlier-ranking evaluation — ROC AUC
// (guarded by hasMeasure), Average Precision, R-Precision, Maximum F1, DCG and NDCG with
// expected values as reference, then chance-adjusted variants, (value - expected) / (1 - expected).
// NOTE(review): `adjauc`, `avep`, `rate`, `pos`, `size`, `test`, `or`, `g` come from outside this
// fragment, and the '{' opened after hasMeasure("ROC AUC") is unbalanced here.
double rocauc = ROCEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); if(!g.hasMeasure("ROC AUC")) { g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); double maxdcg = DCGEvaluation.maximum(pos); double dcg = DCGEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("DCG", dcg, 0., maxdcg, DCGEvaluation.STATIC.expected(pos, size), false); double ndcg = NDCGEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("NDCG", ndcg, 0., 1., NDCGEvaluation.STATIC.expected(pos, size), false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false); double endcg = NDCGEvaluation.STATIC.expected(pos, size); double adjndcg = (ndcg - endcg) / (1. - endcg); g.addMeasure("Adjusted DCG", adjndcg, 0., 1., 0., false);
// Fragment (duplicate of the FastMath snippet above): registers mean distance, sum of squares,
// and RMSD (lower-is-better, unbounded above), attaches `ev` under `c`, returns ssq.
g.addMeasure("Mean distance", sum / div, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("Sum of Squares", ssq, 0., Double.POSITIVE_INFINITY, true); g.addMeasure("RMSD", FastMath.sqrt(ssq / div), 0., Double.POSITIVE_INFINITY, true); db.getHierarchy().add(c, ev); return ssq;
// Fragment: looks up (or creates) the "Evaluation measures" group and records the ROC AUC
// under ROCAUC_LABEL, guarded by hasMeasure so it is only added once.
// NOTE(review): identical statement sequence appears twice on this line, redeclaring `g`,
// with both '{' left unbalanced — a concatenation/clone artifact; only one copy belongs in
// any single source file. Reconcile against the original file.
MeasurementGroup g = ev.findOrCreateGroup("Evaluation measures"); if(!g.hasMeasure(ROCAUC_LABEL)) { g.addMeasure(ROCAUC_LABEL, rocres.auc, 0., 1., false); MeasurementGroup g = ev.findOrCreateGroup("Evaluation measures"); if(!g.hasMeasure(ROCAUC_LABEL)) { g.addMeasure(ROCAUC_LABEL, rocres.auc, 0., 1., false);
// Fragment: ranking evaluation over a score ordering (SimpleAdapter over order.iter()):
// ROC AUC, Average Precision, R-Precision, and Maximum F1, each followed by a chance-adjusted
// variant normalized as (value - rate) / (1 - rate); `rate` is presumably the positive-class
// rate used as the expected value — confirm against the enclosing method.
// NOTE(review): `adjauc`, `rate`, `test`, `order`, `res` are declared outside this fragment.
MeasurementGroup g = res.newGroup("Evaluation measures:"); double rocauc = ROCEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); double avep = AveragePrecisionEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false);
// Fragment (three identical copies — clone listing): records clustering-comparison measures:
// pair-counting (Jaccard, F1, Precision, Recall, Rand, ARI, Fowlkes-Mallows), entropy-based
// NMI (joint and sqrt normalization), B-Cubed (F1/recall/precision), set matching
// (F1/purity/inverse purity), edit-distance measures, and the mean of a `gini` statistic
// labeled with its sample stddev (0 when fewer than two samples).
// NOTE(review): several measure names repeat ("F1-Measure", "Precision", "Recall") on the
// same variable `g` — in the original file these presumably go into distinct groups between
// these calls; confirm the grouping before reusing this snippet.
g.addMeasure("Jaccard", paircount.jaccard(), 0, 1, false); g.addMeasure("F1-Measure", paircount.f1Measure(), 0, 1, false); g.addMeasure("Precision", paircount.precision(), 0, 1, false); g.addMeasure("Recall", paircount.recall(), 0, 1, false); g.addMeasure("Rand", paircount.randIndex(), 0, 1, false); g.addMeasure("ARI", paircount.adjustedRandIndex(), 0, 1, false); g.addMeasure("FowlkesMallows", paircount.fowlkesMallows(), 0, 1, false); g.addMeasure("NMI Joint", entropy.entropyNMIJoint(), 0, 1, false); g.addMeasure("NMI Sqrt", entropy.entropyNMISqrt(), 0, 1, false); g.addMeasure("F1-Measure", bcubed.f1Measure(), 0, 1, false); g.addMeasure("Recall", bcubed.recall(), 0, 1, false); g.addMeasure("Precision", bcubed.precision(), 0, 1, false); g.addMeasure("F1-Measure", setm.f1Measure(), 0, 1, false); g.addMeasure("Purity", setm.purity(), 0, 1, false); g.addMeasure("Inverse Purity", setm.inversePurity(), 0, 1, false); g.addMeasure("F1-Measure", edit.f1Measure(), 0, 1, false); g.addMeasure("Precision", edit.editDistanceFirst(), 0, 1, false); g.addMeasure("Recall", edit.editDistanceSecond(), 0, 1, false); g.addMeasure("Mean +-" + FormatUtil.NF4.format(gini.getCount() > 1. ? gini.getSampleStddev() : 0.), gini.getMean(), 0, 1, false);
g.addMeasure("Jaccard", paircount.jaccard(), 0, 1, false); g.addMeasure("F1-Measure", paircount.f1Measure(), 0, 1, false); g.addMeasure("Precision", paircount.precision(), 0, 1, false); g.addMeasure("Recall", paircount.recall(), 0, 1, false); g.addMeasure("Rand", paircount.randIndex(), 0, 1, false); g.addMeasure("ARI", paircount.adjustedRandIndex(), 0, 1, false); g.addMeasure("FowlkesMallows", paircount.fowlkesMallows(), 0, 1, false); g.addMeasure("NMI Joint", entropy.entropyNMIJoint(), 0, 1, false); g.addMeasure("NMI Sqrt", entropy.entropyNMISqrt(), 0, 1, false); g.addMeasure("F1-Measure", bcubed.f1Measure(), 0, 1, false); g.addMeasure("Recall", bcubed.recall(), 0, 1, false); g.addMeasure("Precision", bcubed.precision(), 0, 1, false); g.addMeasure("F1-Measure", setm.f1Measure(), 0, 1, false); g.addMeasure("Purity", setm.purity(), 0, 1, false); g.addMeasure("Inverse Purity", setm.inversePurity(), 0, 1, false); g.addMeasure("F1-Measure", edit.f1Measure(), 0, 1, false); g.addMeasure("Precision", edit.editDistanceFirst(), 0, 1, false); g.addMeasure("Recall", edit.editDistanceSecond(), 0, 1, false); g.addMeasure("Mean +-" + FormatUtil.NF4.format(gini.getCount() > 1. ? gini.getSampleStddev() : 0.), gini.getMean(), 0, 1, false);
g.addMeasure("Jaccard", paircount.jaccard(), 0, 1, false); g.addMeasure("F1-Measure", paircount.f1Measure(), 0, 1, false); g.addMeasure("Precision", paircount.precision(), 0, 1, false); g.addMeasure("Recall", paircount.recall(), 0, 1, false); g.addMeasure("Rand", paircount.randIndex(), 0, 1, false); g.addMeasure("ARI", paircount.adjustedRandIndex(), 0, 1, false); g.addMeasure("FowlkesMallows", paircount.fowlkesMallows(), 0, 1, false); g.addMeasure("NMI Joint", entropy.entropyNMIJoint(), 0, 1, false); g.addMeasure("NMI Sqrt", entropy.entropyNMISqrt(), 0, 1, false); g.addMeasure("F1-Measure", bcubed.f1Measure(), 0, 1, false); g.addMeasure("Recall", bcubed.recall(), 0, 1, false); g.addMeasure("Precision", bcubed.precision(), 0, 1, false); g.addMeasure("F1-Measure", setm.f1Measure(), 0, 1, false); g.addMeasure("Purity", setm.purity(), 0, 1, false); g.addMeasure("Inverse Purity", setm.inversePurity(), 0, 1, false); g.addMeasure("F1-Measure", edit.f1Measure(), 0, 1, false); g.addMeasure("Precision", edit.editDistanceFirst(), 0, 1, false); g.addMeasure("Recall", edit.editDistanceSecond(), 0, 1, false); g.addMeasure("Mean +-" + FormatUtil.NF4.format(gini.getCount() > 1. ? gini.getSampleStddev() : 0.), gini.getMean(), 0, 1, false);
// Fragment (two identical copies — clone listing): ranking evaluation over a score ordering
// (SimpleAdapter over order.iter()): ROC AUC, Average Precision, R-Precision, Maximum F1,
// each with a chance-adjusted variant normalized as (value - rate) / (1 - rate).
// NOTE(review): `adjauc`, `rate`, `test`, `order`, `res` are declared outside this fragment.
MeasurementGroup g = res.newGroup("Evaluation measures:"); double rocauc = ROCEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); double avep = AveragePrecisionEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false);
MeasurementGroup g = res.newGroup("Evaluation measures:"); double rocauc = ROCEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); double avep = AveragePrecisionEvaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new SimpleAdapter(order.iter())); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false);
// Fragment: same ranking evaluation as the SimpleAdapter snippets above, but over an outlier
// result via OutlierScoreAdapter(or): ROC AUC, Average Precision, R-Precision, Maximum F1,
// each with a chance-adjusted variant normalized as (value - rate) / (1 - rate).
// NOTE(review): `adjauc`, `rate`, `test`, `or`, `res` are declared outside this fragment.
MeasurementGroup g = res.newGroup("Evaluation measures:"); double rocauc = ROCEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("ROC AUC", rocauc, 0., 1., .5, false); double avep = AveragePrecisionEvaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("Average Precision", avep, 0., 1., rate, false); double rprec = PrecisionAtKEvaluation.RPRECISION.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("R-Precision", rprec, 0., 1., rate, false); double maxf1 = MaximumF1Evaluation.STATIC.evaluate(test, new OutlierScoreAdapter(or)); g.addMeasure("Maximum F1", maxf1, 0., 1., rate, false); g.addMeasure("Adjusted AUC", adjauc, 0., 1., 0., false); double adjavep = (avep - rate) / (1 - rate); g.addMeasure("Adjusted AveP", adjavep, 0., 1., 0., false); double adjrprec = (rprec - rate) / (1 - rate); g.addMeasure("Adjusted R-Prec", adjrprec, 0., 1., 0., false); double adjmaxf1 = (maxf1 - rate) / (1 - rate); g.addMeasure("Adjusted Max F1", adjmaxf1, 0., 1., 0., false);
// Fragment (two identical copies — clone listing): attaches a precision-recall curve to the
// result hierarchy and records its AUC under PRAUC_LABEL in a freshly found-or-created
// "Evaluation of ranking" EvaluationResult.
// NOTE(review): each line holds two near-identical copies of the snippet (one keyed on `o`,
// one on `or`), redeclaring `ev` — a concatenation/clone artifact; only one copy (with the
// variable name matching the enclosing method) belongs in any single source file.
db.getHierarchy().add(o, curve); EvaluationResult ev = EvaluationResult.findOrCreate(db.getHierarchy(), o, "Evaluation of ranking", "ranking-evaluation"); ev.findOrCreateGroup("Evaluation measures").addMeasure(PRAUC_LABEL, curve.getAUC(), 0., 1., false); db.getHierarchy().add(or, curve); EvaluationResult ev = EvaluationResult.findOrCreate(db.getHierarchy(), or, "Evaluation of ranking", "ranking-evaluation"); ev.findOrCreateGroup("Evaluation measures").addMeasure(PRAUC_LABEL, curve.getAUC(), 0., 1., false);
db.getHierarchy().add(o, curve); EvaluationResult ev = EvaluationResult.findOrCreate(db.getHierarchy(), o, "Evaluation of ranking", "ranking-evaluation"); ev.findOrCreateGroup("Evaluation measures").addMeasure(PRAUC_LABEL, curve.getAUC(), 0., 1., false); db.getHierarchy().add(or, curve); EvaluationResult ev = EvaluationResult.findOrCreate(db.getHierarchy(), or, "Evaluation of ranking", "ranking-evaluation"); ev.findOrCreateGroup("Evaluation measures").addMeasure(PRAUC_LABEL, curve.getAUC(), 0., 1., false);
/**
 * Constructor: builds the "Representativeness" measurement group for a
 * possible-worlds evaluation result.
 *
 * @param gtau Global tau value
 * @param besttau Tau within the best (within-cluster) match
 * @param cprob Confidence probability
 */
public RepresentativenessEvaluation(double gtau, double besttau, double cprob) {
  super("Possible-Worlds Evaluation", "representativeness");
  MeasurementGroup grp = newGroup("Representativeness");
  grp.addMeasure("Confidence", cprob, 0, 1, false);
  grp.addMeasure("Global Tau", gtau, 0, 1, true);
  grp.addMeasure("Cluster Tau", besttau, 0, 1, true);
}
/**
 * Constructor: builds the "Representativeness" measurement group for a
 * possible-worlds evaluation result.
 *
 * @param gtau Global tau value
 * @param besttau Tau within the best (within-cluster) match
 * @param cprob Confidence probability
 */
public RepresentativenessEvaluation(double gtau, double besttau, double cprob) {
  super("Possible-Worlds Evaluation", "representativeness");
  MeasurementGroup grp = newGroup("Representativeness");
  grp.addMeasure("Confidence", cprob, 0, 1, false);
  grp.addMeasure("Global Tau", gtau, 0, 1, true);
  grp.addMeasure("Cluster Tau", besttau, 0, 1, true);
}
/**
 * Constructor: builds the "Representativeness" measurement group for a
 * possible-worlds evaluation result.
 *
 * @param gtau Global tau value
 * @param besttau Tau within the best (within-cluster) match
 * @param cprob Confidence probability
 */
public RepresentativenessEvaluation(double gtau, double besttau, double cprob) {
  super("Possible-Worlds Evaluation", "representativeness");
  MeasurementGroup grp = newGroup("Representativeness");
  grp.addMeasure("Confidence", cprob, 0, 1, false);
  grp.addMeasure("Global Tau", gtau, 0, 1, true);
  grp.addMeasure("Cluster Tau", besttau, 0, 1, true);
}