.append(" subsetMiniBatches ").append(iterAfter - itersBefore) //Note: "end of epoch" effect - may be smaller than subset size .append(" trainMS ").append(end - start).append(" evalMS ").append(endEval - startEval) .append(" accuracy ").append(e.accuracy()).append(" f1 ").append(e.f1()) .append(" AvgAUC ").append(r.calculateAverageAUC()).append(" AvgAUPRC ").append(r.calculateAverageAUCPR()).append("\n");
/**
 * Calculate the (macro) average F1 score across all classes
 *
 * TP: true positive
 * FP: false positive
 * FN: false negative
 * F1 score: 2 * TP / (2*TP + FP + FN)
 *
 * @return the F1 score, i.e., the harmonic mean of precision and recall, based on current guesses
 */
public double f1() {
    return f1(EvaluationAveraging.Macro);
}
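// A small worked example may make the per-class formula above concrete.
// The counts here are purely hypothetical, chosen only for illustration.
int tp = 40, fp = 10, fn = 20;
double precision = tp / (double) (tp + fp);           // 40 / 50  = 0.80
double recall = tp / (double) (tp + fn);              // 40 / 60  ~ 0.667
double f1 = 2.0 * tp / (2.0 * tp + fp + fn);          // 80 / 110 ~ 0.727
// Equivalent harmonic-mean form: 2 * precision * recall / (precision + recall) ~ 0.727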
@Override
public double score(MultiLayerNetwork net, DataSetIterator iterator) {
    Evaluation e = net.evaluate(iterator);
    return e.f1();
}
@Override
public double score(ComputationGraph graph, MultiDataSetIterator iterator) {
    Evaluation e = graph.evaluate(iterator);
    return e.f1();
}

@Override
public double score(ComputationGraph graph, DataSetIterator iterator) {
    Evaluation e = graph.evaluate(iterator);
    return e.f1();
}
}
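// Minimal usage sketch of the same pattern the score functions above rely on.
// Assumed variables: a trained 'net' (MultiLayerNetwork) and a held-out
// 'validationIter' (DataSetIterator); evaluate(...) returns an Evaluation
// whose f1() is the macro-averaged F1.
Evaluation validation = net.evaluate(validationIter);
double macroF1 = validation.f1();
System.out.println(validation.stats());   // full per-class precision/recall/F1 breakdown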
/**
 * Returns the F1 score for the given examples.
 * Think of this as a percentage correct: the higher the number,
 * the more predictions were right. This is on a scale from 0 to 1.
 *
 * @param examples the examples to classify (one example in each row)
 * @param labels   the true labels
 * @return the F1 score for the given examples and labels
 */
@Override
public double f1Score(INDArray examples, INDArray labels) {
    Evaluation eval = new Evaluation();
    eval.eval(labels, labelProbabilities(examples));
    return eval.f1();
}
double precisionMacro = precision(EvaluationAveraging.Macro);
double recallMacro = recall(EvaluationAveraging.Macro);
double f1Macro = f1(EvaluationAveraging.Macro);

builder.append("\n==========================Scores========================================");
builder.append("\n # of classes: ").append(nClasses);
/**
 * Sets the input and labels and returns an F1 score for the predictions
 * with respect to the true labels.
 *
 * @param input  the input to score
 * @param labels the true labels
 * @return the score for the given input/label pairs
 */
@Override
public double f1Score(INDArray input, INDArray labels) {
    feedForward(input);
    setLabels(labels);
    Evaluation eval = new Evaluation();
    eval.eval(labels, labelProbabilities(input));
    return eval.f1();
}
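// Sketch of calling f1Score directly on in-memory arrays. Assumed variables:
// 'net' is an initialized/trained network, and 'features' and 'oneHotLabels'
// are INDArrays with one example per row.
double f1OnBatch = net.f1Score(features, oneHotLabels);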