// Pull the next evaluation batch and score the network's predictions on it.
DataSet ds = mnistTest.next();
INDArray features = ds.getFeatures();
INDArray labels = ds.getLabels();
INDArray output = model.output(features, false); // train=false: inference-mode forward pass
eval.eval(labels, output);
// Fetch a test batch, run a forward pass, and fold the result into the evaluator.
DataSet ds = mnistTest.next();
INDArray testFeatures = ds.getFeatures();
INDArray output = model.output(testFeatures, false); // second arg false => evaluation mode
eval.eval(ds.getLabels(), output); // compare predictions with the true labels
// Accumulate evaluation statistics for one more test batch.
DataSet ds = mnistTest.next();
INDArray expected = ds.getLabels();
INDArray output = model.output(ds.getFeatures(), false); // inference pass (train = false)
eval.eval(expected, output);
/**
 * Accumulates statistics comparing the true outcomes against the model's
 * guesses. This is intended for logistic outcome matrices.
 * <p>
 * An {@link IllegalArgumentException} is thrown if the two matrices passed in
 * are not the same length.
 *
 * @param realOutcomes the true outcomes (labels - usually binary)
 * @param guesses      the guesses/predictions (usually a probability vector)
 */
public void eval(INDArray realOutcomes, INDArray guesses) {
    // Delegate to the metadata-aware overload; a typed local (rather than a
    // cast) keeps the overload resolution unambiguous with no record metadata.
    List<Serializable> recordMetaData = null;
    eval(realOutcomes, guesses, recordMetaData);
}
/**
 * Evaluates the given true labels against the output the supplied
 * {@link ComputationGraph} produces for the given input.
 *
 * @param trueLabels the true labels to use
 * @param input      the input to feed to the network for evaluation
 * @param network    the network used to produce the output
 */
public void eval(INDArray trueLabels, INDArray input, ComputationGraph network) {
    // train=false => inference-mode forward pass; [0] takes the graph's first output array
    eval(trueLabels, network.output(false, input)[0]);
}
/**
 * Evaluates the given true labels against the output the supplied
 * {@link MultiLayerNetwork} produces for the given input.
 *
 * @param trueLabels the true labels to use
 * @param input      the input to feed to the network for evaluation
 * @param network    the network used to produce the output
 */
public void eval(INDArray trueLabels, INDArray input, MultiLayerNetwork network) {
    // TrainingMode.TEST => inference-mode forward pass (no training-only behavior)
    eval(trueLabels, network.output(input, Layer.TrainingMode.TEST));
}
/**
 * Returns the F1 score for the given examples — roughly, a "percentage right"
 * on a scale from 0 to 1, where higher means more predictions were correct.
 *
 * @param examples the examples to classify (one example in each row)
 * @param labels   the true labels
 * @return the F1 score over the supplied examples
 */
@Override
public double f1Score(INDArray examples, INDArray labels) {
    Evaluation evaluation = new Evaluation();
    INDArray probabilities = labelProbabilities(examples);
    evaluation.eval(labels, probabilities);
    return evaluation.f1();
}
/**
 * Computes the F1 score of the model's predictions on the given examples.
 * Think of this as a percentage right, scaled to [0, 1]: the higher the
 * number, the more the model got right.
 *
 * @param examples the examples to classify (one example per row)
 * @param labels   the true labels
 * @return the F1 score for the supplied examples
 */
@Override
public double f1Score(INDArray examples, INDArray labels) {
    Evaluation scorer = new Evaluation();
    scorer.eval(labels, labelProbabilities(examples));
    return scorer.f1();
}
if (isClassification) classificationEvaluation.eval(next.getLabels(), output, next.getLabelsMaskArray()); else regressionEvaluation.eval(next.getLabels(), output, next.getLabelsMaskArray());
/**
 * Sets the input and labels on the network, then returns the F1 score of the
 * predictions with respect to the true labels.
 *
 * @param input  the input to score
 * @param labels the true labels
 * @return the F1 score for the given input/label pairs
 */
@Override
public double f1Score(INDArray input, INDArray labels) {
    // Run a forward pass and record the labels before scoring.
    feedForward(input);
    setLabels(labels);
    Evaluation evaluation = new Evaluation();
    evaluation.eval(labels, labelProbabilities(input));
    return evaluation.f1();
}
@Override public String evaluate(FederatedDataSet federatedDataSet) { //evaluate the model on the test set DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); double score = model.score(testData); Evaluation eval = new Evaluation(numClasses); INDArray output = model.output(testData.getFeatureMatrix()); eval.eval(testData.getLabels(), output); return "Score: " + score; }
@Override public String evaluate(FederatedDataSet federatedDataSet) { DataSet testData = (DataSet) federatedDataSet.getNativeDataSet(); List<DataSet> listDs = testData.asList(); DataSetIterator iterator = new ListDataSetIterator(listDs, BATCH_SIZE); Evaluation eval = new Evaluation(OUTPUT_NUM); //create an evaluation object with 10 possible classes while (iterator.hasNext()) { DataSet next = iterator.next(); INDArray output = model.output(next.getFeatureMatrix()); //get the networks prediction eval.eval(next.getLabels(), output); //check the prediction against the true class } return eval.stats(); }
// Build the evaluation for the expected number of classes, then score the
// whole test set in one shot. Note we pass the test set metadata through so
// each prediction can be traced back to its originating record.
Evaluation eval = new Evaluation(numberOfClasses);
INDArray output = model.output(testData.getFeatureMatrix(), false);
eval.eval(testData.getLabels(), output, testMetaData);
DataSet next = mnistTest.next(); INDArray output = model.output(next.getFeatures(), false); //get the networks prediction eval.eval(next.getLabels(), output); //check the prediction against the true class
// Advance to the next test batch and score it against the true labels.
// NOTE(review): 'output' is computed outside this snippet. If this runs inside
// a loop, verify that 'output' actually corresponds to this batch ('next') —
// as written it may be evaluating stale predictions. TODO confirm at the caller.
DataSet next = mnistTest.next();
eval.eval(next.getLabels(), output); //check the prediction against the true class
// Run an inference-mode forward pass on the held-out features, then feed the
// same predictions to both the accuracy evaluator and the ROC evaluator, and
// print the resulting AUC.
// NOTE(review): 'lables' (sic) is declared elsewhere in the file; the
// misspelling cannot be fixed here without also renaming its declaration.
INDArray predicted = model.output(features,false);
eval.eval(lables, predicted);
roc.eval(lables, predicted);
System.out.println(roc.calculateAUC());
// Forward pass on the test features, then binarise the activations at 0.50
// before evaluating them against the true labels.
INDArray output = multiLayerNetwork.output(testSet.getFeatureMatrix());
INDArray thresholded = output.cond(new AbsValueGreaterThan(0.50));
output = thresholded;
evaluation.eval(testSet.getLabels(), output);
System.out.println("args = [" + evaluation.stats() + "]");
// Score the model's predictions over the full test set.
INDArray output = model.output(testData.getFeatureMatrix());
INDArray trueLabels = testData.getLabels();
eval.eval(trueLabels, output);
DataSet next = mnistTest.next(); INDArray output = model.output(next.getFeatures(), false); //get the networks prediction eval.eval(next.getLabels(), output); //check the prediction against the true class
// Inference-mode forward pass, then fold the predictions into the evaluator.
INDArray predicted = model.output(features, /* train: */ false);
eval.eval(labels, predicted);