/**
 * A test method for this class. Just extracts the first command line argument
 * as a classifier class name and calls evaluateModel.
 *
 * @param args an array of command line arguments, the first of which must be
 *          the class name of a classifier.
 */
public static void main(String[] args) {
  try {
    if (args.length == 0) {
      throw new Exception("The first argument must be the class name"
        + " of a classifier");
    }
    String classifier = args[0];
    args[0] = "";
    System.out.println(evaluateModel(classifier, args));
  } catch (Exception ex) {
    ex.printStackTrace();
    System.err.println(ex.getMessage());
  }
}
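For illustration, this entry point can also be driven programmatically. A minimal sketch, assuming the method lives in weka.classifiers.Evaluation (as the unqualified evaluateModel call suggests); the J48 class name and the ARFF path are placeholders:

// Sketch: args[0] names the classifier; the remaining options ("-t" is the
// standard training-file switch) are forwarded to evaluateModel.
Evaluation.main(new String[] {
  "weka.classifiers.trees.J48", // classifier class name, consumed as args[0]
  "-t", "data/train.arff"       // illustrative dataset path
});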
/**
 * Evaluates a given single-label classifier on given train and test sets.
 *
 * @param cl single-label classifier, needs to be trained beforehand
 * @param trainData the training data
 * @param testData the test data
 * @return the evaluation object holding the results
 * @throws Exception if evaluation fails
 */
public static Evaluation getEvaluationSinglelabel(Classifier cl,
    Instances trainData, Instances testData) throws Exception {
  Evaluation eval = new Evaluation(trainData);
  eval.evaluateModel(cl, testData);
  return eval;
}
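A minimal usage sketch for this helper, assuming local ARFF files and J48 as the classifier (both are illustrative choices, not part of the original snippet):

// Load and prepare data; Instances(Reader) and setClassIndex are standard Weka API.
Instances train = new Instances(new java.io.FileReader("data/train.arff"));
Instances test = new Instances(new java.io.FileReader("data/test.arff"));
train.setClassIndex(train.numAttributes() - 1);
test.setClassIndex(test.numAttributes() - 1);

// The helper expects an already-trained classifier.
Classifier cl = new weka.classifiers.trees.J48();
cl.buildClassifier(train);

Evaluation eval = getEvaluationSinglelabel(cl, train, test);
System.out.println(eval.toSummaryString());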
private void createWekaEvaluationObject(Classifier classifier, File evalOutput,
    Instances trainData, Instances testData) throws Exception {
  Evaluation eval = new Evaluation(trainData);
  eval.evaluateModel(classifier, testData);
  weka.core.SerializationHelper.write(evalOutput.getAbsolutePath(), eval);
}
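The serialized object can later be restored with the matching read call; a sketch, reusing the evalOutput file from above (the cast is needed because read returns Object):

// Sketch: deserialize the Evaluation written by createWekaEvaluationObject.
Evaluation restored = (Evaluation) weka.core.SerializationHelper
  .read(evalOutput.getAbsolutePath());
System.out.println(restored.toSummaryString());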
/**
 * Trains the given model on the training set, then evaluates it on the test
 * set. Unlike the helpers above, this variant builds the classifier itself
 * rather than expecting a pre-trained one.
 */
public Evaluation classify(Classifier model, Instances trainingSet,
    Instances testingSet) throws Exception {
  Evaluation evaluation = new Evaluation(trainingSet);
  model.buildClassifier(trainingSet);
  evaluation.evaluateModel(model, testingSet);
  return evaluation;
}
/**
 * Runs the classifier instance with the given options.
 *
 * @param classifier the classifier to run
 * @param options the commandline options
 */
public static void runClassifier(Classifier classifier, String[] options) {
  try {
    if (classifier instanceof CommandlineRunnable) {
      ((CommandlineRunnable) classifier).preExecution();
    }
    System.out.println(Evaluation.evaluateModel(classifier, options));
  } catch (Exception e) {
    if (((e.getMessage() != null)
      && (e.getMessage().indexOf("General options") == -1))
      || (e.getMessage() == null)) {
      e.printStackTrace();
    } else {
      System.err.println(e.getMessage());
    }
  }
  if (classifier instanceof CommandlineRunnable) {
    try {
      ((CommandlineRunnable) classifier).postExecution();
    } catch (Exception ex) {
      ex.printStackTrace();
    }
  }
}
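A typical call site might look like the following sketch; the classifier choice and file paths are illustrative ("-t" and "-T" are the standard train/test file options understood by Evaluation.evaluateModel):

// Sketch: run a classifier through the generic runner with CLI-style options.
runClassifier(new weka.classifiers.trees.J48(),
  new String[] { "-t", "data/train.arff", "-T", "data/test.arff" });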
/**
 * Returns the error of the probability estimates for the current model on a
 * set of instances.
 *
 * @param data the set of instances
 * @return the error
 * @throws Exception if something goes wrong
 */
protected double getMeanAbsoluteError(Instances data) throws Exception {
  Evaluation eval = new Evaluation(data);
  eval.evaluateModel(this, data);
  return eval.meanAbsoluteError();
}
/**
 * Returns the misclassification error of the current model on a set of
 * instances.
 *
 * @param data the set of instances
 * @return the error rate
 * @throws Exception if something goes wrong
 */
protected double getErrorRate(Instances data) throws Exception {
  Evaluation eval = new Evaluation(data);
  eval.evaluateModel(this, data);
  return eval.errorRate();
}
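Note that both helpers pass this as the classifier and evaluate it on the very data supplied, so they return in-sample (resubstitution) estimates rather than held-out error.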
/**
 * Determine whether the scheme performs worse than ZeroR during testing.
 *
 * @param classifier the pre-trained classifier
 * @param evaluation the classifier evaluation object
 * @param train the training data
 * @param test the test data
 * @return index 0 is true if the scheme performs at least as well as ZeroR
 * @throws Exception if there was a problem during the scheme's testing
 */
protected boolean[] testWRTZeroR(Classifier classifier, Evaluation evaluation,
    Instances train, Instances test) throws Exception {

  boolean[] result = new boolean[2];
  evaluation.evaluateModel(classifier, test);

  try {
    // Tested OK, compare with ZeroR
    Classifier zeroR = new weka.classifiers.rules.ZeroR();
    zeroR.buildClassifier(train);
    Evaluation zeroREval = new Evaluation(train);
    zeroREval.evaluateModel(zeroR, test);
    result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate());
  } catch (Exception ex) {
    throw new Error("Problem determining ZeroR performance: "
      + ex.getMessage());
  }

  return result;
}
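ZeroR simply predicts the majority class (or the mean, for numeric targets), so this check flags any scheme that fails to match even the no-information baseline.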
/**
 * Get training error (from loaded data).
 *
 * @param verbose option to display evaluation information in the log window
 * @return classifier error on the training data set.
 */
public double getTrainingError(boolean verbose) {
  if (null == this.trainHeader) {
    return -1;
  }

  double error = -1;
  try {
    final Evaluation evaluation = new Evaluation(this.loadedTrainingData);
    evaluation.evaluateModel(classifier, this.loadedTrainingData);
    if (verbose) {
      IJ.log(evaluation.toSummaryString("\n=== Training set evaluation ===\n",
        false));
    }
    error = evaluation.errorRate();
  } catch (Exception e) {
    e.printStackTrace();
  }

  return error;
}
// Evaluate the (already trained) filtered classifier on the held-out test set.
eval.evaluateModel(filteredClassifier, test);
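In context, this fragment typically follows setup along the following lines. A sketch only: the filter choice, the attribute index, and the train/test variables are assumptions, not part of the original code:

// Sketch of the surrounding setup assumed by the fragment above.
weka.filters.unsupervised.attribute.Remove remove =
  new weka.filters.unsupervised.attribute.Remove();
remove.setAttributeIndices("1"); // e.g. drop an ID attribute

weka.classifiers.meta.FilteredClassifier filteredClassifier =
  new weka.classifiers.meta.FilteredClassifier();
filteredClassifier.setFilter(remove);
filteredClassifier.setClassifier(new weka.classifiers.trees.J48());
filteredClassifier.buildClassifier(train); // train and test are Instances

Evaluation eval = new Evaluation(train);
eval.evaluateModel(filteredClassifier, test);
System.out.println(eval.toSummaryString());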
public static void evaluate(Dl4jMlpClassifier clf, Instances data,
    double minPerformance) throws Exception {
  Instances[] split = TestUtil.splitTrainTest(data);
  Instances train = split[0];
  Instances test = split[1];

  clf.buildClassifier(train);

  Evaluation trainEval = new Evaluation(train);
  trainEval.evaluateModel(clf, train);

  Evaluation testEval = new Evaluation(train);
  testEval.evaluateModel(clf, test);

  final double testPctCorrect = testEval.pctCorrect();
  final double trainPctCorrect = trainEval.pctCorrect();
  log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);

  boolean success = testPctCorrect > minPerformance
    && trainPctCorrect > minPerformance;
  log.info("Success: " + success);
  log.info(clf.getModel().conf().toYaml());

  Assert.assertTrue("Performance was < " + minPerformance
    + ". TestPctCorrect: " + testPctCorrect
    + ", TrainPctCorrect: " + trainPctCorrect, success);
}
/**
 * Traverses the tree and installs linear models at each node. This method
 * must be called if pruning is not to be performed.
 *
 * @throws Exception if an error occurs
 */
public void installLinearModels() throws Exception {
  Evaluation nodeModelEval;

  if (m_isLeaf) {
    buildLinearModel(m_indices);
  } else {
    if (m_left != null) {
      m_left.installLinearModels();
    }
    if (m_right != null) {
      m_right.installLinearModels();
    }
    buildLinearModel(m_indices);
  }

  nodeModelEval = new Evaluation(m_instances);
  nodeModelEval.evaluateModel(m_nodeModel, m_instances);
  m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError();

  // save space
  if (!m_saveInstances) {
    m_instances = new Instances(m_instances, 0);
  }
}
private static Evaluation eval(Instances metaData) throws Exception {
  String imagesPath = "src/test/resources/nominal/mnist-minimal";

  Dl4jMlpClassifier clf = new Dl4jMlpClassifier();
  ImageInstanceIterator iii = new ImageInstanceIterator();
  iii.setImagesLocation(new File(imagesPath));
  iii.setTrainBatchSize(2);
  clf.setInstanceIterator(iii);
  clf.setNumEpochs(5);

  // Build clf
  clf.buildClassifier(metaData);

  // Evaluate clf
  Evaluation trainEval = new Evaluation(metaData);
  trainEval.evaluateModel(clf, metaData);
  return trainEval;
}
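All of the snippets above share the same core pattern: construct an Evaluation from the training data (which fixes the class priors), then score a trained classifier on held-out instances. A condensed, self-contained sketch of that pattern; the file paths and the choice of J48 are illustrative:

import java.io.FileReader;

import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;

public class EvaluateModelDemo {

  public static void main(String[] args) throws Exception {
    // Illustrative file locations; any ARFF files with a nominal class work.
    Instances train = new Instances(new FileReader("data/train.arff"));
    Instances test = new Instances(new FileReader("data/test.arff"));
    train.setClassIndex(train.numAttributes() - 1);
    test.setClassIndex(test.numAttributes() - 1);

    Classifier model = new J48();
    model.buildClassifier(train);

    Evaluation eval = new Evaluation(train); // priors come from the training set
    eval.evaluateModel(model, test);
    System.out.println(
      eval.toSummaryString("\n=== Test set evaluation ===\n", false));
  }
}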