/**
 * Evaluates a pre-trained single-label classifier on the given train/test split.
 *
 * @param cl        single-label classifier, needs to be trained beforehand
 * @param trainData training data; used by Weka to derive class priors for the evaluation
 * @param testData  data the classifier is evaluated on
 * @return the populated {@link Evaluation} object
 * @throws Exception if the evaluation fails
 */
public static Evaluation getEvaluationSinglelabel(Classifier cl, Instances trainData,
        Instances testData) throws Exception {
    final Evaluation evaluation = new Evaluation(trainData);
    evaluation.evaluateModel(cl, testData);
    return evaluation;
}
/**
 * Evaluates {@code classifier} on {@code testData} (class priors taken from
 * {@code trainData}) and serializes the resulting Evaluation to {@code evalOutput}.
 *
 * @param classifier pre-trained classifier to evaluate
 * @param evalOutput file the serialized Evaluation is written to
 * @param trainData  training data used to initialize the evaluation
 * @param testData   data the classifier is evaluated on
 * @throws Exception if evaluation or serialization fails
 */
private void createWekaEvaluationObject(Classifier classifier, File evalOutput,
        Instances trainData, Instances testData) throws Exception {
    final Evaluation evaluation = new Evaluation(trainData);
    evaluation.evaluateModel(classifier, testData);
    weka.core.SerializationHelper.write(evalOutput.getAbsolutePath(), evaluation);
}
/**
 * Runs the classifier against {@code testData} (priors initialized from
 * {@code trainData}) and writes the serialized Evaluation to disk.
 *
 * @param classifier pre-trained classifier to evaluate
 * @param evalOutput destination file for the serialized Evaluation
 * @param trainData  training data used to initialize the evaluation
 * @param testData   data the classifier is evaluated on
 * @throws Exception if evaluation or serialization fails
 */
protected void createWekaEvaluationObject(Classifier classifier, File evalOutput,
        Instances trainData, Instances testData) throws Exception {
    final Evaluation result = new Evaluation(trainData);
    result.evaluateModel(classifier, testData);
    weka.core.SerializationHelper.write(evalOutput.getAbsolutePath(), result);
}
/**
 * Trains {@code model} on the training set and evaluates it on the testing set.
 *
 * @param model       classifier to train and evaluate
 * @param trainingSet data the model is built on
 * @param testingSet  data the model is evaluated on
 * @return the populated {@link Evaluation}
 * @throws Exception if training or evaluation fails
 */
public Evaluation classify(Classifier model, Instances trainingSet, Instances testingSet)
        throws Exception {
    final Evaluation result = new Evaluation(trainingSet);
    model.buildClassifier(trainingSet);
    result.evaluateModel(model, testingSet);
    return result;
}
/**
 * Returns the error of the probability estimates for the current model on a
 * set of instances.
 *
 * @param data the set of instances
 * @return the error
 * @throws Exception if something goes wrong
 */
protected double getMeanAbsoluteError(Instances data) throws Exception {
    // Evaluate this model on the given data and report its mean absolute error.
    final Evaluation evaluation = new Evaluation(data);
    evaluation.evaluateModel(this, data);
    return evaluation.meanAbsoluteError();
}
/**
 * Returns the error of the probability estimates for the current model on a
 * set of instances.
 *
 * @param data the set of instances
 * @return the error
 * @throws Exception if something goes wrong
 */
protected double getMeanAbsoluteError(Instances data) throws Exception {
    final Evaluation modelEval = new Evaluation(data);
    modelEval.evaluateModel(this, data);
    return modelEval.meanAbsoluteError();
}
/**
 * Returns the misclassification error of the current model on a set of
 * instances.
 *
 * @param data the set of instances
 * @return the error rate
 * @throws Exception if something goes wrong
 */
protected double getErrorRate(Instances data) throws Exception {
    // Evaluate this model on the given data and report its error rate.
    final Evaluation evaluation = new Evaluation(data);
    evaluation.evaluateModel(this, data);
    return evaluation.errorRate();
}
/**
 * Returns the misclassification error of the current model on a set of
 * instances.
 *
 * @param data the set of instances
 * @return the error rate
 * @throws Exception if something goes wrong
 */
protected double getErrorRate(Instances data) throws Exception {
    final Evaluation modelEval = new Evaluation(data);
    modelEval.evaluateModel(this, data);
    return modelEval.errorRate();
}
// Fragment of a larger method: cross-validate 'classifier' over the full data
// set. The fixed seed (1) makes fold assignment reproducible across runs.
Evaluation evaluation = new Evaluation(dataSet.getData());
// this.numFolds controls the number of CV folds — presumably >= 2; TODO confirm at the caller
evaluation.crossValidateModel(classifier, dataSet.getData(), this.numFolds, new Random(1));
// 'result' accumulates a textual report; the summary text is appended below (outside this view)
result.append("evaluation summary:");
/**
 * Performs a 10-fold cross-validation of the given classifier on the full
 * dataset and logs the evaluation summary.
 *
 * @param clf  classifier to cross-validate
 * @param data full dataset
 * @throws Exception if the cross-validation fails
 */
public static void crossValidate(Classifier clf, Instances data) throws Exception {
    final Evaluation evaluation = new Evaluation(data);
    // Fixed seed (42) keeps fold assignment deterministic.
    evaluation.crossValidateModel(clf, data, 10, new Random(42));
    logger.info(evaluation.toSummaryString());
}
/**
 * Get training error (from loaded data).
 *
 * @param verbose option to display evaluation information in the log window
 * @return classifier error on the training data set, or -1 if no training
 *         header is loaded or the evaluation fails
 */
public double getTrainingError(boolean verbose) {
    // Nothing has been trained yet — no header means no evaluable model/data.
    if (null == this.trainHeader)
        return -1;
    double error = -1;
    try {
        final Evaluation evaluation = new Evaluation(this.loadedTrainingData);
        evaluation.evaluateModel(classifier, this.loadedTrainingData);
        if (verbose)
            IJ.log(evaluation.toSummaryString("\n=== Training set evaluation ===\n", false));
        error = evaluation.errorRate();
    } catch (Exception e) {
        // Surface the failure in the log window as well as stderr, so it is
        // not lost when the console is hidden; -1 signals the caller it failed.
        IJ.log("Error while evaluating the training error: " + e.getMessage());
        e.printStackTrace();
    }
    return error;
}
/**
 * Get training error (from loaded data).
 *
 * @param verbose option to display evaluation information in the log window
 * @return classifier error on the training data set.
 */
public double getTrainingError(boolean verbose) {
    if (this.trainHeader == null) {
        return -1;
    }
    double trainingError = -1;
    try {
        final Evaluation eval = new Evaluation(this.loadedTrainingData);
        eval.evaluateModel(classifier, this.loadedTrainingData);
        if (verbose) {
            IJ.log(eval.toSummaryString("\n=== Training set evaluation ===\n", false));
        }
        trainingError = eval.errorRate();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return trainingError;
}
public void getLinearCombination(List<OWLClassExpression> descriptions){ //get common data Instances data = buildData(descriptions); //compute linear regression model data.setClassIndex(data.numAttributes() - 1); AbstractClassifier model = new LinearRegression(); model = new J48(); try { model.buildClassifier(data); // System.out.println(model); // AddExpression filter = new AddExpression(); // filter.setExpression("a1^2"); // FilteredClassifier filteredClassifier = new FilteredClassifier(); // filteredClassifier.setClassifier(model); // filteredClassifier.setFilter(filter); // filteredClassifier.buildClassifier(data); // logger.debug(filteredClassifier.getClassifier()); Evaluation eval = new Evaluation(data); eval.crossValidateModel(model, data, 10, new Random(1)); System.out.println(eval.toSummaryString(true)); } catch (Exception e) { e.printStackTrace(); } }
/** * Determine whether the scheme performs worse than ZeroR during testing * * @param classifier the pre-trained classifier * @param evaluation the classifier evaluation object * @param train the training data * @param test the test data * @return index 0 is true if the scheme performs better than ZeroR * @throws Exception if there was a problem during the scheme's testing */ protected boolean[] testWRTZeroR(Classifier classifier, Evaluation evaluation, Instances train, Instances test) throws Exception { boolean[] result = new boolean[2]; evaluation.evaluateModel(classifier, test); try { // Tested OK, compare with ZeroR Classifier zeroR = new weka.classifiers.rules.ZeroR(); zeroR.buildClassifier(train); Evaluation zeroREval = new Evaluation(train); zeroREval.evaluateModel(zeroR, test); result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate()); } catch (Exception ex) { throw new Error("Problem determining ZeroR performance: " + ex.getMessage()); } return result; }
/** * Determine whether the scheme performs worse than ZeroR during testing * * @param classifier the pre-trained classifier * @param evaluation the classifier evaluation object * @param train the training data * @param test the test data * @return index 0 is true if the scheme performs better than ZeroR * @throws Exception if there was a problem during the scheme's testing */ protected boolean[] testWRTZeroR(Classifier classifier, Evaluation evaluation, Instances train, Instances test) throws Exception { boolean[] result = new boolean[2]; evaluation.evaluateModel(classifier, test); try { // Tested OK, compare with ZeroR Classifier zeroR = new weka.classifiers.rules.ZeroR(); zeroR.buildClassifier(train); Evaluation zeroREval = new Evaluation(train); zeroREval.evaluateModel(zeroR, test); result[0] = Utils.grOrEq(zeroREval.errorRate(), evaluation.errorRate()); } catch (Exception ex) { throw new Error("Problem determining ZeroR performance: " + ex.getMessage()); } return result; }
/**
 * Splits the data into train/test, trains the classifier on the train part
 * and asserts that both train and test accuracy exceed the given threshold.
 *
 * @param clf            classifier under test
 * @param data           full data set to split
 * @param minPerfomance  minimum percent-correct both splits must reach
 * @throws Exception if training or evaluation fails
 */
public static void evaluate(Dl4jMlpClassifier clf, Instances data, double minPerfomance)
        throws Exception {
    final Instances[] split = TestUtil.splitTrainTest(data);
    final Instances train = split[0];
    final Instances test = split[1];

    clf.buildClassifier(train);

    final Evaluation trainEval = new Evaluation(train);
    trainEval.evaluateModel(clf, train);
    final Evaluation testEval = new Evaluation(train);
    testEval.evaluateModel(clf, test);

    final double trainPctCorrect = trainEval.pctCorrect();
    final double testPctCorrect = testEval.pctCorrect();
    log.info("Train: {}, Test: {}", trainPctCorrect, testPctCorrect);

    final boolean success = testPctCorrect > minPerfomance && trainPctCorrect > minPerfomance;
    log.info("Success: " + success);
    log.info(clf.getModel().conf().toYaml());
    Assert.assertTrue("Performance was < " + minPerfomance + ". TestPctCorrect: "
            + testPctCorrect + ", TrainPctCorrect: " + trainPctCorrect, success);
}
/** * Traverses the tree and installs linear models at each node. This method * must be called if pruning is not to be performed. * * @throws Exception if an error occurs */ public void installLinearModels() throws Exception { Evaluation nodeModelEval; if (m_isLeaf) { buildLinearModel(m_indices); } else { if (m_left != null) { m_left.installLinearModels(); } if (m_right != null) { m_right.installLinearModels(); } buildLinearModel(m_indices); } nodeModelEval = new Evaluation(m_instances); nodeModelEval.evaluateModel(m_nodeModel, m_instances); m_rootMeanSquaredError = nodeModelEval.rootMeanSquaredError(); // save space if (!m_saveInstances) { m_instances = new Instances(m_instances, 0); } }
// Fragment of a larger method: train the filtered classifier on the training
// split, then evaluate it on the held-out test split (class priors for the
// evaluation are taken from the training data).
filteredClassifier.buildClassifier(train); Evaluation eval = new Evaluation(train); eval.evaluateModel(filteredClassifier, test);
// Fragment of a larger method: build the filtered classifier from 'train' and
// measure its performance on 'test'; 'eval' is presumably consumed below —
// outside this view.
filteredClassifier.buildClassifier(train); Evaluation eval = new Evaluation(train); eval.evaluateModel(filteredClassifier, test);
private static Evaluation eval(Instances metaData) throws Exception { String imagesPath = "src/test/resources/nominal/mnist-minimal"; Dl4jMlpClassifier clf = new Dl4jMlpClassifier(); ImageInstanceIterator iii = new ImageInstanceIterator(); iii.setImagesLocation(new File(imagesPath)); iii.setTrainBatchSize(2); clf.setInstanceIterator(iii); clf.setNumEpochs(5); // Build clf clf.buildClassifier(metaData); // Evaluate clf Evaluation trainEval = new Evaluation(metaData); trainEval.evaluateModel(clf, metaData); return trainEval; } }