/**
 * Creates a new evaluation wrapper around the supplied dataset by
 * constructing and storing a {@link weka.classifiers.evaluation.Evaluation}
 * delegate built from {@code data}.
 *
 * @param data the dataset used to initialise the delegate evaluation
 * @throws Exception if the underlying evaluation object cannot be built
 */
public Evaluation(Instances data) throws Exception {
  m_delegate = new weka.classifiers.evaluation.Evaluation(data);
}
/**
 * Evaluates the supplied class distribution on a single instance and records
 * the resulting prediction, delegating entirely to the wrapped evaluation
 * object.
 *
 * @param dist the supplied distribution
 * @param instance the test instance to be classified
 * @return the prediction
 * @throws Exception if model could not be evaluated successfully
 */
public double evaluateModelOnceAndRecordPrediction(double[] dist,
    Instance instance) throws Exception {
  return m_delegate.evaluateModelOnceAndRecordPrediction(dist, instance);
}
//set the class index dataFiltered.setClassIndex(dataFiltered.numAttributes() - 1); //build a model -- choose a classifier as you want classifier.buildClassifier(dataFiltered); Evaluation eval = new Evaluation(dataFiltered); eval.crossValidateModel(classifier, dataFiltered, 10, new Random(1)); //print stats -- do not require to calculate confusion mtx, weka do it! System.out.println(classifier); System.out.println(eval.toSummaryString()); System.out.println(eval.toMatrixString()); System.out.println(eval.toClassDetailsString());
// Fragment: evaluates an already-trained MLP on its own training set
// (resubstitution, not an independent test estimate).
Evaluation eval = new Evaluation(train);
eval.evaluateModel(mlp, train);
// NOTE(review): the original comment called this the "Mean root squared
// Error"; errorRate() is the error rate, whose meaning depends on the class
// attribute type -- confirm which metric was actually intended.
System.out.println(eval.errorRate());
System.out.println(eval.toSummaryString()); // summary of training performance
// Fragment: resubstitution evaluation of a J48 decision tree on the same
// data it was (presumably) trained on -- verify training happened elsewhere.
Evaluation eval = new Evaluation(data);
eval.evaluateModel(j48DecisionTree, data);
// Second argument enables extra statistics in the summary -- TODO confirm
// against the Evaluation.toSummaryString(String, boolean) contract.
System.out.println(eval.toSummaryString("\nResults\n======\n", true));
// Fragment -- NOTE(review): these lines appear to have been extracted out of
// their original order (unreachable code after throws, duplicate statements,
// unbalanced braces). Do not rely on the control flow shown here; check the
// full source. Looks like Weka's command-line evaluation driver: option
// parsing, model loading, and the cross-validation branch.
throw new Exception("\nHelp requested." + makeOptionString(classifier, globalInfo));
throw new Exception("\nWeka exception: " + ex.getMessage() + makeOptionString(classifier, false));
objectInputFileName = ""; // We have not actually read a built model, only some options
} catch (IllegalArgumentException ex) {
classifier = getModelFromFile(objectInputFileName, template);
classifier = getModelFromFile(objectInputFileName, template);
costMatrix = handleCostOption(Utils.getOption('m', options), template.numClasses());
throw new Exception("\nWeka exception: " + ex.getMessage() + makeOptionString(classifier, false));
return wekaStaticWrapper((Sourcable) classifier, sourceClass);
saveClassifier(classifier, template, objectOutputFileName);
} else if (!noCrossValidation) {
// CASE 3: CROSS-VALIDATION
Random random = new Random(seed);
Evaluation testingEvaluation = new Evaluation(new Instances(template, 0), costMatrix);
// Use the mapped header when the classifier remaps incoming instances itself.
if (classifier instanceof weka.classifiers.misc.InputMappedClassifier) {
testingEvaluation = new Evaluation(new Instances(mappedClassifierHeader, 0), costMatrix);
testingEvaluation.toggleEvalMetrics(toggleList);
// Fresh copy so cross-validation starts from an untrained classifier.
classifier = AbstractClassifier.makeCopy(classifierBackup);
predsBuff.append("\n=== Predictions under cross-validation ===\n\n");
testingEvaluation.crossValidateModel(classifier, new DataSource(trainFileName).getDataSet(actualClassIndex), folds, random, classificationOutput);
// Fragment: the middle of this test is missing from this view -- `eval` is
// asserted against below but never constructed here. Presumably it is
// produced by the map-only incremental cross-validation task under test;
// verify in the full source.
@Test public void testCrossValidateIncrementalMapOnlyRetainPredsForAUC() throws Exception {
Instances train = new Instances(new BufferedReader(new StringReader( CorrelationMatrixMapTaskTest.IRIS)));
train.setClassIndex(train.numAttributes() - 1);
// Expect 15 evaluated instances with retained predictions, and a computable
// (non-missing) AUC for class 0.
assertEquals(15, (int) eval.numInstances());
assertEquals(15, eval.predictions().size());
assertTrue(!Utils.isMissingValue(eval.areaUnderROC(0)));
/**
 * Regression test: feeds the canned predictions {@code PREDS} for the canned
 * dataset {@code DATA} through an {@code Evaluation}, then compares the
 * summary and per-class output against the stored reference via
 * {@code weka.test.Regression}.
 *
 * @throws Exception if the data cannot be loaded or evaluated
 */
public void testRegression() throws Exception {
  Instances inst = new Instances(new StringReader(DATA));
  inst.setClassIndex(inst.numAttributes() - 1);
  Evaluation eval = new Evaluation(inst);
  // Record every canned prediction against its corresponding instance.
  for (int i = 0; i < inst.numInstances(); i++) {
    eval.evaluateModelOnceAndRecordPrediction(PREDS[i], inst.instance(i));
  }
  String standard = eval.toSummaryString();
  String info = eval.toClassDetailsString();
  weka.test.Regression reg = new weka.test.Regression(getClass());
  reg.println(standard);
  reg.println(info);
  try {
    String diff = reg.diff();
    if (diff == null) {
      // First run: no reference output exists yet, so one is created.
      System.err.println("Warning: No reference available, creating.");
    } else if (!diff.equals("")) {
      // Fixed typo in the failure message ("tst" -> "test").
      fail("Regression test failed. Difference:\n" + diff);
    }
  } catch (IOException ex) {
    fail("Problem during regression testing.\n" + ex);
  }
}
// Fragment from IterativeClassifierOptimizer (enclosing loops/braces not
// fully visible): prepares the data, then accumulates per-fold evaluations.
data = new Instances(data); // defensive copy before mutating
data.deleteWithMissingClass();
if (data.numInstances() < m_NumFolds) {
System.err.println("WARNING: reducing number of folds to number of instances in " + "IterativeClassifierOptimizer");
Evaluation eval = new Evaluation(data);
EvaluationMetricHelper helper = new EvaluationMetricHelper(eval);
// Whether a larger value of the chosen metric is better.
boolean maximise = helper.metricIsMaximisable(m_evalMetric);
eval = new Evaluation(data);
helper.setEvaluation(eval);
for (int r = 0; r < numRuns; r++) {
for (int i = 0; i < numFolds; i++) {
eval.evaluateModel(classifiers[r][i], testSets[r][i]);
// NOTE(review): eval is re-created per fold from the fold's training set
// (for priors, presumably) and then used on the fold's test set -- confirm.
eval = new Evaluation(trainingSets[r][i]);
helper.setEvaluation(eval);
eval.evaluateModel(classifiers[r][i], testSets[r][i]);
result += getClassValueIndex() >= 0 ?
// Fragment: out-of-bag evaluation over an ensemble (enclosing loop over j and
// several closing braces are outside this view).
m_data = new Instances(data);
m_Numeric = m_data.classAttribute().isNumeric();
m_OutOfBagEvaluationObject = new Evaluation(m_data);
for (int i = 0; i < m_data.numInstances(); i++) {
double[] votes;
// Numeric class: average the non-missing member predictions into votes[0].
if (m_Numeric)
double pred = m_Classifiers[j].classifyInstance(m_data.instance(i));
if (!Utils.isMissingValue(pred)) {
votes[0] += pred; voteCount++;
if (voteCount > 0) {
votes[0] /= voteCount;
m_OutOfBagEvaluationObject.evaluationForSingleInstance(votes, m_data.instance(i), getStoreOutOfBagPredictions());
// Nominal class: normalise the accumulated vote distribution before scoring.
double sum = Utils.sum(votes);
if (sum > 0) {
Utils.normalize(votes, sum);
m_OutOfBagEvaluationObject.evaluationForSingleInstance(votes, m_data.instance(i), getStoreOutOfBagPredictions());
// Fragment: loads an ARFF-style dataset from a reader, then runs two separate
// evaluations. NOTE(review): eval, tree, eval1, cls and test are all defined
// outside this view.
BufferedReader br = new BufferedReader(fr);
Instances data = new Instances(br);
br.close();
data.setClassIndex(data.numAttributes() - 1); // class = last attribute
eval.crossValidateModel(tree, data, 10, new Random(1));
eval1.evaluateModel(cls, test);
// Fragment: loads a labelled training set and an unlabeled test set, then
// cross-validates a classifier (defined outside this view) on the training
// data and prints the reports.
Instances train = new Instances(new BufferedReader(new FileReader("Train.arff")));
// In this dataset the class is the first attribute (index 0); normally it
// would be numAttributes() - 1.
train.setClassIndex(0);
Instances unlabeled = new Instances(new BufferedReader(new FileReader("Test.arff")));
unlabeled.setClassIndex(0);
eval.crossValidateModel(classifier, train, 10, new Random(1));
String output = eval.toSummaryString();
System.out.println(output);
String classDetails = eval.toClassDetailsString();
System.out.println(classDetails);
// Fragment: pulls summary and per-class metrics out of an Evaluation and
// packs them into a result Instances object (loop/brace structure is
// truncated in this view).
double numCorrect = eval.correct();
double numIncorrect = eval.incorrect();
double MAE = eval.meanAbsoluteError();
double RMSE = eval.rootMeanSquaredError();
double RAE = eval.relativeAbsoluteError();
double RRSE = eval.rootRelativeSquaredError();
double totalNumberOfInstances = eval.numInstances();
// Nominal class: declare one attribute per class label per metric.
if (eval.getHeader().classAttribute().isNominal()) {
for (int i = 0; i < eval.getHeader().classAttribute().numValues(); i++) {
String classLabel = eval.getHeader().classAttribute().value(i) + "_";
atts.add(new Attribute(classLabel + "TP Rate"));
Instances evalInsts = new Instances("Evaluation results: " + eval.getHeader().relationName(), atts, 1);
// Fill the value row; offset tracks the attribute position.
if (eval.getHeader().classAttribute().isNominal()) {
vals[offset++] = eval.kappa();
for (int i = 0; i < eval.getHeader().classAttribute().numValues(); i++) {
vals[offset++] = eval.truePositiveRate(i);
vals[offset++] = eval.falseNegativeRate(i);
vals[offset++] = eval.precision(i);
vals[offset++] = eval.recall(i);
vals[offset++] = eval.fMeasure(i);
vals[offset++] = eval.areaUnderROC(i);
vals[offset++] = eval.areaUnderPRC(i);
// NOTE(review): this snippet is C# (IKVM-style Weka usage), not Java --
// `as`, `foreach (... in ...)` are C# syntax. The cast-plus-`as` on the
// first line is contradictory (a failed `as` yields null, but the cast would
// throw first); confirm which behaviour is intended.
Classifier nbTree = (Classifier)SerializationHelper.read(Model) as NBTree;
Instances testDataSet = new Instances(new BufferedReader(new FileReader(arff)));
testDataSet.setClassIndex(10); // class attribute is hard-coded at index 10
Evaluation evaluation = new Evaluation(testDataSet);
// Score every test instance and record its prediction.
for (int i = 0; i < testDataSet.numInstances(); i++) {
Instance instance = testDataSet.instance(i);
evaluation.evaluateModelOnceAndRecordPrediction(nbTree, instance);
}
// Walk the recorded predictions, extracting distribution and predicted class.
foreach (object o in evaluation.predictions().toArray()) {
NominalPrediction prediction = o as NominalPrediction;
if (prediction != null) {
double[] distribution = prediction.distribution();
double predicted = prediction.predicted();
}
}
// Trains a LibSVM classifier with options looked up from the database and
// evaluates it on a held-out test set.
// NOTE(review): the `new Instances (...)` calls are literal placeholders --
// this method does not compile as written; supply a Reader/source argument.
public static void classify() {
try {
Instances train = new Instances (...);
train.setClassIndex(train.numAttributes() - 1);
Instances test = new Instances (...);
test.setClassIndex(test.numAttributes() - 1);
ClassificationType classificationType = ClassificationTypeDAO.get(6); // 6 is SVM.
LibSVM classifier = new LibSVM();
// Classifier options are stored as a space-separated string.
String options = (classificationType.getParameters());
String[] optionsArray = options.split(" ");
classifier.setOptions(optionsArray);
classifier.buildClassifier(train);
Evaluation eval = new Evaluation(train);
eval.evaluateModel(classifier, test);
System.out.println(eval.toSummaryString("\nResults\n======\n", false));
} catch (Exception ex) {
Misc_Utils.printStackTrace(ex);
}
}
// Fragment: cross-validates a Naive Bayes classifier (nB, defined outside
// this view) and mirrors the reports both to stdout and a Swing text area.
Instances train = new Instances(breader);
train.setClassIndex(train.numAttributes() - 1); // class = last attribute
breader.close();
eval.crossValidateModel(nB, train, 10, new Random(1));
System.out.println(nB);
System.out.println(eval.toSummaryString("\nSummary Results\n==================", true));
System.out.println(eval.toClassDetailsString());
System.out.println(eval.toMatrixString());
// Same output, appended to the GUI text area.
txtAreaShow.append("" + nB);
txtAreaShow.append(eval.toSummaryString("\n\nSummary Results\n==================\n", true));
txtAreaShow.append(eval.toClassDetailsString());
txtAreaShow.append(eval.toMatrixString());
txtAreaShow.append("\n\n\n");
// Fragment: the core of a cross-validation loop (the fold loop over i and
// several braces are outside this view).
data = new Instances(data); // copy so randomisation does not disturb the caller
data.randomize(random);
// Stratify only makes sense for a nominal class.
if (data.classAttribute().isNominal()) {
data.stratify(numFolds);
Instances train = data.trainCV(numFolds, i, random);
setPriors(train);
// Fresh copy per fold so each fold trains from scratch.
Classifier copiedClassifier = AbstractClassifier.makeCopy(classifier);
copiedClassifier.buildClassifier(train);
Instances test = data.testCV(numFolds, i);
if (classificationOutput != null){
evaluateModel(copiedClassifier, test, forPrinting);
} else {
evaluateModel(copiedClassifier, test);
// Fragment: evaluation that prefers an efficient batch-prediction path when
// the classifier supports one, falling back to per-instance scoring.
// NOTE(review): the condition begins before this view (dangling `&&`), and
// the branch structure is truncated -- check the full source.
double predictions[] = new double[data.numInstances()];
&& ((BatchPredictor) classifier).implementsMoreEfficientBatchPrediction()) {
// Batch path: blank out class values so predictions are made blind.
Instances dataPred = new Instances(data);
for (int i = 0; i < data.numInstances(); i++) {
dataPred.instance(i).setClassMissing();
double[] p = preds[i];
predictions[i] = evaluationForSingleInstance(p, data.instance(i), true);
// Per-instance fallback path.
for (int i = 0; i < data.numInstances(); i++) {
predictions[i] = evaluateModelOnceAndRecordPrediction(classifier, data.instance(i));
if (classificationOutput != null) {
classificationOutput.printClassification(classifier,
// Fragment: evaluates m_classifier on a (possibly stratified) test set,
// optionally retaining only a fraction of predictions (m_predFrac).
// Enclosing method and several braces are outside this view.
m_trainingHeader.compactify();
test.randomize(r);
if (test.classAttribute().isNominal() && m_totalFolds > 1) {
test.stratify(m_totalFolds);
m_eval.evaluateModel(m_classifier, test);
// No prediction retention requested: drop whatever was stored.
if (m_predFrac <= 0) {
((AggregateableEvaluationWithPriors) m_eval).deleteStoredPredictions();
for (int i = 0; i < test.numInstances(); i++) {
// Record predictions only when a retention fraction is configured.
if (m_predFrac > 0) {
m_eval.evaluateModelOnceAndRecordPrediction(m_classifier, test.instance(i));
} else {
m_eval.evaluateModelOnce(m_classifier, test.instance(i));
// Fragment: formats a performance report from m_eval, then (in code whose
// ordering looks truncated/re-arranged by extraction -- verify against the
// full source) re-initialises the evaluators and scores a single instance,
// maintaining a sliding-window evaluation alongside the cumulative one.
String results = "=== Performance information ===\n\n" + "Scheme: " + textTitle + "\n" + "Relation: " + m_eval.getHeader().relationName() + "\n\n" + m_eval.toSummaryString();
// Per-class details only for a nominal class, and only when requested.
if (m_eval.getHeader().classIndex() >= 0 && m_eval.getHeader().classAttribute().isNominal() && (m_outputInfoRetrievalStats)) {
results += "\n" + m_eval.toClassDetailsString();
if (m_eval.getHeader().classIndex() >= 0 && m_eval.getHeader().classAttribute().isNominal()) {
results += "\n" + m_eval.toMatrixString();
// Strip the package prefix from the classifier name for display.
m_classifierName.substring(m_classifierName.lastIndexOf(".") + 1, m_classifierName.length());
m_eval = new Evaluation(instance.dataset());
m_eval.useNoPriors();
// Optional fixed-size window evaluation in addition to the cumulative one.
if (m_windowSize > 0) {
m_windowEval = new Evaluation(instance.dataset());
m_windowEval.useNoPriors();
if (!instance.classIsMissing()) {
if (m_outputInfoRetrievalStats) {
m_eval.evaluateModelOnceAndRecordPrediction(dist, instance);
} else {
m_eval.evaluateModelOnce(dist, instance);
m_windowEval.evaluateModelOnce(dist, instance);
m_window.addFirst(instance);