/**
 * Computes the inter-rater agreement for a single annotation item,
 * normalized by the number of raters who coded that item. This is the
 * per-item step performed for each item of an annotation study when
 * {@link #calculateAgreement()} is invoked.
 * @param item the annotation item to evaluate; must not be null.
 * @return the normalized agreement for the given item.
 * @throws NullPointerException if the given item is null.
 */
public double calculateItemAgreement(final ICodingAnnotationItem item) {
    final double rawItemAgreement = doCalculateItemAgreement(item);
    return rawItemAgreement / item.getRaterCount();
}
/**
 * Measures the observed agreement across all items of the study: the sum
 * of the raw per-item agreements divided by the total number of ratings
 * on items coded by at least two raters. Items with fewer than two raters
 * contribute neither to the numerator nor to the denominator.
 */
@Override
public double calculateObservedAgreement() {
    double agreementSum = 0.0;
    double ratingTotal = 0.0;
    for (ICodingAnnotationItem item : study.getItems()) {
        final int itemRaters = item.getRaterCount();
        if (itemRaters < 2) {
            continue; // single-rated items carry no agreement information
        }
        agreementSum += doCalculateItemAgreement(item);
        ratingTotal += itemRaters;
    }
    return agreementSum / ratingTotal;
}
/** Creates a new {@link CodingAnnotationItem} which has been coded with
 * the given annotation categories. Note that the order of the categories
 * must correspond to the raters' indexes. Use null to represent missing
 * annotations. Invoking <code>addItem("A", "B", null, "A")</code>
 * indicates an annotation item which has been coded as category "A"
 * by rater 0 and 3 and as category "B" by rater 1. Rater 2 did not
 * assign any category to the item. The method is a shorthand for
 * {@link #addItemAsArray(Object[])}.
 * @param annotations one category per rater index; null entries denote
 *     missing annotations.
 * @return the newly created annotation item. */
public ICodingAnnotationItem addItem(final Object... annotations) { return addItemAsArray(annotations); }
/**
 * Creates a deep copy of this annotation study: a new study with the same
 * number of raters, fresh copies of every annotation item and its units,
 * and the same set of categories (copied last to preserve category order).
 */
@Override
public CodingAnnotationStudy clone() {
    final CodingAnnotationStudy copy = new CodingAnnotationStudy(getRaterCount());
    for (ICodingAnnotationItem item : getItems()) {
        final CodingAnnotationItem itemCopy = new CodingAnnotationItem(raters.size());
        for (IAnnotationUnit unit : item.getUnits()) {
            itemCopy.addUnit(copy.createUnit(copy.items.size(),
                    unit.getRaterIdx(), unit.getCategory()));
        }
        copy.items.add(itemCopy);
    }
    for (Object category : getCategories()) {
        copy.addCategory(category);
    }
    return copy;
}
/**
 * Creates the agreement measure represented by this enum constant for the
 * given annotation study.
 * @param aStudy the coding study the measure should be bound to; must not
 *     be null.
 * @return a new measure instance for the given study.
 * @throws IllegalArgumentException if no measure is defined for this
 *     enum constant.
 */
public IAgreementMeasure make(ICodingAnnotationStudy aStudy) {
    switch (this) {
    case COHEN_KAPPA_AGREEMENT:
        return new CohenKappaAgreement(aStudy);
    case FLEISS_KAPPA_AGREEMENT:
        return new FleissKappaAgreement(aStudy);
    case KRIPPENDORFF_ALPHA_NOMINAL_AGREEMENT:
        return new KrippendorffAlphaAgreement(aStudy, new NominalDistanceFunction());
    default:
        // Name the offending constant so the failure is diagnosable.
        throw new IllegalArgumentException("No agreement measure defined for " + this);
    }
}
/** Calculates the expected inter-rater agreement that assumes a
 * uniform distribution over all raters and annotations.
 * @return 1 divided by the number of annotation categories.
 * @throws NullPointerException if the annotation study is null.
 * @throws ArithmeticException if there are no annotation categories. */
public double calculateExpectedAgreement() {
    final int categoryCount = study.getCategoryCount();
    if (categoryCount == 0) {
        // The original expression 1.0 / (double) 0 is floating-point
        // division and silently yields Double.POSITIVE_INFINITY; throw the
        // documented ArithmeticException instead.
        throw new ArithmeticException("The annotation study contains no categories.");
    }
    return 1.0 / (double) categoryCount;
}
/**
 * Computes the maximum possible value of the kappa coefficient for the
 * provided study. With balanced off-marginals (i.e., an equal
 * disagreement for each pair of categories) the maximum kappa is 1;
 * otherwise it decreases as the distribution of disagreements becomes
 * more skewed.
 */
public double calculateMaximumAgreement() {
    final double maxObserved = calculateMaximumObservedAgreement();
    final double expected = calculateExpectedAgreement();
    // With zero expected agreement, kappa reduces to the observed term.
    return (expected == 0.0)
            ? maxObserved
            : (maxObserved - expected) / (1.0 - expected);
}
/** Initializes the instance for the given annotation study. The study
 * may never be null. The constructor checks that the study has been
 * coded by exactly two raters (Cohen's kappa is a two-rater measure)
 * and logs a warning if the study contains missing annotations.
 * @param study the coding study to evaluate; must not be null. */
public CohenKappaAgreement(final ICodingAnnotationStudy study) { super(study); ensureTwoRaters(); warnIfMissingValues(); }
/** Initializes the instance for the given annotation study. The study
 * may never be null. The constructor checks that the study has been
 * coded by exactly two raters (Scott's pi is a two-rater measure)
 * and logs a warning if the study contains missing annotations.
 * @param study the coding study to evaluate; must not be null. */
public ScottPiAgreement(final ICodingAnnotationStudy study) { super(study); ensureTwoRaters(); warnIfMissingValues(); }
/**
 * Computes the raw (unnormalized) agreement of one annotation item as
 * the number of agreeing judgment pairs per category, scaled by
 * (raterCount - 1). Items coded by at most one rater contribute zero.
 */
protected double doCalculateItemAgreement(final ICodingAnnotationItem item) {
    final int raterCount = item.getRaterCount();
    if (raterCount <= 1) {
        return 0.0; // no pair of judgments to compare
    }
    final Map<Object, Integer> countsPerCategory =
            CodingAnnotationStudy.countTotalAnnotationsPerCategory(item);
    double pairSum = 0.0;
    for (Integer count : countsPerCategory.values()) {
        pairSum += count * (count - 1);
    }
    return pairSum / (raterCount - 1.0);
}
/** Initializes the instance for the given annotation study. The study
 * may never be null. Unlike the two-rater measures, no rater-count check
 * is performed here; a warning is logged if the study contains missing
 * annotations.
 * @param study the coding study to evaluate; must not be null. */
public HubertKappaAgreement(final ICodingAnnotationStudy study) { super(study); warnIfMissingValues(); }
@Override public int getUnitCount() { int result = 0; for (ICodingAnnotationItem item : items) result += item.getRaterCount(); return result; //return items.size() * raters.size(); }
/** Initializes the instance for the given annotation study. The study
 * may never be null. The constructor checks that the study has been
 * coded by exactly two raters (Bennett's S is a two-rater measure).
 * @param study the coding study to evaluate; must not be null. */
public BennettSAgreement(final ICodingAnnotationStudy study) { super(study); ensureTwoRaters(); }
/** Initializes the instance for the given annotation study. The study
 * may never be null. Unlike the two-rater measures, no rater-count check
 * is performed here; a warning is logged if the study contains missing
 * annotations.
 * @param study the coding study to evaluate; must not be null. */
public FleissKappaAgreement(final ICodingAnnotationStudy study) { super(study); warnIfMissingValues(); }
/**
 * Initializes an empty annotation study for a coding task with the given
 * number of raters. The basic setup of a coding study is assigning
 * categories to units with fixed boundaries. Each rater is registered
 * under its zero-based index as its name.
 * @param raterCount the number of raters participating in the study.
 */
public CodingAnnotationStudy(int raterCount) {
    this();
    for (int i = 0; i < raterCount; i++) {
        addRater(String.valueOf(i));
    }
}
/**
 * Prints the coincidence matrix of the given coding study to the
 * specified output stream.
 * @param out the stream to print to.
 * @param study the coding study whose coincidence matrix is printed.
 */
public void print(final PrintStream out, final ICodingAnnotationStudy study) {
    doPrint(out, study, CodingAnnotationStudy.countCategoryCoincidence(study));
}
/**
 * Shorthand for invoking {@link #addItem(Object...)} with the same
 * parameters multiple times. Useful for modeling annotation data based
 * on a contingency table. A zero or negative count adds no items.
 * @param times how often the item should be added.
 * @param values the categories per rater index for each added item.
 */
public void addMultipleItems(int times, final Object... values) {
    int remaining = times;
    while (remaining-- > 0) {
        addItemAsArray(values);
    }
}
/** Calculates the expected inter-rater agreement that assumes a
 * uniform distribution over all raters and annotations.
 * @return 1 divided by the number of annotation categories.
 * @throws NullPointerException if the annotation study is null.
 * @throws ArithmeticException if there are no annotation categories. */
@Override
public double calculateExpectedAgreement() {
    final int categoryCount = study.getCategoryCount();
    if (categoryCount == 0) {
        // The original expression 1.0 / (double) 0 is floating-point
        // division and silently yields Double.POSITIVE_INFINITY; throw the
        // documented ArithmeticException instead.
        throw new ArithmeticException("The annotation study contains no categories.");
    }
    return 1.0 / (double) categoryCount;
}