/**
 * Switches this document to its compact representation: the word list is
 * replaced by the array of active feature ids resolved through {@code map},
 * and the words themselves are released.
 * If the words were already released, the feature array is nulled too so
 * the object stays in a consistent state.
 *
 * @param map feature map used to resolve words into feature ids
 */
public void toCompactFeatureRep(FeatureMap map) {
    if (words == null) {
        // No verbose representation to compact.
        activeFeatures = null;
        return;
    }
    activeFeatures = getActiveFid(map);
    words = null; // free the verbose representation
}
/**
 * Converts this document into its compact form. The array of active
 * feature ids (looked up via {@code map}) replaces the word list, which
 * is then dropped to save memory. When no words are present, the active
 * feature array is cleared as well.
 *
 * @param map feature map used to translate words into feature ids
 */
public void toCompactFeatureRep(FeatureMap map) {
    if (words == null) {
        activeFeatures = null; // keep both representations in sync
        return;
    }
    activeFeatures = getActiveFid(map);
    words = null; // drop the word-level representation
}
/**
 * Replaces the word-level representation of this document with the
 * compact array of active feature ids obtained from {@code map}.
 * A document with no words ends up with a null feature array, keeping
 * the two representations consistent.
 *
 * @param map feature map used for the word-to-feature-id lookup
 */
public void toCompactFeatureRep(FeatureMap map) {
    boolean hasWords = (words != null);
    if (!hasWords) {
        activeFeatures = null;
        return;
    }
    activeFeatures = getActiveFid(map);
    words = null; // release the words once the compact form exists
}
/**
 * Performs one online-learning step on {@code doc} with unit weight,
 * using the document's gold class id as the supervision signal.
 * Delegates to {@link #weightedOnlineLearning} after extracting the
 * document's active feature ids.
 *
 * @param doc the training document
 */
public void onlineLearning(Document doc) {
    int[] activeFeatures = doc.getActiveFid(map);
    weightedOnlineLearning(activeFeatures, 1.0, doc.classID);
}
/**
 * Runs a single weighted online-learning update for {@code doc}, with a
 * fixed weight of 1.0 and the document's own class id as the label.
 *
 * @param doc the training document whose active features drive the update
 */
public void onlineLearning(Document doc) {
    int[] feats = doc.getActiveFid(map);
    weightedOnlineLearning(feats, 1.0, doc.classID);
}
/**
 * Convenience wrapper around {@link #weightedOnlineLearning}: trains on
 * {@code doc} with unit weight, using the document's class id as the
 * target label.
 *
 * @param doc the document to learn from
 */
public void onlineLearning(Document doc) {
    int[] docFeatures = doc.getActiveFid(map);
    double unitWeight = 1.0;
    weightedOnlineLearning(docFeatures, unitWeight, doc.classID);
}
public double[] getPredictionConfidence(Document doc) { double[] classLLProbs = new double[classesN]; int[] activeFeats = doc.getActiveFid(map); for (int i = 0; i < classesN; i++) { classLLProbs[i] = Math.log(getPrior(i)); for (int activeFeat : activeFeats) classLLProbs[i] += Math.log(getFidProb(activeFeat, i)); } int maxClass = 0; for (int i = 0; i < classesN; i++) if (classLLProbs[i] > classLLProbs[maxClass]) maxClass = i; // all the log-likelihoods are negative, so we're selecting the maximum LL // e.g. if the LLs were: -2001,-2002,-2003, we choose -2001. // then me multiply all the numbers by e^(-2001) which allows // us to do precise math double denom = 0; double[] res = new double[classesN]; for (int i = 0; i < classesN; i++) { res[i] += Math.exp(classLLProbs[i] - classLLProbs[maxClass]); denom += res[i]; } for (int i = 0; i < classesN; i++) res[i] = res[i] / denom; return res; }
public double[] getPredictionConfidence(Document doc) { double[] classLLProbs = new double[classesN]; int[] activeFeats = doc.getActiveFid(map); for (int i = 0; i < classesN; i++) { classLLProbs[i] = Math.log(getPrior(i)); for (int activeFeat : activeFeats) classLLProbs[i] += Math.log(getFidProb(activeFeat, i)); } int maxClass = 0; for (int i = 0; i < classesN; i++) if (classLLProbs[i] > classLLProbs[maxClass]) maxClass = i; // all the log-likelihoods are negative, so we're selecting the maximum LL // e.g. if the LLs were: -2001,-2002,-2003, we choose -2001. // then me multiply all the numbers by e^(-2001) which allows // us to do precise math double denom = 0; double[] res = new double[classesN]; for (int i = 0; i < classesN; i++) { res[i] += Math.exp(classLLProbs[i] - classLLProbs[maxClass]); denom += res[i]; } for (int i = 0; i < classesN; i++) res[i] = res[i] / denom; return res; }
public double[] getPredictionConfidence(Document doc) { double[] classLLProbs = new double[classesN]; int[] activeFeats = doc.getActiveFid(map); for (int i = 0; i < classesN; i++) { classLLProbs[i] = Math.log(getPrior(i)); for (int activeFeat : activeFeats) classLLProbs[i] += Math.log(getFidProb(activeFeat, i)); } int maxClass = 0; for (int i = 0; i < classesN; i++) if (classLLProbs[i] > classLLProbs[maxClass]) maxClass = i; // all the log-likelihoods are negative, so we're selecting the maximum LL // e.g. if the LLs were: -2001,-2002,-2003, we choose -2001. // then me multiply all the numbers by e^(-2001) which allows // us to do precise math double denom = 0; double[] res = new double[classesN]; for (int i = 0; i < classesN; i++) { res[i] += Math.exp(classLLProbs[i] - classLLProbs[maxClass]); denom += res[i]; } for (int i = 0; i < classesN; i++) res[i] = res[i] / denom; return res; }