/** Creates a default SGDText */
public Classifier getClassifier() {
  SGDText p = new SGDText();
  p.setEpochs(1);
  p.setLearningRate(0.001);
  return p;
}
/** Returns the raw SVM output (the margin w.x + b) for the current input vector. */
protected double svmOutput() {
  double wx = dotProd(m_inputVector);
  double z = (wx + m_bias);
  return z;
}
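// dotProd(m_inputVector) above is the sparse dot product between the current
// document's token vector and the learned coefficients. A minimal standalone
// sketch of that idea (the map-based signature is an assumption for
// illustration, not the actual SGDText internals, which use a dictionary of
// Count objects):
import java.util.Map;

class SparseDotProduct {
  static double dotProd(Map<String, Double> input, Map<String, Double> coefficients) {
    double sum = 0;
    // only tokens present in the document contribute to the margin
    for (Map.Entry<String, Double> e : input.entrySet()) {
      Double w = coefficients.get(e.getKey());
      if (w != null) {
        sum += w * e.getValue();
      }
    }
    return sum;
  }
}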
/**
 * Main method for testing this class.
 */
public static void main(String[] args) {
  runClassifier(new SGDText(), args);
}
}
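// runClassifier hooks into Weka's standard command-line evaluation harness,
// so the class can be run as, e.g.,
//   java weka.classifiers.functions.SGDText -t train.arff
// (the dataset name is a placeholder).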
reset();

setLossFunction(new SelectedTag(Integer.parseInt(lossString), TAGS_SELECTION));
setOutputProbsForSVM(Utils.getFlag("output-probs", options));
setLambda(Double.parseDouble(lambdaString));
setLearningRate(Double.parseDouble(learningRateString));
setEpochs(Integer.parseInt(epochsString));
setUseWordFrequencies(Utils.getFlag("W", options));
setPeriodicPruning(Integer.parseInt(pruneFreqS));
setMinWordFrequency(Double.parseDouble(minFreq));
setMinAbsoluteCoefficientValue(Double.parseDouble(minCoeff));
setNormalizeDocLength(Utils.getFlag("normalize", options));
setNorm(Double.parseDouble(normFreqS));
setLNorm(Double.parseDouble(lnormFreqS));
setLowercaseTokens(Utils.getFlag("lowercase", options));
options.add("" + getLossFunction().getSelectedTag().getID()); if (getOutputProbsForSVM()) { options.add("-output-probs"); options.add("" + getLearningRate()); options.add("-R"); options.add("" + getLambda()); options.add("-E"); options.add("" + getEpochs()); if (getUseWordFrequencies()) { options.add("-W"); options.add("" + getPeriodicPruning()); options.add("-M"); options.add("" + getMinWordFrequency()); options.add("" + getMinAbsoluteCoefficientValue()); if (getNormalizeDocLength()) { options.add("-normalize"); options.add("" + getNorm()); options.add("-lnorm"); options.add("" + getLNorm()); if (getLowercaseTokens()) { options.add("-lowercase"); if (getStopwordsHandler() != null) { options.add("-stopwords-handler");
reset();

// make sure the classifier can handle this data
getCapabilities().testWithFail(data);

initializeSVMProbs(data);

// shuffle the data, run the SGD training passes, then prune low-value
// dictionary entries
data.randomize(new Random(getSeed()));
train(data);
pruneDictionary(true);
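// End to end, training follows the usual Weka pattern. A minimal sketch
// (the ARFF file name is a placeholder; SGDText expects a string attribute
// holding the document text and a two-valued nominal class):
import weka.classifiers.functions.SGDText;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

class TrainSGDText {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("documents.arff"); // placeholder path
    data.setClassIndex(data.numAttributes() - 1);

    SGDText cls = new SGDText();
    cls.setEpochs(5);
    cls.buildClassifier(data); // runs the fragment above
    System.out.println(cls);
  }
}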
tokenizeInstance(instance, updateDictionary);
double pred = svmOutput();
double[] vals = new double[2];
vals[0] = pred;

double wx = dotProd(m_inputVector);
// map the 0/1 class value to a -1/+1 target and compute the signed margin
double y = (instance.classValue() == 0) ? -1 : 1;
double z = y * (wx + m_bias);

// per-instance step size: learning rate scaled by the loss derivative
double dloss = dloss(z);
double factor = m_learningRate * y * dloss;
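// dloss(z) is the derivative of the chosen loss with respect to the margin
// z = y * (w.x + b). A self-contained sketch for the two losses SGDText
// supports (the boolean flag stands in for the classifier's loss-function field):
class LossDerivatives {
  static double dloss(double z, boolean hingeLoss) {
    if (hingeLoss) {
      // hinge loss max(0, 1 - z): slope 1 inside the margin, 0 outside
      return (z < 1) ? 1.0 : 0.0;
    }
    // log loss log(1 + e^-z): derivative magnitude 1 / (1 + e^z),
    // split into two branches to avoid overflow for large |z|
    if (z < 0) {
      return 1.0 / (Math.exp(z) + 1.0);
    }
    double t = Math.exp(-z);
    return t / (t + 1.0);
  }
}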
double[] result = new double[2];
tokenizeInstance(inst, false);
double wx = dotProd(m_inputVector);
double z = (wx + m_bias);
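// The fragment stops before result is filled in. A sketch of the missing tail
// (assumption: hinge output is thresholded at zero, while log-loss output goes
// through the logistic function):
if (hingeLoss) { // hypothetical flag standing in for the loss-function field
  if (z <= 0) {
    result[0] = 1;
  } else {
    result[1] = 1;
  }
} else {
  result[1] = 1.0 / (1.0 + Math.exp(-z)); // P(class = 1 | document)
  result[0] = 1.0 - result[1];
}
return result;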
if (m_dictionary == null) {
  throw new Exception("No model built yet, can't aggregate");
}

// pull in the other model's dictionary and accumulate its bias term
LinkedHashMap<String, SGDText.Count> tempDict = toAggregate.getDictionary();
m_bias += toAggregate.bias();
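// aggregate() enables map-reduce style training: build one SGDText per data
// chunk, fold the models together, then finalize. A sketch assuming the
// Aggregateable contract (models is a hypothetical List<SGDText>, one per chunk):
SGDText merged = models.get(0);
for (int i = 1; i < models.size(); i++) {
  merged.aggregate(models.get(i));
}
merged.finalizeAggregation(); // completes the merge of the summed terms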