/**
 * Returns a new, empty weight vector with the same parameter settings as this one.
 *
 * @return An empty weight vector.
 **/
public SparseWeightVector emptyClone() {
    // Only the standard deviation parameter is carried over; the clone is built with
    // the single-argument constructor. NOTE(review): presumably that constructor
    // resets the learned bias to its initial value — confirm against the constructor.
    return new BiasedRandomWeightVector(stddev);
}
// End of class BiasedRandomWeightVector.
}
/**
 * Writes a textual representation of this vector to the given stream. The format is the
 * same as that of {@link SparseWeightVector#write(PrintStream)}, except that two extra
 * lines appear just after the <code>"Begin"</code> annotation reporting the values of
 * {@link #stddev} and {@link #bias}.
 *
 * @param out The stream to write to.
 **/
public void write(PrintStream out) {
    out.println("Begin BiasedRandomWeightVector");
    out.print("stddev = ");
    out.println(stddev);
    out.print("bias = ");
    out.println(bias);
    toStringJustWeights(out);
    out.println("End BiasedRandomWeightVector");
}
// NOTE(review): fragment of a mistake-driven update loop — the enclosing method's
// signature and surrounding control flow are not visible in this excerpt.
// Record whether any label seen so far is a conjunctive feature.
conjunctiveLabels |= labelLexicon.lookupKey(label).isConjunctive();
// Grow the network until it holds a weight vector for every label index up to
// 'label'. NOTE(review): presumably N tracks network.size() — confirm in the
// enclosing method.
while (N++ <= label)
    network.add(new BiasedRandomWeightVector());
// Mark whether slot i is the true label, fetch its weight vector, and compute its
// normalized score on the current example.
isLabel[i] = i == label;
w[i] = (BiasedRandomWeightVector) network.get(i);
scores[i] = w[i].dot(exampleFeatures, exampleValues) / norm2;
// Track the running extremes of the scores across slots.
min = Math.min(min, scores[i]);
max = Math.max(max, scores[i]);
// MIRA-style update: scale the example into this slot's weight vector unless the
// multiplier is effectively zero.
double t = getMultiplier(min, scores[i], isLabel[i]);
if (!nearlyEqualTo(t, 0))
    w[i].scaledAdd(exampleFeatures, exampleValues, t);
/**
 * Returns the classification of the given example as a single feature instead of a
 * {@link FeatureVector}.
 *
 * @param f The features array.
 * @param v The values array.
 * @return The classification of the example as a feature, or <code>null</code> when the
 *         network contains no weight vectors.
 **/
public Feature featureValue(int[] f, double[] v) {
    int winner = -1;
    double winningScore = Double.NEGATIVE_INFINITY;
    final int size = network.size();

    // Argmax over the dot products of every label's weight vector with the example.
    for (int label = 0; label < size; ++label) {
        double current = ((BiasedRandomWeightVector) network.get(label)).dot(f, v);
        if (current > winningScore) {
            winningScore = current;
            winner = label;
        }
    }

    return winner == -1 ? null : predictions.get(winner);
}
/**
 * Writes the algorithm's internal representation as text: one "label" header line per
 * known prediction followed by that label's weight vector, then a terminating marker.
 *
 * @param out The output stream.
 **/
public void write(PrintStream out) {
    final int size = network.size();
    for (int label = 0; label < size; ++label) {
        out.println("label: " + predictions.get(label).getStringValue());
        BiasedRandomWeightVector weights = (BiasedRandomWeightVector) network.get(label);
        weights.write(out, lexicon);
    }
    out.println("End of SparseMIRA");
}
/**
 * Returns a deep clone of this learning algorithm: the cloned learner gets its own
 * network of cloned weight vectors, so training the clone does not disturb this one.
 *
 * @return A deep clone of this learning algorithm.
 **/
public Object clone() {
    SparseMIRA clone = null;

    try {
        clone = (SparseMIRA) super.clone();
    } catch (CloneNotSupportedException e) {
        // Catch only the exception super.clone() can actually throw instead of the
        // overly broad Exception; error-and-exit handling mirrors the rest of the file.
        System.err.println("Error cloning SparseMIRA: " + e);
        e.printStackTrace();
        System.exit(1);
    }

    // Replace the shallow-copied network with a fresh vector of cloned weights.
    int N = network.size();
    clone.network = new OVector(N);
    for (int i = 0; i < N; ++i)
        clone.network.add(((BiasedRandomWeightVector) network.get(i)).clone());
    return clone;
}
/**
 * Writes the learned function's internal representation in binary form: the superclass
 * representation, the network size, then each weight vector in label order.
 *
 * @param out The output stream.
 **/
public void write(ExceptionlessOutputStream out) {
    super.write(out);
    final int size = network.size();
    out.writeInt(size);
    for (int label = 0; label < size; ++label)
        ((BiasedRandomWeightVector) network.get(label)).write(out);
}
// NOTE(review): fragment — braces do not balance within this excerpt, so it spans
// parts of two different scoring contexts; the enclosing method(s) are not visible.
// Score a single given label: look up its index, take the dot product of its weight
// vector with the example, and record the score under the label's string form.
int key = labelLexicon.lookup(f);
score = ((BiasedRandomWeightVector) network.get(key)).dot(exampleFeatures, exampleValues);
result.put(label.toString(), score);
// Score every known label. NOTE(review): presumably N == network.size() — confirm in
// the enclosing method.
for (int l = 0; l < N; l++) {
    double score = ((BiasedRandomWeightVector) network.get(l)).dot(exampleFeatures, exampleValues);
    result.put(labelLexicon.lookupKey(l).getStringValue(), score);
/**
 * Outputs a textual representation of this vector to the specified stream. The string
 * representation matches {@link SparseWeightVector#write(PrintStream)} except for two
 * extra lines just after the <code>"Begin"</code> annotation that report the values of
 * {@link #stddev} and {@link #bias}.
 *
 * @param out The stream to write to.
 * @param lex The feature lexicon.
 **/
public void write(PrintStream out, Lexicon lex) {
    out.println("Begin BiasedRandomWeightVector");
    out.print("stddev = ");
    out.println(stddev);
    out.print("bias = ");
    out.println(bias);
    toStringJustWeights(out, 0, lex);
    out.println("End BiasedRandomWeightVector");
}
/**
 * Produces a set of scores indicating the degree to which each possible discrete
 * classification value is associated with the given example object. Each score is simply
 * the dot product of the corresponding weight vector with the example vector.
 *
 * @param exampleFeatures The example's array of feature indices.
 * @param exampleValues The example's array of feature values.
 * @return One score per label known to the network.
 **/
public ScoreSet scores(int[] exampleFeatures, double[] exampleValues) {
    ScoreSet result = new ScoreSet();
    final int size = network.size();
    for (int label = 0; label < size; ++label) {
        BiasedRandomWeightVector weights = (BiasedRandomWeightVector) network.get(label);
        result.put(labelLexicon.lookupKey(label).getStringValue(),
                   weights.dot(exampleFeatures, exampleValues));
    }
    return result;
}