/**
 * Adds this example's contribution to the primal weight vector:
 * w += (y * alpha) * fv, where alpha is the dual coefficient and y the label.
 */
@Override
public void fillWeightVector(WeightVector w) {
    double coefficient = alpha * y;
    w.addSparseFeatureVector(fv, coefficient);
}
@Override public void solveSubProblemAndUpdateW(L2SolverInfo si, WeightVector w) { double C = sC; double dot_product = w.dotProduct(fv); double xij_norm2 = fv.l2NormSqure(); double NG = 1.0 - y * dot_product - (alpha / (2.0 * C)); double PG = -NG; if (alpha == 0f) { PG = Math.min(-NG, 0); } si.PGmax_new = Math.max(si.PGmax_new, PG); si.PGmin_new = Math.min(si.PGmin_new, PG); if (Math.abs(PG) > UPDATE_CONDITION) { double step = NG / (xij_norm2 + (1.0 / (2.0 * C))); double new_alpha = Math.max(alpha + step, 0);// make sure // alpha_[i][j] // is w.addSparseFeatureVector(fv, (new_alpha - alpha) * y); alpha = new_alpha; } }
/**
 * Rebuilds this example's contribution to the weight vector by summing
 * (y * alpha) * fv over every stored (alpha, featureVector) pair.
 */
@Override
public void fillWeightVector(WeightVector w) {
    for (Pair<Double, FeatureVector> entry : alphafv_list) {
        double dualCoefficient = entry.getFirst();
        w.addSparseFeatureVector(entry.getSecond(), y * dualCoefficient);
    }
}
@Override public void fillWeightVector(WeightVector w) { // for (Pair<double[], FeatureVector> p : alphafv_map.values()) { for (Pair<double[], FeatureVector> p : al_fv_list) { double alpha = p.getFirst()[0]; FeatureVector fv = p.getSecond(); w.addSparseFeatureVector(fv, alpha); } }
// Clamp: the dual variable must stay non-negative after taking the step.
double new_alpha = Math.max(alpha + step, 0);
// Accumulate the net change in alpha — presumably consumed by the enclosing
// solver (e.g. for a bias/constraint term); TODO confirm against the caller.
sum_alpha += (new_alpha - alpha);
// Incremental weight update: w += y * (delta alpha) * fv, keeping w in sync
// with the dual variables without a full rebuild.
w.addSparseFeatureVector(fv, y * (new_alpha - alpha));
// Persist the updated alpha back into the (alpha, featureVector) pair.
p.setFirst(new_alpha);
// Clamp: the dual variable must stay non-negative after taking the step.
double new_alpha = Math.max(alpha + step, 0);
// Accumulate the net change in alpha for the enclosing solver — TODO confirm
// how sum_alpha is used by the caller.
sum_alpha += (new_alpha - alpha);
// Incremental weight update: w += (delta alpha) * fv. No y factor here —
// the label appears to be folded into alpha already; verify against caller.
w.addSparseFeatureVector(fv, (new_alpha - alpha));
// The pair's first element is a double[] so alpha can be mutated in place;
// slot 0 holds the dual coefficient.
double[] alpha_loss = p.getFirst();
alpha_loss[0] = new_alpha;