/**
 * Performs one gradient-descent step on the bias term.
 * <p>
 * Computes {@code bias - biasLossWeight * biasGrad}. Note that
 * {@code scaleInplace} mutates {@code biasGrad}, so the caller must not
 * reuse the unscaled gradient afterwards.
 *
 * @param biasGrad       the gradient of the loss w.r.t. the bias (mutated in place)
 * @param biasLossWeight the step size applied to the gradient
 * @return the updated bias matrix (a new matrix; {@code this.bias} is untouched)
 */
protected Matrix updateBias(Matrix biasGrad, double biasLossWeight) {
	final Matrix scaledGrad = MatlibMatrixUtils.scaleInplace(biasGrad, biasLossWeight);
	return MatlibMatrixUtils.minus(this.bias, scaledGrad);
}

protected Matrix updateW(Matrix currentW, double wLossWeighted, double weightedLambda) {
/**
 * Performs one proximal-gradient step on the U matrix:
 * {@code U' = prox(U - uLossWeight * grad(U), uWeightedLambda)}.
 * <p>
 * The gradient matrix returned by {@code loss.gradient} is scaled in place
 * before being subtracted; the regulariser's proximal operator is then
 * applied to the stepped matrix.
 *
 * @param currentU        the current U matrix (not mutated)
 * @param uLossWeight     the step size applied to the loss gradient
 * @param uWeightedLambda the regularisation weight handed to {@code regul.prox}
 * @return the updated U matrix
 */
protected Matrix updateU(Matrix currentU, double uLossWeight, double uWeightedLambda) {
	final Matrix gradU = loss.gradient(currentU);
	MatlibMatrixUtils.scaleInplace(gradU, uLossWeight);
	final Matrix stepped = MatlibMatrixUtils.minus(currentU, gradU);
	return regul.prox(stepped, uWeightedLambda);
}

private double lambdat(int iter, double lambda) {
/**
 * Performs one proximal-gradient step on the W matrix:
 * {@code W' = prox(W - wLossWeighted * grad(W), weightedLambda)}.
 * <p>
 * Mirrors {@code updateU}: the loss gradient is scaled in place, subtracted
 * from the current W, and the regulariser's proximal operator is applied.
 *
 * @param currentW       the current W matrix (not mutated)
 * @param wLossWeighted  the step size applied to the loss gradient
 * @param weightedLambda the regularisation weight handed to {@code regul.prox}
 * @return the updated W matrix
 */
protected Matrix updateW(Matrix currentW, double wLossWeighted, double weightedLambda) {
	final Matrix gradW = loss.gradient(currentW);
	MatlibMatrixUtils.scaleInplace(gradW, wLossWeighted);
	final Matrix stepped = MatlibMatrixUtils.minus(currentW, gradW);
	return regul.prox(stepped, weightedLambda);
}

protected Matrix updateU(Matrix currentU, double uLossWeight, double uWeightedLambda) {
// Accumulate the regularised outer-product sum into the Laplacian:
// laplacian += conf.lambda * ujujSum.
// NOTE(review): scaleInplace mutates ujujSum — confirm it is not reused
// unscaled after this point.
MatlibMatrixUtils.plusInplace(laplacian, MatlibMatrixUtils.scaleInplace(ujujSum, conf.lambda));
// Scale the model parameters w and u in place by `weighting`; when bias mode
// is enabled, the bias is scaled by the same factor so all parameters decay
// consistently. (The enclosing if-block continues beyond this excerpt.)
MatlibMatrixUtils.scaleInplace(this.w,weighting); MatlibMatrixUtils.scaleInplace(this.u,weighting); if(this.biasMode){ MatlibMatrixUtils.scaleInplace(this.bias,weighting);
/**
 * Updates the cached inverse kernel matrix {@code Kinv} after a support has
 * been added.
 * <p>
 * For more than one support, the new inverse is built as
 * {@code [Kinv 0; 0... 0] + (1/delta) * [d -1]' . [d -1]} where {@code d} is
 * {@code d_optimal}. For a single support, {@code Kinv} is simply the scalar
 * reciprocal of the kernel evaluated on that support against itself.
 * <p>
 * Fix: the original allocated {@code Vector expandD = Vector.dense(d_optimal.size() + 1)}
 * but only ever read {@code expandD.size()} (== {@code d_optimal.size() + 1});
 * the dead allocation has been removed.
 *
 * @param d_optimal the optimal d vector for the newly added support
 * @param delta     the scalar divisor applied to the outer-product term
 */
private void updateKinv(Vector d_optimal, double delta) {
	Matrix newKinv = null;
	if (this.supports.size() > 1) {
		// Build the column vector [d -1]' as a (n+1) x 1 matrix.
		Matrix expandDMat = DenseMatrix.dense(d_optimal.size() + 1, 1);
		MatlibMatrixUtils.setSubVector(expandDMat.column(0), 0, d_optimal);
		expandDMat.column(0).put(d_optimal.size(), -1);
		// Construct the expanded [Kinv 0; 0... 0] matrix.
		newKinv = new DenseMatrix(Kinv.rowCount() + 1, Kinv.columnCount() + 1);
		MatlibMatrixUtils.setSubMatrix(newKinv, 0, 0, Kinv);
		// Form (1/delta) * [d -1]' . [d -1] and add it on.
		Matrix expandDMult = newKinv.newInstance();
		MatlibMatrixUtils.dotProductTranspose(expandDMat, expandDMat, expandDMult);
		MatlibMatrixUtils.scaleInplace(expandDMult, 1 / delta);
		MatlibMatrixUtils.plusInplace(newKinv, expandDMult);
	} else {
		// Single support: Kinv is the 1x1 reciprocal of k(x, x).
		double[] only = this.supports.get(0);
		newKinv = DenseMatrix.dense(1, 1);
		newKinv.put(0, 0, 1 / this.kernel.apply(IndependentPair.pair(only, only)));
	}
	this.Kinv = newKinv;
}
private void updateKinv(Vector d_optimal, double delta) { Matrix newKinv = null; // We're updating Kinv by calculating: [ Kinv 0; 0... 0] + (1/delta) [d -1]' . [d -1] // construct the column vector matrix [d -1]' Matrix expandDMat = DenseMatrix.dense(d_optimal.size() + 1, 1); MatlibMatrixUtils.setSubVector(expandDMat.column(0), 0, d_optimal); expandDMat.column(0).put(d_optimal.size(), -1); // construct a new, expanded Kinv matrix newKinv = new DenseMatrix(Kinv.rowCount()+1, Kinv.columnCount() + 1); MatlibMatrixUtils.setSubMatrix(newKinv, 0, 0, Kinv); // construct [d -1]' [d -1] Matrix expandDMult = newKinv.newInstance(); MatlibMatrixUtils.dotProductTranspose(expandDMat, expandDMat, expandDMult); // scale the new matrix by 1/delta MatlibMatrixUtils.scaleInplace(expandDMult, 1/delta); // add it to the new Kinv MatlibMatrixUtils.plusInplace(newKinv, expandDMult); this.Kinv = newKinv; }
// Scale P in place by the constant C, completing P = C * Kbi . Kbi^T
// (P presumably already holds Kbi . Kbi^T at this point — confirm upstream).
MatlibMatrixUtils.scaleInplace(P, C); // P = C * Kbi . Kbi^T