/**
 * Computes the cosine similarity of two unit-length vectors.
 * Because both inputs are assumed to be L2-normalized already,
 * the cosine reduces to a plain inner product.
 */
private static double cosine(float[] normalizedVector1, float[] normalizedVector2) {
  return ArrayMath.innerProduct(normalizedVector1, normalizedVector2);
}
/** * Standardize values in this array, i.e., subtract the mean and divide by the standard deviation. * If standard deviation is 0.0, throws a RuntimeException. */ public static void standardize(double[] a) { double m = mean(a); if (Double.isNaN(m)) { throw new RuntimeException("Can't standardize array whose mean is NaN"); } double s = stdev(a); if (s == 0.0 || Double.isNaN(s)) { throw new RuntimeException("Can't standardize array whose standard deviation is 0.0 or NaN"); } addInPlace(a, -m); // subtract mean multiplyInPlace(a, 1.0/s); // divide by standard deviation }
/**
 * Returns the Jensen-Shannon divergence (information radius) between
 * {@code a} and {@code b}: the average of the KL divergences from each
 * distribution to their pointwise mean.
 *
 * @param a one probability distribution
 * @param b another probability distribution of the same length
 * @return the Jensen-Shannon divergence between a and b
 */
public static double jensenShannonDivergence(double[] a, double[] b) {
  // Midpoint distribution m = (a + b) / 2.
  double[] midpoint = pairwiseAdd(a, b);
  multiplyInPlace(midpoint, .5);
  double klA = klDivergence(a, midpoint);
  double klB = klDivergence(b, midpoint);
  return .5 * klA + .5 * klB;
}
/**
 * Returns the element-wise average of the given arrays.
 * All arrays are assumed to have the same length as the first one.
 *
 * @param toSmooth the non-empty list of arrays to average
 * @return a new array holding the pointwise mean
 */
protected static double[] smooth(List<double[]> toSmooth) {
  final int count = toSmooth.size();
  double[] average = new double[toSmooth.get(0).length];
  // Accumulate every array into the running sum, then rescale once.
  for (double[] component : toSmooth) {
    ArrayMath.pairwiseAddInPlace(average, component);
  }
  ArrayMath.multiplyInPlace(average, 1.0 / count);
  return average;
}
/**
 * Makes the values in this array sum to 1.0. Does it in place.
 * If the total is 0.0 or NaN, throws an RuntimeException.
 *
 * @param a the array to normalize (modified in place)
 */
public static void normalize(float[] a) {
  float total = sum(a);
  boolean degenerate = (total == 0.0f) || Double.isNaN(total);
  if (degenerate) {
    throw new RuntimeException("Can't normalize an array with sum 0.0 or NaN");
  }
  // Scale every entry by the reciprocal of the total.
  multiplyInPlace(a, 1.0f / total);
}

public static void L2normalize(float[] a) {
/** * Samples a single position in the sequence. * Does not modify the sequence passed in. * returns the score of the new label for the position to sample * @param sequence the sequence to start with * @param pos the position to sample. * @param temperature the temperature to control annealing */ private Pair<Integer, Double> samplePositionHelper(SequenceModel model, int[] sequence, int pos, double temperature) { double[] distribution = model.scoresOf(sequence, pos); if (temperature!=1.0) { if (temperature==0.0) { // set the max to 1.0 int argmax = ArrayMath.argmax(distribution); Arrays.fill(distribution, Double.NEGATIVE_INFINITY); distribution[argmax] = 0.0; } else { // take all to a power // use the temperature to increase/decrease the entropy of the sampling distribution ArrayMath.multiplyInPlace(distribution, 1.0/temperature); } } ArrayMath.logNormalize(distribution); ArrayMath.expInPlace(distribution); int newTag = ArrayMath.sampleFromDistribution(distribution, random); double newProb = distribution[newTag]; return new Pair<>(newTag, newProb); }
/**
 * Scales the array in place so that its L1 norm is 1.0.
 * If the norm is 0.0 or NaN, throws a RuntimeException whose message
 * includes the array (truncated to 100 entries for large arrays).
 *
 * @param a the array to normalize (modified in place)
 */
public static void L1normalize(double[] a) {
  double total = L1Norm(a);
  if (total == 0.0 || Double.isNaN(total)) {
    if (a.length < 100) {
      throw new RuntimeException("Can't normalize an array with sum 0.0 or NaN: " + Arrays.toString(a));
    }
    // Show only the first 100 entries so the message stays readable.
    double[] head = Arrays.copyOf(a, 100);
    throw new RuntimeException("Can't normalize an array with sum 0.0 or NaN: " + Arrays.toString(head) + " ... ");
  }
  // Divide each value by the total.
  multiplyInPlace(a, 1.0 / total);
}

public static void L2normalize(double[] a) {
/** * Makes the values in this array sum to 1.0. Does it in place. * If the total is 0.0, throws a RuntimeException. * If the total is Double.NEGATIVE_INFINITY, then it replaces the * array with a normalized uniform distribution. CDM: This last bit is * weird! Do we really want that? */ public static void logNormalize(double[] a) { double logTotal = logSum(a); if (logTotal == Double.NEGATIVE_INFINITY) { // to avoid NaN values double v = -Math.log(a.length); for (int i = 0; i < a.length; i++) { a[i] = v; } return; } addInPlace(a, -logTotal); // subtract log total from each value }
double[] newX = ArrayMath.pairwiseSubtract(x, means); val += otherPrior.compute(newX, grad); return val; double norm = ArrayMath.norm_1(x) / sigmaSq; double d; if (norm > 30.0) {
System.arraycopy(thisFunc.derivativeAt(x,x,thisFunc.dataDimension()),0,thisGrad,0,thisGrad.length); System.arraycopy(thisFunc.HdotVAt(x,x,thisGrad,thisFunc.dataDimension()),0,fullHx,0,fullHx.length); double fullNorm = ArrayMath.norm(fullHx); double hessScale = ((double) thisFunc.dataDimension()) / ((double) batchSize); thisFunc.sampleMethod = AbstractStochasticCachingDiffFunction.SamplingMethod.RandomWithReplacement; System.arraycopy(thisFunc.derivativeAt(x,x,batchSize),0,thisGrad,0,thisGrad.length); System.arraycopy(thisFunc.HdotVAt(x,x,thisGrad,batchSize),0,thisHx,0,thisHx.length); ArrayMath.multiplyInPlace(thisHx,hessScale); double thisNorm = ArrayMath.norm(thisHx); double sim = ArrayMath.innerProduct(thisHx,fullHx)/(thisNorm*fullNorm); double rat = thisNorm/fullNorm; k += 1;
/**
 * Computes the search direction dir = -H * fg via the standard two-loop
 * L-BFGS recursion over the stored displacement/gradient pairs
 * (sList/yList with precomputed rho values in roList), then negates it.
 *
 * @param dir output buffer for the search direction (overwritten)
 * @param fg the current gradient
 * @throws SQNMinimizer.SurpriseConvergence if the latest y vector is zero
 */
private void computeDir(double[] dir, double[] fg) throws SQNMinimizer.SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);
  int mmm = sList.size();
  double[] as = new double[mmm];
  double[] factors = new double[dir.length];
  // First loop (newest to oldest): subtract out each pair's contribution,
  // remembering the alpha coefficients for the second loop.
  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = roList.get(i) * ArrayMath.innerProduct(sList.get(i), dir);
    plusAndConstMult(dir, yList.get(i), -as[i], dir);
  }
  // multiply by hessian approximation
  if (mmm != 0) {
    // Scale by gamma = s.y / y.y, the usual initial inverse-Hessian guess.
    double[] y = yList.get(mmm - 1);
    double yDotY = ArrayMath.innerProduct(y, y);
    if (yDotY == 0) {
      throw new SQNMinimizer.SurpriseConvergence("Y is 0!!");
    }
    double gamma = ArrayMath.innerProduct(sList.get(mmm - 1), y) / yDotY;
    ArrayMath.multiplyInPlace(dir, gamma);
  }else if(mmm == 0){
    //This is a safety feature preventing too large of an initial step (see Yu Schraudolph Gunter)
    ArrayMath.multiplyInPlace(dir,epsilon);
  }
  // Second loop (oldest to newest): add back the corrections; cPosDef
  // scales the alpha term. NOTE(review): factors is accumulated here but
  // never read in this method — presumably consumed elsewhere; confirm.
  for (int i = 0; i < mmm; i++) {
    double b = roList.get(i) * ArrayMath.innerProduct(yList.get(i), dir);
    plusAndConstMult(dir, sList.get(i), cPosDef*as[i] - b, dir);
    plusAndConstMult(ArrayMath.pairwiseMultiply(yList.get(i),sList.get(i)),factors,1,factors);
  }
  // Negate to get a descent direction.
  ArrayMath.multiplyInPlace(dir, -1);
}
/**
 * Returns the maximum value in the array, i.e., the value at the
 * index chosen by {@code argmax}.
 */
public static double max(double[] a) {
  int maxIndex = argmax(a);
  return a[maxIndex];
}
/**
 * Returns the sum of an array of doubles.
 *
 * @param a the array to total
 * @return the sum of all elements (0.0 for an empty array)
 */
public static double sum(double[] a) {
  double total = 0.0;
  // Accumulate in index order, matching sum(a, 0, a.length).
  for (double value : a) {
    total += value;
  }
  return total;
}
/**
 * computeDir()
 *
 * This function will calculate an approximation of the inverse hessian based
 * off the seen s,y vector pairs. This particular approximation uses the BFGS
 * update via the standard two-loop recursion, then negates the result to
 * produce a descent direction. When OWL-QN is enabled, the direction is
 * additionally constrained for L1 regularization.
 *
 * @param dir output buffer for the search direction (overwritten)
 * @param fg the current (pseudo-)gradient
 * @param x the current parameter vector (used only for OWL-QN constraint)
 * @param qn history of s,y pairs and rho values
 * @param func the objective (used only for OWL-QN constraint)
 * @param sb diagnostic output accumulator
 * @throws SurpriseConvergence propagated from the initial-Hessian application
 */
private void computeDir(double[] dir, double[] fg, double[] x, QNInfo qn, Function func, StringBuilder sb) throws SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);
  int mmm = qn.size();
  double[] as = new double[mmm];
  // First loop (newest to oldest): strip each pair's contribution,
  // saving the alpha coefficients for the second loop.
  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = qn.getRho(i) * ArrayMath.innerProduct(qn.getS(i), dir);
    plusAndConstMult(dir, qn.getY(i), -as[i], dir);
  }
  // multiply by hessian approximation
  qn.applyInitialHessian(dir, sb);
  // Second loop (oldest to newest): add the corrections back in.
  for (int i = 0; i < mmm; i++) {
    double b = qn.getRho(i) * ArrayMath.innerProduct(qn.getY(i), dir);
    plusAndConstMult(dir, qn.getS(i), as[i] - b, dir);
  }
  // Negate to get a descent direction.
  ArrayMath.multiplyInPlace(dir, -1);
  if (useOWLQN) { // step (2) in Galen & Gao 2007
    constrainSearchDir(dir, fg, x, func);
  }
}
/**
 * {@inheritDoc}
 *
 * Returns the weighted combination of the component models' scores at
 * the given position: either a weighted sum over the {@code models}
 * array, or (when that is null) the two-model mix of
 * {@code model1}/{@code model2}.
 */
@Override
public double[] scoresOf(int[] sequence, int pos) {
  if (models != null) {
    // General case: fold every model's scores into the first, weighted.
    double[] combined = ArrayMath.multiply(models[0].scoresOf(sequence, pos), wts[0]);
    for (int m = 1; m < models.length; m++) {
      ArrayMath.addMultInPlace(combined, models[m].scoresOf(sequence, pos), wts[m]);
    }
    return combined;
  }
  // Legacy two-model case.
  double[] scores1 = model1.scoresOf(sequence, pos);
  double[] scores2 = model2.scoresOf(sequence, pos);
  double[] combined = new double[scores1.length];
  for (int i = 0; i < combined.length; i++) {
    combined[i] = model1Wt * scores1[i] + model2Wt * scores2[i];
  }
  return combined;
}
/**
 * Merge the given {@code Cost} data with the data in this
 * instance: totals are summed and gradients are accumulated in place.
 *
 * @param otherCost the cost whose totals and gradients are folded in
 */
public void merge(Cost otherCost) {
  this.cost += otherCost.getCost();
  // NOTE(review): percentCorrect is summed here, not averaged —
  // presumably a later step divides by the number of merges; confirm.
  this.percentCorrect += otherCost.getPercentCorrect();
  ArrayMath.addInPlace(gradW1, otherCost.getGradW1());
  // gradb1 uses pairwiseAddInPlace while the others use addInPlace —
  // presumably because gradb1 is a vector and the rest are matrices;
  // verify against the field declarations.
  ArrayMath.pairwiseAddInPlace(gradb1, otherCost.getGradb1());
  ArrayMath.addInPlace(gradW2, otherCost.getGradW2());
  ArrayMath.addInPlace(gradE, otherCost.getGradE());
}
/**
 * Returns the minimum value in the array, i.e., the value at the
 * index chosen by {@code argmin}.
 */
public static float min(float[] a) {
  int minIndex = argmin(a);
  return a[minIndex];
}
public static void L2normalize(double[] a) { double total = L2Norm(a); if (total == 0.0 || Double.isNaN(total)) { if (a.length < 100) { throw new RuntimeException("Can't normalize an array with sum 0.0 or NaN: " + Arrays.toString(a)); } else { double[] aTrunc = new double[100]; System.arraycopy(a, 0, aTrunc, 0, 100); throw new RuntimeException("Can't normalize an array with sum 0.0 or NaN: " + Arrays.toString(aTrunc) + " ... "); } } multiplyInPlace(a, 1.0/total); // divide each value by total }