/** * Centers the matrix in feature space according to Smola et Schoelkopf, * Learning with Kernels p. 431 Alters the input matrix. If you still need the * original matrix, use * <code>centeredMatrix = centerKernelMatrix(uncenteredMatrix.copy()) {</code> * * @param matrix the matrix to be centered * @return centered matrix (for convenience) */ public static double[][] centerMatrix(final double[][] matrix) { // FIXME: implement more efficiently. Maybe in matrix class itself? final double[][] normalizingMatrix = new double[matrix.length][matrix[0].length]; for(double[] row : normalizingMatrix) { Arrays.fill(row, 1.0 / matrix[0].length); } return times(plusEquals(minusEquals(minus(matrix, times(normalizingMatrix, matrix)), times(matrix, normalizingMatrix)), times(normalizingMatrix, matrix)), normalizingMatrix); }
/**
 * Multiply a matrix by a scalar component-wise, m3 = m1 * s1.
 *
 * @param m1 Input matrix
 * @param s1 scalar
 * @return m1 * s1, in a new matrix
 */
public static double[][] times(final double[][] m1, final double s1) {
  // Scale into a freshly allocated matrix in a single pass, instead of
  // copying first and scaling afterwards; m1 is left untouched.
  final double[][] scaled = new double[m1.length][];
  for(int r = 0; r < m1.length; r++) {
    final double[] src = m1[r];
    final double[] dst = scaled[r] = new double[src.length];
    for(int c = 0; c < src.length; c++) {
      dst[c] = src[c] * s1;
    }
  }
  return scaled;
}
/**
 * Compute the Mahalanobis distance using the given weight matrix.
 *
 * @param weightMatrix Weight Matrix
 * @param o1_minus_o2 Delta vector
 * @return Mahalanobis distance
 */
public static double mahalanobisDistance(double[][] weightMatrix, double[] o1_minus_o2) {
  double sqrDist = VMath.transposeTimesTimes(o1_minus_o2, weightMatrix, o1_minus_o2);
  // Tiny negative values are numerical round-off: flip their sign. Larger
  // negative values are kept and will produce NaN from the square root.
  if(sqrDist > -0.000000001 && sqrDist < 0) {
    sqrDist = -sqrDist;
  }
  return Math.sqrt(sqrDist);
}
/**
 * Component-wise matrix operation: m3 = m1 + m2 * s2.
 *
 * @param m1 Input matrix
 * @param m2 another matrix
 * @param s2 scalar
 * @return m1 + m2 * s2 in a new matrix
 */
public static double[][] plusTimes(final double[][] m1, final double[][] m2, final double s2) {
  // Compute directly into a new matrix in one pass instead of copying m1
  // first and adding afterwards; neither input is modified.
  final double[][] result = new double[m1.length][];
  for(int r = 0; r < m1.length; r++) {
    final double[] a = m1[r], b = m2[r];
    final double[] out = result[r] = new double[a.length];
    for(int c = 0; c < a.length; c++) {
      out[c] = a[c] + b[c] * s2;
    }
  }
  return result;
}
/**
 * Component-wise matrix sum: m3 = m1 + m2.
 *
 * @param m1 Input matrix
 * @param m2 another matrix
 * @return m1 + m2 in a new Matrix
 */
public static double[][] plus(final double[][] m1, final double[][] m2) {
  // Compute directly into a new matrix in one pass instead of copying m1
  // first and adding afterwards; neither input is modified.
  // (Javadoc previously said "m1 + m1" — the method returns m1 + m2.)
  final double[][] sum = new double[m1.length][];
  for(int r = 0; r < m1.length; r++) {
    final double[] a = m1[r], b = m2[r];
    final double[] out = sum[r] = new double[a.length];
    for(int c = 0; c < a.length; c++) {
      out[c] = a[c] + b[c];
    }
  }
  return sum;
}
/**
 * Returns an orthonormalization of this matrix.
 *
 * Classical Gram-Schmidt over the columns: each column i is made orthogonal
 * to the already-orthogonalized columns 0..i-1, then all columns are scaled
 * to unit length. Column 0 is only normalized, never rotated.
 *
 * @param m1 Input matrix
 * @return the orthonormalized matrix
 */
public static double[][] orthonormalize(final double[][] m1) {
  final int columndimension = getColumnDimensionality(m1);
  final double[][] v = copy(m1);
  // FIXME: optimize - excess copying!
  for(int i = 1; i < columndimension; i++) {
    // u_i is the ORIGINAL column (from m1, not v) to be orthogonalized.
    final double[] u_i = getCol(m1, i);
    // Accumulates the projections of u_i onto v_0 .. v_{i-1}.
    final double[] sum = new double[m1.length];
    for(int j = 0; j < i; j++) {
      final double[] v_j = getCol(v, j);
      // Projection coefficient <u_i, v_j> / <v_j, v_j>.
      final double scalar = scalarProduct(u_i, v_j) / scalarProduct(v_j, v_j);
      plusEquals(sum, times(v_j, scalar));
    }
    // v_i = u_i minus its projections onto the previous columns.
    final double[] v_i = minus(u_i, sum);
    setCol(v, i, v_i);
  }
  // Finally scale every column to unit Euclidean length.
  normalizeColumns(v);
  return v;
}
double[] first = normalizeEquals(vectors.get(0)); double[][] ret = new double[first.length][vectors.size()]; setCol(ret, 0, first); for(int i = 1; i < vectors.size(); i++) { double[] v_j = getCol(ret, j); // Must have length 1! double f = transposeTimes(v_i, v_j); // / transposeTimes(v_j, v_j); if(Double.isNaN(f)) { if(LOG.isDebuggingFine()) { minusTimesEquals(u_i, v_j, f); final double len_u_i = euclideanLength(u_i); if(len_u_i < Double.MIN_NORMAL) { if(LOG.isDebuggingFine()) { timesEquals(u_i, 1 / len_u_i); setCol(ret, i, u_i);
/**
 * Build a convex hull to approximate the sphere.
 *
 * @param pc Principal components
 * @return Polygon
 */
protected Polygon makeHull(double[][] pc) {
  FilteredConvexHull2D hull = new FilteredConvexHull2D();
  double[] diag = new double[] { 0, 0 };
  for(int i = 0; i < pc.length; i++) {
    final double[] p = pc[i];
    // Each principal component in both directions.
    hull.add(p);
    hull.add(times(p, -1));
    for(int k = i + 1; k < pc.length; k++) {
      // Intermediate directions between pairs of components, scaled back
      // onto the sphere by sqrt(1/2).
      final double[] psum = timesEquals(plus(p, pc[k]), MathUtil.SQRTHALF);
      final double[] pdif = timesEquals(minus(p, pc[k]), MathUtil.SQRTHALF);
      hull.add(psum);
      hull.add(times(psum, -1));
      hull.add(pdif);
      hull.add(times(pdif, -1));
    }
    plusEquals(diag, p);
  }
  // Diagonal direction, normalized by sqrt of the dimensionality.
  timesEquals(diag, 1.0 / FastMath.sqrt(pc.length));
  hull.add(diag);
  hull.add(times(diag, -1));
  return hull.getHull();
}
/**
 * Computes the loglikelihood of all normal objects. Gaussian model
 *
 * @param objids Object IDs for 'normal' objects.
 * @param anomalous Object IDs currently flagged as anomalous; these are
 *        excluded from the sum
 * @param builder Covariance matrix builder
 * @param relation Database
 * @return loglikelihood for normal objects
 */
private double loglikelihoodNormal(DBIDs objids, SetDBIDs anomalous, CovarianceMatrix builder, Relation<V> relation) {
  double[] mean = builder.getMeanVector();
  final LUDecomposition lu = new LUDecomposition(builder.makeSampleMatrix());
  double[][] covInv = lu.inverse();
  // for each object compute probability and sum
  // Gaussian log normalization constant, counted once per normal object:
  // -log(sqrt((2*pi)^d * det(Sigma)))
  double prob = (objids.size() - anomalous.size()) * -FastMath.log(FastMath.sqrt(MathUtil.powi(MathUtil.TWOPI, RelationUtil.dimensionality(relation)) * lu.det()));
  for(DBIDIter iter = objids.iter(); iter.valid(); iter.advance()) {
    if(!anomalous.contains(iter)) {
      // Quadratic term: -(x - mu)^T Sigma^-1 (x - mu) / 2
      double[] xcent = minusEquals(relation.get(iter).toArray(), mean);
      prob -= .5 * transposeTimesTimes(xcent, covInv, xcent);
    }
  }
  return prob;
}
/** * Testing the normalizeVector) and normalizeEquals(Vector) methods of the * {@link VMath} class. */ @Test public void testNormalize() { final double[] v1 = copy(TESTVEC), v1_copy = copy(v1); final double[] v1_normal = normalize(v1); // Test that both methods return a vector with length 1. // That more methods ensure the result we use squareSum instead of // euclideanLength here assertEquals(1, squareSum(v1_normal), 0.); assertEquals(1, squareSum(normalizeEquals(v1)), 0.); // Check that both methods return the same Vector assertArrayEquals(v1_normal, v1, 0.); // Check that the normalize Vector times the euclidean length of the // original Vector equals the original vector assertArrayEquals(v1_copy, times(v1_normal, euclideanLength(v1_copy)), 1e-15); }
/**
 * Returns the error vectors after projection.
 *
 * @param p a vector in the space underlying this solution
 * @return the error vectors
 */
public double[] errorVector(V p) {
  if(weakEigenvectors.length == 0) {
    return EMPTY_VECTOR;
  }
  // Center the vector, then project onto the weak eigenvectors and back.
  final double[] centered = minusEquals(p.toArray(), centroid);
  return times(weakEigenvectors, transposeTimes(weakEigenvectors, centered));
}
/**
 * Merge a per-thread partial result into the shared aggregate state.
 *
 * @param inst Thread-local processor instance to merge
 */
@Override
public void cleanup(Processor.Instance inst) {
  @SuppressWarnings("unchecked")
  Instance<V> instance = (Instance<V>) inst;
  synchronized(this) {
    changed |= instance.changed;
    for(int i = 0; i < centroids.length; i++) {
      int sizeb = instance.sizes[i];
      if(sizeb == 0) {
        continue; // Nothing to merge for this cluster.
      }
      int sizea = sizes[i];
      double sum = sizea + sizeb;
      double[] cent = centroids[i];
      if(sizea > 0) {
        // Reweight the existing mean by its share of the combined size.
        timesEquals(cent, sizea / sum);
      }
      // NOTE(review): instance.centroids[i] appears to hold a vector SUM
      // (not a mean), hence the 1/sum factor — confirm against the producer.
      plusTimesEquals(cent, instance.centroids[i], 1. / sum);
      sizes[i] += sizeb;
    }
    // Merge variance sums ONCE per instance: this call is loop-invariant
    // (it does not depend on i). Inside the loop it was added once per
    // non-empty cluster, over-counting the variances.
    plusEquals(varsum, instance.varsum);
  }
}
/**
 * Inserts the specified vector into the given orthonormal matrix
 * <code>v</code> at column <code>corrDim</code>. After insertion the matrix
 * <code>v</code> is orthonormalized and column <code>corrDim</code> of matrix
 * <code>e_czech</code> is set to the <code>corrDim</code>-th unit vector.
 *
 * @param v the orthonormal matrix of the eigenvectors
 * @param vector the vector to be inserted
 * @param corrDim the column at which the vector should be inserted
 */
private void adjust(double[][] v, double[] vector, int corrDim) {
  // Gram-Schmidt step: remove the projections of the new vector onto the
  // first corrDim (orthonormal) rows, then normalize the remainder.
  double[] projection = new double[v.length];
  for(int j = 0; j < corrDim; j++) {
    plusTimesEquals(projection, v[j], transposeTimes(vector, v[j]));
  }
  v[corrDim] = normalizeEquals(minus(vector, projection));
}
/** * Compute the Mahalanobis distance from the centroid for a given vector. * * Note: used by P3C. * * @param vec Vector * @return Mahalanobis distance */ public double mahalanobisDistance(NumberVector vec) { // TODO: this allocates one array. return squareSum(chol.solveLInplace(minusEquals(vec.toArray(), mean))); }
/**
 * Component-wise matrix subtraction m3 = m1 - m2.
 *
 * @param m1 Input matrix
 * @param m2 another matrix
 * @return m1 - m2 in a new matrix
 */
public static double[][] minus(final double[][] m1, final double[][] m2) {
  // Compute directly into a new matrix in one pass instead of copying m1
  // first and subtracting afterwards; neither input is modified.
  final double[][] diff = new double[m1.length][];
  for(int r = 0; r < m1.length; r++) {
    final double[] a = m1[r], b = m2[r];
    final double[] out = diff[r] = new double[a.length];
    for(int c = 0; c < a.length; c++) {
      out[c] = a[c] - b[c];
    }
  }
  return diff;
}
@Override
public double[] projectScaledToRender(double[] v) {
  // Reorder axes, shift to be centered on the origin, mirror the second
  // axis, then scale up to render coordinates.
  double[] r = rearrange(v);
  VMath.minusEquals(r, .5);
  r = flipSecondEquals(r);
  VMath.timesEquals(r, SCALE);
  return r;
}
@Override
public double[] projectRenderToScaled(double[] v) {
  // Inverse transformation: scale down from render coordinates, mirror the
  // second axis back, undo the origin shift, then restore the axis order.
  double[] r = VMath.times(v, INVSCALE);
  r = flipSecondEquals(r);
  VMath.plusEquals(r, .5);
  return dearrange(r);
}
/** * Fake Voronoi diagram. For two means only * * @param proj Projection * @param means Mean vectors * @return SVG path */ public static SVGPath drawFakeVoronoi(Projection2D proj, List<double[]> means) { CanvasSize viewport = proj.estimateViewport(); final SVGPath path = new SVGPath(); // Difference final double[] dirv = VMath.minus(means.get(1), means.get(0)); VMath.rotate90Equals(dirv); double[] dir = proj.fastProjectRelativeDataToRenderSpace(dirv); // Mean final double[] mean = VMath.plus(means.get(0), means.get(1)); VMath.timesEquals(mean, 0.5); double[] projmean = proj.fastProjectDataToRenderSpace(mean); double factor = viewport.continueToMargin(projmean, dir); path.moveTo(projmean[0] + factor * dir[0], projmean[1] + factor * dir[1]); // Inverse direction: dir[0] *= -1; dir[1] *= -1; factor = viewport.continueToMargin(projmean, dir); path.drawTo(projmean[0] + factor * dir[0], projmean[1] + factor * dir[1]); return path; } }
@Test
public void testAsymmetric() {
  // Transposed variant of the classic singular 1..9 matrix.
  double[][] a = transpose(new double[][] { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } });
  EigenvalueDecomposition ev = new EigenvalueDecomposition(a);
  // The decomposition must satisfy A V = V D up to numerical precision.
  double[][] av = times(a, ev.getV());
  double[][] vd = times(ev.getV(), ev.getD());
  assertEquals("Asymmetric decomposition", 0., normF(minus(av, vd)), 1e-13);
}
}
@Test
public void testJamaExample() {
  // Example matrix from the original JAMA test suite (transposed).
  double[][] m = transpose(new double[][] { { 1., 2., 3., 4. }, { 5., 6., 7., 8. }, { 9., 10., 11., 12. } });
  QRDecomposition qr = new QRDecomposition(m);
  final double[][] q = qr.getQ(), r = qr.getR();
  // Q must have orthonormal columns: Q^T Q = I.
  assertTrue(almostEquals(unitMatrix(q[0].length), transposeTimes(q, q)));
  // R must be upper triangular.
  checkTriangular(r);
  // And Q R must reconstruct the input matrix.
  assertTrue("Not a proper decomposition.", almostEquals(m, times(q, r), 1e-14));
}