/**
 * Computes the matrix of pairwise dot products between the slices of the given basis.
 */
public static VectorIterable pairwiseInnerProducts(Iterable<MatrixSlice> basis) {
  DenseMatrix out = null;
  for (MatrixSlice slice1 : basis) {
    List<Double> dots = Lists.newArrayList();
    for (MatrixSlice slice2 : basis) {
      dots.add(slice1.vector().dot(slice2.vector()));
    }
    if (out == null) {
      out = new DenseMatrix(dots.size(), dots.size());
    }
    for (int i = 0; i < dots.size(); i++) {
      out.set(slice1.index(), i, dots.get(i));
    }
  }
  return out;
}
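// Hedged usage sketch (not from the original source): this assumes the static helper
// above is visible in scope and that Mahout's DenseMatrix iterates its rows as
// MatrixSlice instances, so it can be passed directly as the Iterable<MatrixSlice>
// basis. With two orthonormal rows, the resulting Gram matrix of pairwise inner
// products should come out as the 2x2 identity. The values are illustrative only.
DenseMatrix exampleBasis = new DenseMatrix(new double[][] {
    {1.0, 0.0, 0.0},
    {0.0, 1.0, 0.0}
});
VectorIterable gram = pairwiseInnerProducts(exampleBasis);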
@Test
public void testGetValues() {
  DenseMatrix m = new DenseMatrix(10, 10);
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      m.set(i, j, 10 * i + j);
    }
  }
  double[][] values = m.getBackingStructure();
  Assert.assertEquals(values.length, 10);
  Assert.assertEquals(values[0].length, 10);
  Assert.assertEquals(values[9][9], 99.0, 0.0);
}
/**
 * Returns a matrix related to the confusion matrix and to the log-likelihood. For a
 * reasonably accurate classifier, N + entropy is nearly the same as the confusion matrix
 * because log(1 - eps) \approx -eps if eps is small.
 *
 * For lower-accuracy classifiers, this measure gives a better picture of how
 * things work out.
 *
 * Also, by definition, log-likelihood = sum(diag(entropy))
 * @return a cell-by-cell breakdown of the log-likelihood
 */
public Matrix entropy() {
  if (!hasScore) {
    // find a constant score that would optimize log-likelihood, but use a dash of Bayesian
    // conservatism to avoid dividing by zero or taking log(0)
    double p = (0.5 + confusion.get(1, 1)) / (1 + confusion.get(0, 0) + confusion.get(1, 1));
    entropy.set(0, 0, confusion.get(0, 0) * Math.log1p(-p));
    entropy.set(0, 1, confusion.get(0, 1) * Math.log(p));
    entropy.set(1, 0, confusion.get(1, 0) * Math.log1p(-p));
    entropy.set(1, 1, confusion.get(1, 1) * Math.log(p));
  }
  return entropy;
}
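// Illustrative follow-up (not part of the original class): per the Javadoc above,
// the total log-likelihood is the sum of the diagonal of entropy(). For the 2x2
// case that is just the two diagonal cells. "auc" here is a hypothetical instance
// of the enclosing evaluation class.
Matrix e = auc.entropy();
double logLikelihood = e.get(0, 0) + e.get(1, 1);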