/**
 * Softplus activation: log(1 + e^x).
 * <p>
 * For large inputs {@code FastMath.exp(input)} overflows to +Infinity even
 * though the true softplus value is ~x, so inputs past the point where
 * log(1 + e^x) equals x in double precision (x > ~37, since e^-37 is far
 * below one ulp of 37) are returned directly.
 */
@Override
public double apply(double input) {
  if (input > 37d) {
    // softplus(x) == x to double precision here; avoids exp overflow.
    return input;
  }
  return MathUtils.guardedLogarithm(1 + FastMath.exp(input));
}
/**
 * Computes the loss as the sum of the element-wise product of the outcome
 * vector with the (guarded) logarithm of the hypothesis vector.
 */
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  DoubleVector logHypothesis = MathUtils.logVector(hypothesis);
  return y.multiply(logHypothesis).sum();
}
/**
 * Normalizes the given dataset (inplace), by subtracting the mean and
 * dividing by the stddev. Dataset will have 0 mean and stddev of 1.
 *
 * @return a tuple of the mean and the stddev.
 */
public static Tuple<DoubleVector, DoubleVector> meanNormalizeColumns(
    Dataset dataset) {
  // Delegate to the filtered variant with a predicate accepting every column.
  return meanNormalizeColumns(dataset, (column) -> true);
}
result.auc = MathUtils.computeAUC(outcomePredictedPairs); } else { int[][] confusionMatrix = new int[result.numLabels][result.numLabels]; DoubleVector outcomeVector = testOutcome[i]; result.logLoss += outcomeVector .multiply(MathUtils.logVector(predicted)).sum(); int outcomeClass = outcomeVector.maxIndex(); int prediction = classifier.extractPredictedClass(predicted);
/**
 * Computes the loss as the sum of the element-wise product of the outcome
 * matrix with the (guarded) logarithm of the hypothesis matrix, averaged
 * over the number of rows.
 */
@Override
public double calculateLoss(DoubleMatrix y, DoubleMatrix hypothesis) {
  DoubleMatrix logHypothesis = MathUtils.logMatrix(hypothesis);
  double totalLoss = y.multiplyElementWise(logHypothesis).sum();
  return totalLoss / y.getRowCount();
}
/**
 * Scales a vector into the interval given by min and max.
 *
 * @param input the input vector.
 * @param fromMin the lower bound of the input interval.
 * @param fromMax the upper bound of the input interval.
 * @param toMin the lower bound of the target interval.
 * @param toMax the upper bound of the target interval.
 * @return the new vector with scaled values.
 */
public static DoubleVector minMaxScale(DoubleVector input, double fromMin,
    double fromMax, double toMin, double toMax) {
  DoubleVector scaled = new DenseDoubleVector(input.getDimension());
  double[] values = input.toArray();
  // Scale every component via the scalar overload.
  for (int index = 0; index < values.length; index++) {
    scaled.set(index, minMaxScale(values[index], fromMin, fromMax, toMin, toMax));
  }
  return scaled;
}
/**
 * Logistic (cross-entropy style) loss over matrices of outcomes and
 * hypotheses, averaged over the number of rows.
 * <p>
 * NOTE(review): the vector overload of this loss builds its terms with
 * {@code subtractFrom(1d)} (i.e. 1 - y and 1 - h), whereas this matrix
 * version uses {@code subtractBy(1d)} and {@code multiply(-1d)}. Confirm
 * that {@code DoubleMatrix.subtractBy(double)} has "scalar minus matrix"
 * semantics here — otherwise the sign conventions of the two overloads
 * diverge.
 */
@Override
public double calculateLoss(DoubleMatrix y, DoubleMatrix hypothesis) {
  // presumably (1 - y) — TODO confirm subtractBy semantics
  DoubleMatrix negativeOutcome = y.subtractBy(1.0d);
  // -y, used to weight the log(h) term
  DoubleMatrix inverseOutcome = y.multiply(-1d);
  // presumably (1 - h) — TODO confirm subtractBy semantics
  DoubleMatrix negativeHypo = hypothesis.subtractBy(1d);
  // guarded log protects against log(0)/log(negative) edge cases
  DoubleMatrix negativeLogHypo = MathUtils.logMatrix(negativeHypo);
  DoubleMatrix positiveLogHypo = MathUtils.logMatrix(hypothesis);
  DoubleMatrix negativePenalty = negativeOutcome
      .multiplyElementWise(negativeLogHypo);
  DoubleMatrix positivePenalty = inverseOutcome
      .multiplyElementWise(positiveLogHypo);
  // mean over rows of (-y*log(h) - (1-y)*log(1-h)) assuming the above holds
  return (positivePenalty.subtract(negativePenalty)).sum() / y.getRowCount();
}
/**
 * Scales a matrix into the interval given by min and max.
 *
 * @param input the input value.
 * @param fromMin the lower bound of the input interval.
 * @param fromMax the upper bound of the input interval.
 * @param toMin the lower bound of the target interval.
 * @param toMax the upper bound of the target interval.
 * @return the new matrix with scaled values.
 */
public static DoubleMatrix minMaxScale(DoubleMatrix input, double fromMin,
    double fromMax, double toMin, double toMax) {
  DoubleMatrix scaled = new DenseDoubleMatrix(input.getRowCount(),
      input.getColumnCount());
  double[][] values = input.toArray();
  // Scale every cell via the scalar overload.
  for (int r = 0; r < scaled.getRowCount(); r++) {
    for (int c = 0; c < scaled.getColumnCount(); c++) {
      scaled.set(r, c,
          minMaxScale(values[r][c], fromMin, fromMax, toMin, toMax));
    }
  }
  return scaled;
}
/**
 * @return a log'd vector that was guarded against edge cases of the
 *         logarithm.
 */
public static DoubleVector logVector(DoubleVector input) {
  DenseDoubleVector result = new DenseDoubleVector(input.getDimension());
  for (int i = 0; i < result.getDimension(); i++) {
    result.set(i, guardedLogarithm(input.get(i)));
  }
  return result;
}
/**
 * Logistic loss over outcome/hypothesis vectors:
 * sum of (-y * log(h) - (1 - y) * log(1 - h)), with guarded logarithms.
 */
@Override
public double calculateLoss(DoubleVector y, DoubleVector hypothesis) {
  DoubleVector oneMinusOutcome = y.subtractFrom(1.0d);
  DoubleVector negatedOutcome = y.multiply(-1d);
  DoubleVector oneMinusHypothesis = hypothesis.subtractFrom(1d);
  DoubleVector logOneMinusHypothesis = MathUtils.logVector(oneMinusHypothesis);
  DoubleVector logHypothesis = MathUtils.logVector(hypothesis);
  DoubleVector negativeTerm = oneMinusOutcome.multiply(logOneMinusHypothesis);
  DoubleVector positiveTerm = negatedOutcome.multiply(logHypothesis);
  return (positiveTerm.subtract(negativeTerm)).sum();
}
/**
 * Computes the gradient of the feature vector for the summed residual
 * between outcome and hypothesis. Only the non-zero feature entries are
 * assigned a gradient value of -error * log(value + 1); all other entries
 * keep their copied value. A zero residual yields a sparse zero vector.
 */
@Override
public DoubleVector calculateGradient(DoubleVector feature, DoubleVector y,
    DoubleVector hypothesis) {
  // Aggregate residual across all dimensions.
  double error = y.subtract(hypothesis).sum();
  if (error == 0d) {
    // No residual: nothing to correct, return an all-zero sparse vector.
    return new SequentialSparseDoubleVector(feature.getDimension());
  }
  DoubleVector gradient = feature.deepCopy();
  Iterator<DoubleVectorElement> nonZeros = feature.iterateNonZero();
  while (nonZeros.hasNext()) {
    DoubleVectorElement element = nonZeros.next();
    gradient.set(element.getIndex(),
        MathUtils.guardedLogarithm(element.getValue() + 1d) * error * -1d);
  }
  return gradient;
}
}
MathUtils.logVector(predictedVector)).sum(); int prediction = 0; if (threshold == null) {
/**
 * @return a log'd matrix that was guarded against edge cases of the
 *         logarithm.
 */
public static DoubleMatrix logMatrix(DoubleMatrix input) {
  int rows = input.getRowCount();
  int cols = input.getColumnCount();
  DenseDoubleMatrix result = new DenseDoubleMatrix(rows, cols);
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      result.set(r, c, guardedLogarithm(input.get(r, c)));
    }
  }
  return result;
}