/**
 * Compute mel frequency from linear frequency.
 *
 * @param inputFreq the input frequency in linear scale
 * @return the frequency in a mel scale
 */
private double linearToMel(double inputFreq) {
  return 1127 * Math.log1p(inputFreq / 700);
}
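A quick spot check of those constants (illustrative, not from the original source): the mel scale is calibrated so that 1000 Hz lands at about 1000 mel.

// 1127 * ln(1 + 1000/700) = 1127 * 0.88730... ≈ 999.99 mel
double melAt1kHz = 1127 * Math.log1p(1000.0 / 700);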
/**
 * A serialized Percentile is not fixed-length; its size depends mainly on the count and the
 * compression. We collected statistics from tests, informed by the T-digest paper, and fitted
 * function models with Stata. The resulting pattern:
 *
 * 0 to 2 * compression: size grows as a linear function, which follows directly from the
 * T-digest algorithm.
 * 2 * compression to 50,000,000: size grows roughly logarithmically, and the estimate's
 * accuracy improves with the number of samples observed.
 *
 * @param count the number of samples observed
 * @return the estimated size in bytes
 */
public double getBytesEstimate(double count) {
  if (count <= 2 * compression) {
    return 16 + count * 5;
  }
  switch ((int) compression) {
    case 100:
      return 597.9494 * Math.log1p(count) - 2358.987;
    case 1000:
      return 5784.34 * Math.log1p(count) - 35030.97;
    case 10000:
      return 54313.96 * Math.log1p(count) - 438988.8;
    default:
      return 0.0;
  }
}
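A hypothetical sanity check of the two regimes, assuming a digest built with compression = 100 (the call site is illustrative):

double small = getBytesEstimate(150);        // linear regime: 16 + 150 * 5 = 766 bytes
double large = getBytesEstimate(1_000_000);  // log regime: 597.9494 * ln(1_000_001) - 2358.987 ≈ 5902 bytes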
@Override
protected ExprEval eval(double param) {
  return ExprEval.of(Math.log1p(param));
}
/**
 * Returns a random number from the distribution.
 */
@Override
public double nextDouble() {
  return -Math.log1p(-randomDouble()) / lambda;
}
@Override
public double apply(double arg1) {
  return -Math.log1p(-rand.nextDouble());
}
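The two samplers above, and the exp helper further below, all draw exponential variates by inverse-transform sampling; the identity behind them, written out once:

% If U ~ Uniform(0,1) and F(x) = 1 - e^{-\lambda x} is the Exp(\lambda) CDF,
% then F^{-1}(U) is Exp(\lambda)-distributed:
F^{-1}(u) = -\frac{\ln(1 - u)}{\lambda}

Math.log1p(-u) evaluates ln(1 - u) without first forming 1 - u, which would lose the low-order bits of u when u is near zero; the unit-rate variants simply take λ = 1.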
/**
 * Returns an estimate for the total number of distinct elements that have been added to this
 * Bloom filter. This approximation is reasonably accurate if it does not exceed the value of
 * {@code expectedInsertions} that was used when constructing the filter.
 *
 * @since 22.0
 */
public long approximateElementCount() {
  long bitSize = bits.bitSize();
  long bitCount = bits.bitCount();

  /*
   * Each insertion is expected to reduce the # of clear bits by a factor of
   * `numHashFunctions/bitSize`. So, after n insertions, expected bitCount is
   * `bitSize * (1 - (1 - numHashFunctions/bitSize)^n)`. Solving that for n means dividing by
   * `ln(1 - numHashFunctions/bitSize)`, which we approximate via `ln x ≈ x - 1`; this is
   * accurate because x is very close to 1 when bitSize is much larger than numHashFunctions.
   * That gives the following formula.
   */
  double fractionOfBitsSet = (double) bitCount / bitSize;
  return DoubleMath.roundToLong(
      -Math.log1p(-fractionOfBitsSet) * bitSize / numHashFunctions, RoundingMode.HALF_UP);
}
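Spelled out, the derivation that comment compresses, with m = bitSize, k = numHashFunctions, and p = fractionOfBitsSet:

m\left(1 - \left(1 - \tfrac{k}{m}\right)^n\right) = p\,m
\;\Longrightarrow\;
n = \frac{\ln(1 - p)}{\ln\!\left(1 - \tfrac{k}{m}\right)}
\approx \frac{\ln(1 - p)}{-k/m}
= -\frac{m\,\ln(1 - p)}{k}

The approximation step replaces ln x with x - 1 at x = 1 - k/m, and the code computes ln(1 - p) stably as Math.log1p(-fractionOfBitsSet).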
public Vector logNormalize(double power, double normLength) {
  // we can special case certain powers
  if (Double.isInfinite(power) || power <= 1.0) {
    throw new IllegalArgumentException("Power must be > 1 and < infinity");
  } else {
    double denominator = normLength * Math.log(power);
    Vector result = createOptimizedCopy();
    for (Element element : result.nonZeroes()) {
      element.set(Math.log1p(element.get()) / denominator);
    }
    return result;
  }
}
/**
 * @param power Power.
 * @param normLen Normalized length.
 * @return logNormalized value.
 */
private Vector logNormalize(double power, double normLen) {
  assert !(Double.isInfinite(power) || power <= 1.0);

  double denominator = normLen * Math.log(power);
  Vector cp = copy();

  for (Element element : cp.all())
    element.set(Math.log1p(element.get()) / denominator);

  return cp;
}
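Both logNormalize variants apply the same per-element transform; a self-contained sketch of the arithmetic on a plain array (names are illustrative, not from either library):

import java.util.Arrays;

final class LogNormalizeSketch {
  // Maps each entry x to log1p(x) / (normLength * log(power)): dividing by
  // Math.log(power) converts the natural log to base `power`; dividing by
  // normLength rescales the result by the vector's norm length.
  static double[] logNormalize(double[] v, double power, double normLength) {
    double denominator = normLength * Math.log(power);
    double[] out = new double[v.length];
    for (int i = 0; i < v.length; i++) {
      out[i] = Math.log1p(v[i]) / denominator;
    }
    return out;
  }

  public static void main(String[] args) {
    // prints [0.0, 1.0, 2.0]: log2(1 + x) for x = 0, 1, 3 with normLength = 1
    System.out.println(Arrays.toString(logNormalize(new double[] {0, 1, 3}, 2.0, 1.0)));
  }
}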
private static double[] exp(int n) {
  double[] r = new double[n];
  Random gen = RandomUtils.getRandom(1L);
  for (int i = 0; i < n; i++) {
    r[i] = -Math.log1p(-gen.nextDouble());
  }
  return r;
}
// quotient acceptance step: the log form for large |v|, a Horner-form
// Taylor polynomial for small |v| where log1p(v) would waste precision
if (Math.abs(v) > 0.25) {
  q = q0 - s * t + 0.25 * t * t + (ss + ss) * Math.log1p(v);
} else {
  q = q0 + 0.5 * t * t
      * ((((((((a9 * v + a8) * v + a7) * v + a6) * v + a5) * v + a4) * v + a3) * v + a2) * v + a1)
      * v;
}
if (Math.log1p(-u) <= q) {
  return gds / rate;
}
@Test
public void beta() {
  Random r = RandomUtils.getRandom();
  for (int i = 0; i < 200; i++) {
    double alpha = -50 * Math.log1p(-r.nextDouble());
    double beta = -50 * Math.log1p(-r.nextDouble());
    double ref = Math.exp(Gamma.logGamma(alpha) + Gamma.logGamma(beta) - Gamma.logGamma(alpha + beta));
    double actual = Gamma.beta(alpha, beta);
    double err = (ref - actual) / ref;
    assertEquals("beta at (" + alpha + ", " + beta + ") relative error = " + err, 0, err, 1.0e-10);
  }
}
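The reference value in this test is the standard beta-gamma identity, evaluated in log space so that Γ does not overflow for large arguments (the α and β draws are themselves mean-50 exponential variates via the -50·log1p trick above):

B(\alpha, \beta) = \frac{\Gamma(\alpha)\,\Gamma(\beta)}{\Gamma(\alpha + \beta)}
= \exp\bigl(\ln\Gamma(\alpha) + \ln\Gamma(\beta) - \ln\Gamma(\alpha + \beta)\bigr)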
/**
 * Returns the natural log of the values in this column, after adding 1 to each so that zero
 * values don't return -Infinity
 */
default DoubleColumn log1p() {
  DoubleColumn newColumn = DoubleColumn.create(name() + "[log1p]", size());
  for (int i = 0; i < size(); i++) {
    newColumn.set(i, Math.log1p(getDouble(i)));
  }
  return newColumn;
}
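A brief usage sketch, using only the factory and setter already visible in the method above (assuming, as that method implies, that create(name, size) pre-sizes the column):

DoubleColumn x = DoubleColumn.create("x", 3);
x.set(0, 0.0);         // log1p -> 0.0
x.set(1, Math.E - 1);  // log1p -> 1.0
x.set(2, 9.0);         // log1p -> ln(10) ≈ 2.303
DoubleColumn y = x.log1p();  // a new column named "x[log1p]"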
if (logStrength) {
  double epsilon = model.getEpsilon();
  inputRDD = noNaN.map(tuple -> new UserItemStrength(tuple._1()._1(), tuple._1()._2(),
      (float) Math.log1p(tuple._2() / epsilon)));
} else {
  // without log scaling, pass the raw strength through (mirrors aggregateScores below)
  inputRDD = noNaN.map(tuple -> new UserItemStrength(tuple._1()._1(), tuple._1()._2(),
      tuple._2().floatValue()));
}
@Override
public double apply(double n) {
  return Math.log1p(n);
}
@Override
public double apply(double n) {
  // note: log1p(n + 1) evaluates ln(n + 2), not ln(n + 1);
  // ln(n + 1) would be Math.log1p(n), as in the functor above
  return Math.log1p(n + 1);
}
/**
 * Combines {@link Rating}s with the same user/item into one: for implicit feedback the
 * score is the sum of all of the scores; otherwise the last score wins.
 */
private JavaRDD<Rating> aggregateScores(JavaRDD<? extends Rating> original, double epsilon) {
  JavaPairRDD<Tuple2<Integer,Integer>,Double> tuples = original.mapToPair(rating ->
      new Tuple2<>(new Tuple2<>(rating.user(), rating.product()), rating.rating()));

  JavaPairRDD<Tuple2<Integer,Integer>,Double> aggregated;
  if (implicit) {
    // TODO can we avoid groupByKey? reduce, combine, fold don't seem viable since
    // they don't guarantee the delete elements are properly handled
    aggregated = tuples.groupByKey().mapValues(MLFunctions.SUM_WITH_NAN);
  } else {
    // For non-implicit, last wins.
    aggregated = tuples.foldByKey(Double.NaN, (current, next) -> next);
  }

  JavaPairRDD<Tuple2<Integer,Integer>,Double> noNaN =
      aggregated.filter(kv -> !Double.isNaN(kv._2()));

  if (logStrength) {
    return noNaN.map(userProductScore -> new Rating(
        userProductScore._1()._1(),
        userProductScore._1()._2(),
        Math.log1p(userProductScore._2() / epsilon)));
  } else {
    return noNaN.map(userProductScore -> new Rating(
        userProductScore._1()._1(),
        userProductScore._1()._2(),
        userProductScore._2()));
  }
}
/**
 * Important note: do not change anything in the following function.
 * It has been carefully designed and tested for numerical accuracy.
 * In particular, the use of log1p and expm1 is critically important.
 *
 * @param kf the value of k as a double
 * @param nf the value of n as a double
 * @param col the given column
 * @return the quantity qnj
 */
static double qnj(final double kf, final double nf, final int col) {
  final double tmp1 = -1.0 / (kf * (Math.pow(2.0, col)));
  final double tmp2 = Math.log1p(tmp1);
  return (-1.0 * (Math.expm1(nf * tmp2)));
}
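To see why that warning is there, compare against a naive evaluation of the same quantity, 1 - (1 + tmp1)^nf (a sketch with hypothetical inputs, not from the original source):

double kf = 1e8;
double nf = 4.0;
double tmp1 = -1.0 / (kf * Math.pow(2.0, 0));        // -1e-8, tiny
double naive = 1.0 - Math.pow(1.0 + tmp1, nf);       // (1 + tmp1) rounds away low-order bits first
double stable = -Math.expm1(nf * Math.log1p(tmp1));  // log1p/expm1 avoid the cancellation near zero
// Exact expansion: 4e-8 - 6e-16 + ...; the naive form's rounding error (on the order of 1e-16)
// swamps the 6e-16 correction term, while the stable form is accurate to a few ulps.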