/**
 * Element-wise maximum between an ndarray and a scalar, returning a copy.
 *
 * @param ndArray the input ndarray (not modified)
 * @param k       the scalar each element is compared against
 * @return a new ndarray containing max(element, k) for every element
 */
public static INDArray max(INDArray ndArray, double k) {
    return max(ndArray, k, true);
}
/**
 * Element-wise maximum of two ndarrays, returning a copy.
 *
 * @param first  the first ndarray (not modified)
 * @param second the second ndarray (not modified)
 * @return a new ndarray containing max(first[i], second[i]) for every element
 */
public static INDArray max(INDArray first, INDArray second) {
    return max(first, second, true);
}
/**
 * Builds distribution statistics from row vectors of means and standard deviations.
 * Standard deviations are clamped in place to at least {@code Nd4j.EPS_THRESHOLD}
 * so later divisions by std cannot produce NaNs.
 *
 * @param mean row vector of means
 * @param std  row vector of standard deviations; modified in place by the epsilon clamp
 */
public DistributionStats(@NonNull INDArray mean, @NonNull INDArray std) {
    // Clamp in place (copy = false): any std below epsilon is rounded up to epsilon
    Transforms.max(std, Nd4j.EPS_THRESHOLD, false);
    // BUG FIX: the original compared INDArray references with '==' ("std.min(1) == Nd4j.scalar(...)"),
    // which is always false for freshly created arrays, so this warning could never fire.
    // Compare the minimum VALUE instead: after the clamp above, any originally-zero std is
    // exactly EPS_THRESHOLD.
    if (std.minNumber().doubleValue() == Nd4j.EPS_THRESHOLD) {
        logger.info("API_INFO: Std deviation found to be zero. Transform will round up to epsilon to avoid nans.");
    }
    this.mean = mean;
    this.std = std;
}
/** * @param lower row vector of lower bounds * @param upper row vector of upper bounds */ public MinMaxStats(@NonNull INDArray lower, @NonNull INDArray upper) { // Check for 0 differences and round up to epsilon INDArray diff = upper.sub(lower); INDArray addedPadding = Transforms.max(diff, Nd4j.EPS_THRESHOLD).subi(diff); // If any entry in `addedPadding` is not 0, then we had to add something to prevent 0 difference, Add this same // value to the upper bounds to actually apply the padding, and log about it if (addedPadding.sumNumber().doubleValue() > 0) { log.info("API_INFO: max val minus min val found to be zero. Transform will round up to epsilon to avoid nans."); upper.addi(addedPadding); } this.lower = lower; this.upper = upper; }
System.out.println("Element-wise tanh on random array:\n" + Transforms.tanh(random)); System.out.println("Element-wise power (x^3.0) on random array:\n" + Transforms.pow(random,3.0)); System.out.println("Element-wise scalar max (with scalar 0.5):\n" + Transforms.max(random,0.5));
/**
 * Computes the per-example, per-output score array for this loss:
 * labels * log(labels / output), with both arrays clipped to valid probabilities first.
 *
 * @param labels       target probabilities; not modified (the clip below copies)
 * @param preOutput    pre-activation network output; not modified (dup() before activation)
 * @param activationFn activation applied to preOutput
 * @param mask         optional mask applied to the score array, or null
 * @return the score array (same shape as labels)
 * @throws IllegalArgumentException if labels and preOutput column counts differ
 */
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    // Clip output and labels to be between Nd4j.EPS_THRESHOLD and 1, i.e. a valid non-zero probability,
    // so the division/log below cannot produce NaN or Inf.
    // Note: the inner max over labels uses copy = true so the caller's labels array is untouched.
    output = Transforms.min(Transforms.max(output, Nd4j.EPS_THRESHOLD, false), 1, false);
    labels = Transforms.min(Transforms.max(labels, Nd4j.EPS_THRESHOLD, true), 1, false);
    // rdivi computes labels / output in place on 'output'; log in place on the result
    INDArray logRatio = Transforms.log(output.rdivi(labels), false);
    INDArray scoreArr = logRatio.muli(labels);
    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
// In-place element-wise update (copy = false): runningUpper = max(runningUpper, batchMax),
// accumulating the running per-column maximum across batches.
Transforms.max(runningUpper, batchMax, false);
/**
 * Computes the gradient of the cosine-proximity-style loss w.r.t. the pre-activation output.
 * Norms are clamped to epsilon before division to avoid NaNs for zero vectors.
 *
 * @param labels       target values; not modified
 * @param preOutput    pre-activation network output; not modified (dup() before activation)
 * @param activationFn activation function used for forward and backprop
 * @param mask         optional per-example column-vector mask, or null
 * @return the gradient array dL/dz (same shape as preOutput)
 * @throws IllegalArgumentException if labels and preOutput column counts differ
 */
@Override
public INDArray computeGradient(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException(
                "Labels array numColumns (size(1) = " + labels.size(1) + ") does not match output layer"
                        + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    INDArray yhat = activationFn.getActivation(preOutput.dup(), true);
    // Per-example L2 norms along dimension 1
    INDArray yL2norm = labels.norm2(1);
    INDArray yhatL2norm = yhat.norm2(1);
    INDArray yhatL2normSq = yhatL2norm.mul(yhatL2norm);
    //Note: This is not really the L1 norm since I am not taking abs values
    INDArray yhatDotyL1norm = labels.mul(yhat).sum(1);
    // dL/da numerator: labels * |yhat|^2 - yhat * (labels . yhat), built in place on dLda
    INDArray dLda = labels.mulColumnVector(yhatL2normSq);
    dLda.subi(yhat.mulColumnVector(yhatDotyL1norm));
    // transform vals to avoid nans before div (clamp each norm to at least epsilon, in place)
    yL2norm = Transforms.max(yL2norm, Nd4j.EPS_THRESHOLD, false);
    yhatL2norm = Transforms.max(yhatL2norm, Nd4j.EPS_THRESHOLD, false);
    yhatL2normSq = Transforms.max(yhatL2normSq, Nd4j.EPS_THRESHOLD, false);
    dLda.diviColumnVector(yL2norm);
    dLda.diviColumnVector(yhatL2norm.mul(yhatL2normSq));
    dLda.muli(-1);
    //dL/dz: backprop dL/da through the activation function
    INDArray gradients = activationFn.backprop(preOutput, dLda).getFirst(); //TODO loss functions with params
    if (mask != null) {
        gradients.muliColumnVector(mask);
    }
    return gradients;
}
/**
 * Converts a relative step size into an absolute finite-difference step for each element of x:
 * sign(x) * relStep * max(|x|, 1). Elements with x == 0 get a positive sign.
 *
 * @param relStep relative step size; if null, defaults to sqrt(eps relative to x)
 * @param x       the point at which the step is computed (not modified)
 * @return the element-wise absolute step
 */
public static INDArray computeAbsoluteStep(INDArray relStep, INDArray x) {
    INDArray step = relStep;
    if (step == null) {
        step = pow(Nd4j.scalar(getEpsRelativeTo(x)), 0.5);
    }
    // gte(0) is 1 where x >= 0 and 0 elsewhere; *2 - 1 maps that to +1 / -1
    INDArray sign = x.gte(0).muli(2).subi(1);
    return sign.mul(step).muli(max(abs(x), 1.0));
}
// In-place element-wise update (copy = false): vHat = max(vHat, v)
// NOTE(review): this looks like the AMSGrad-style running maximum of the second moment — confirm against caller.
Transforms.max(vHat, v, false);
/**
 * Element-wise maximum of two ndarrays.
 * Whether the result is a copy or computed in place on {@code first} is
 * controlled by the global {@code Nd4j.copyOnOps} setting.
 *
 * @param first  the first ndarray
 * @param second the second ndarray
 * @return an ndarray containing max(first[i], second[i]) for every element
 */
public static INDArray max(INDArray first, INDArray second) {
    return max(first, second, Nd4j.copyOnOps);
}
/**
 * Element-wise maximum between an ndarray and a scalar.
 * Whether the result is a copy or computed in place is controlled by the
 * global {@code Nd4j.copyOnOps} setting.
 *
 * @param ndArray the input ndarray
 * @param k       the scalar each element is compared against
 * @return an ndarray containing max(element, k) for every element
 */
public static INDArray max(INDArray ndArray, double k) {
    return max(ndArray, k, Nd4j.copyOnOps);
}
/**
 * Builds distribution statistics from row vectors of means and standard deviations.
 * Standard deviations are clamped in place to at least {@code Nd4j.EPS_THRESHOLD}
 * so later divisions by std cannot produce NaNs.
 *
 * @param mean row vector of means
 * @param std  row vector of standard deviations; modified in place by the epsilon clamp
 */
public DistributionStats(@NonNull INDArray mean, @NonNull INDArray std) {
    // Clamp in place (copy = false): any std below epsilon is rounded up to epsilon
    Transforms.max(std, Nd4j.EPS_THRESHOLD, false);
    // BUG FIX: the original compared INDArray references with '==' ("std.min(1) == Nd4j.scalar(...)"),
    // which is always false for freshly created arrays, so this warning could never fire.
    // Compare the minimum VALUE instead: after the clamp above, any originally-zero std is
    // exactly EPS_THRESHOLD.
    if (std.minNumber().doubleValue() == Nd4j.EPS_THRESHOLD) {
        logger.info("API_INFO: Std deviation found to be zero. Transform will round up to epsilon to avoid nans.");
    }
    this.mean = mean;
    this.std = std;
}
/** * @param lower row vector of lower bounds * @param upper row vector of upper bounds */ public MinMaxStats(@NonNull INDArray lower, @NonNull INDArray upper) { // Check for 0 differences and round up to epsilon INDArray diff = upper.sub(lower); INDArray addedPadding = Transforms.max(diff, Nd4j.EPS_THRESHOLD).subi(diff); // If any entry in `addedPadding` is not 0, then we had to add something to prevent 0 difference, Add this same // value to the upper bounds to actually apply the padding, and log about it if (addedPadding.sumNumber().doubleValue() > 0) { log.info("API_INFO: max val minus min val found to be zero. Transform will round up to epsilon to avoid nans."); upper.addi(addedPadding); } this.lower = lower; this.upper = upper; }
private INDArray averageOverCorrectClasses(INDArray labels, INDArray positiveExamples) { INDArray correctClassesByExample = Transforms.max(labels.sum(1), 1); // fix for examples without label return positiveExamples.sum(1).divi(correctClassesByExample); }
/**
 * Element-wise in-place maximum with another tensor.
 * Scalar operands are applied directly to the backing ndarray; otherwise the
 * shim handles broadcasting between the two ndarrays.
 *
 * @param max the tensor to take the maximum against
 * @return this tensor, updated in place
 */
@Override
public IntegerTensor maxInPlace(IntegerTensor max) {
    if (!max.isScalar()) {
        tensor = INDArrayShim.max(tensor, unsafeGetNd4J(max));
    } else {
        // copy = false: mutate the backing ndarray directly
        Transforms.max(tensor, max.scalar(), false);
    }
    return this;
}
/**
 * Element-wise in-place maximum with another tensor.
 * Scalar operands are applied directly to the backing ndarray; otherwise the
 * shim handles broadcasting between the two ndarrays.
 *
 * @param max the tensor to take the maximum against
 * @return this tensor, updated in place
 */
@Override
public DoubleTensor maxInPlace(DoubleTensor max) {
    if (!max.isScalar()) {
        tensor = INDArrayShim.max(tensor, unsafeGetNd4J(max));
    } else {
        // copy = false: mutate the backing ndarray directly
        Transforms.max(tensor, max.scalar(), false);
    }
    return this;
}
/**
 * Computes the per-example, per-output score array for this loss:
 * labels * log(labels / output), with both arrays clipped to valid probabilities first.
 *
 * @param labels       target probabilities; not modified (the clip below copies)
 * @param preOutput    pre-activation network output; not modified (dup() before activation)
 * @param activationFn activation applied to preOutput
 * @param mask         optional mask applied to the score array, or null
 * @return the score array (same shape as labels)
 * @throws IllegalArgumentException if labels and preOutput column counts differ
 */
private INDArray scoreArray(INDArray labels, INDArray preOutput, IActivation activationFn, INDArray mask) {
    if (labels.size(1) != preOutput.size(1)) {
        throw new IllegalArgumentException("Labels array numColumns (size(1) = " + labels.size(1)
                        + ") does not match output layer" + " number of outputs (nOut = " + preOutput.size(1) + ") ");
    }
    INDArray output = activationFn.getActivation(preOutput.dup(), true);
    // Clip output and labels to be between Nd4j.EPS_THRESHOLD and 1, i.e. a valid non-zero probability,
    // so the division/log below cannot produce NaN or Inf.
    // Note: the inner max over labels uses copy = true so the caller's labels array is untouched.
    output = Transforms.min(Transforms.max(output, Nd4j.EPS_THRESHOLD, false), 1, false);
    labels = Transforms.min(Transforms.max(labels, Nd4j.EPS_THRESHOLD, true), 1, false);
    // rdivi computes labels / output in place on 'output'; log in place on the result
    INDArray logRatio = Transforms.log(output.rdivi(labels), false);
    INDArray scoreArr = logRatio.muli(labels);
    if (mask != null) {
        LossUtil.applyMask(scoreArr, mask);
    }
    return scoreArr;
}
/**
 * Forward pass: L2-normalizes the single input, i.e. x / ||x||_2 per example.
 * Norms are clamped to at least eps so zero vectors do not produce NaNs.
 *
 * @param training unused here; normalization is identical at train and test time
 * @return the normalized activations (same shape as the input)
 * @throws IllegalStateException if the input has not been set
 */
@Override
public INDArray doForward(boolean training) {
    if (!canDoForward())
        throw new IllegalStateException("Cannot do forward pass: inputs not set (L2NormalizeVertex " + vertexName
                        + " idx " + vertexIndex + ")");
    // L2 norm along all dimensions except 0, unless user-specified
    // x / |x|2
    INDArray x = inputs[0];
    int[] dimensions = getDimensions(x);
    INDArray xNorm2 = x.norm2(dimensions);
    // Clamp the norms in place (copy = false) to at least eps so the division below is safe
    Transforms.max(xNorm2, eps, false);
    if (x.rank() == 2) {
        return x.divColumnVector(xNorm2);
    } else {
        // Higher-rank input: divide with an explicit broadcast along dimension 0
        INDArray out = Nd4j.createUninitialized(x.shape(), x.ordering());
        return Nd4j.getExecutioner().execAndReturn(new BroadcastDivOp(x, xNorm2, out, 0));
    }
}