@Override
public Map<Integer, Double> labelCounts() {
    Map<Integer, Double> ret = new HashMap<>();
    if (labels == null)
        return ret;
    // Treat each label row as one-hot: count how many rows have their maximum at each column index
    long nTensors = labels.tensorssAlongDimension(1);
    for (int i = 0; i < nTensors; i++) {
        INDArray row = labels.tensorAlongDimension(i, 1);
        int maxIdx = Nd4j.getBlasWrapper().iamax(row);
        if (maxIdx < 0)
            throw new IllegalStateException("Please check the iamax implementation for "
                            + Nd4j.getBlasWrapper().getClass().getName());
        if (ret.get(maxIdx) == null)
            ret.put(maxIdx, 1.0);
        else
            ret.put(maxIdx, ret.get(maxIdx) + 1.0);
    }
    return ret;
}
public ScatterUpdate(@NonNull INDArray original, @NonNull INDArray updates, INDArray result,
                @NonNull int[] indices, int[] dimension, @NonNull UpdateOp op) {
    // Encode the op, the dimensions and the indices as the integer arguments of the custom op
    List<Integer> iargs = new ArrayList<>();
    iargs.add(op.ordinal());
    iargs.add(dimension.length);
    for (val v : dimension)
        iargs.add(v);
    iargs.add(indices.length);
    for (val v : indices)
        iargs.add(v);

    if (updates.tensorAlongDimension(0, dimension).lengthLong() != original.tensorAlongDimension(0, dimension).lengthLong())
        throw new ND4JIllegalStateException("ScatterUpdate requires equally shaped tensors for the operation along the given dimension(s)");

    long numTensors = original.tensorssAlongDimension(dimension);
    for (val idx : indices)
        if (idx >= numTensors)
            throw new ND4JIllegalStateException("Can't update an index higher than the number of tensors");

    this.op = DynamicCustomOp.builder("scatter_update")
                    .addInputs(original, updates)
                    .callInplace(true)
                    .addIntegerArguments(iargs)
                    .build();
}
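For orientation, here is a minimal sketch of what an additive scatter update along dimension 1 (row TADs of a 2d array) is meant to do, written with plain ND4J indexing rather than the scatter_update custom op. The shapes, the indices and the assumption of an additive UpdateOp are illustrative only, and the usual Nd4j/INDArray imports are assumed.

    // Conceptual effect only (assumed additive update): rows 0 and 2 of 'original'
    // receive the corresponding rows of 'updates' added to them.
    INDArray original = Nd4j.zeros(4, 3);
    INDArray updates = Nd4j.ones(2, 3);
    int[] indices = {0, 2};
    for (int i = 0; i < indices.length; i++) {
        // each index selects one tensor along the update dimension (a row here)
        original.putRow(indices[i], original.getRow(indices[i]).add(updates.getRow(i)));
    }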
public static INDArray tailor4d2d(@NonNull INDArray data) {
    long instances = data.size(0);
    long channels = data.size(1);
    long height = data.size(2);
    long width = data.size(3);

    INDArray in2d = Nd4j.create(channels, height * width * instances);
    // Tensors along dimensions (3, 2, 0) leave only dimension 1 free, so there is one TAD per channel
    long tads = data.tensorssAlongDimension(3, 2, 0);
    for (int i = 0; i < tads; i++) {
        INDArray thisTAD = data.tensorAlongDimension(i, 3, 2, 0);
        in2d.putRow(i, Nd4j.toFlattened(thisTAD));
    }
    return in2d.transposei();
}
@Override
public INDArray percentile(Number quantile, int... dimension) {
    if (quantile.doubleValue() < 0 || quantile.doubleValue() > 100)
        throw new ND4JIllegalStateException("Percentile value should be in 0...100 range");

    if (isScalar())
        return Nd4j.scalar(this.getDouble(0));

    INDArray sorted = Nd4j.getNDArrayFactory().sort(this.dup(this.ordering()), false, dimension);

    // there's no practical sense doing this on GPU, stride will be just size of TAD
    INDArray ret = Nd4j.createUninitialized(sorted.tensorssAlongDimension(dimension));

    for (int i = 0; i < ret.length(); i++) {
        ret.putScalar(i, getPercentile(quantile, sorted.tensorAlongDimension(i, dimension)));
    }

    return ret;
}
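A hedged usage sketch of the method above, assuming a plain 2d array and the usual Nd4j imports: with dimension 1 each row is one TAD, so the result holds one percentile per row.

    INDArray m = Nd4j.create(new double[][] {{1, 2, 3, 4}, {10, 20, 30, 40}});
    INDArray rowPercentiles = m.percentile(50, 1);  // one value per row; the exact value depends on getPercentile's interpolation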
/**
 * Tensor1DStats, used to efficiently iterate through tensors on a matrix (2d NDArray) for element-wise ops.
 * For example, the offset of each 1d tensor can be calculated using only a single tensorAlongDimension method call,
 * hence is potentially faster than approaches requiring multiple tensorAlongDimension calls.<br>
 * Note that this can only (generally) be used for 2d NDArrays. For certain 3+d NDArrays, the tensor starts may not
 * be in increasing order.
 */
public static Tensor1DStats get1DTensorStats(INDArray array, int... dimension) {
    long tensorLength = array.size(dimension[0]);

    //As per tensorssAlongDimension:
    long numTensors = array.tensorssAlongDimension(dimension);

    //First tensor always starts with the first element in the NDArray, regardless of dimension
    long firstTensorOffset = array.offset();

    //Next: Need to work out the separation between the start (first element) of each 1d tensor
    long tensorStartSeparation;
    int elementWiseStride; //Separation in buffer between elements in the tensor
    if (numTensors == 1) {
        tensorStartSeparation = -1; //Not applicable
        elementWiseStride = array.elementWiseStride();
    } else {
        INDArray secondTensor = array.tensorAlongDimension(1, dimension);
        tensorStartSeparation = secondTensor.offset() - firstTensorOffset;
        elementWiseStride = secondTensor.elementWiseStride();
    }
    return new Tensor1DStats(firstTensorOffset, tensorStartSeparation, numTensors, tensorLength, elementWiseStride);
}
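A minimal sketch of the property this helper relies on, assuming a small 'c'-ordered 2d array and the usual Nd4j imports: the start offset of the i-th 1d tensor is firstTensorOffset + i * tensorStartSeparation, so a single tensorAlongDimension call (for tensor 1) is enough to derive the offsets of all tensors.

    INDArray arr = Nd4j.linspace(1, 12, 12).reshape(3, 4);   // 3 tensors of length 4 along dimension 1
    long firstOffset = arr.offset();
    long separation = arr.tensorAlongDimension(1, 1).offset() - firstOffset;
    for (int i = 0; i < arr.tensorssAlongDimension(1); i++) {
        long expected = firstOffset + (long) i * separation;
        assert arr.tensorAlongDimension(i, 1).offset() == expected;
    }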
INDArray[] retAlongDimensionArrays = new INDArray[(int) ret.tensorssAlongDimension(dimension)];
for (int i = 0; i < retAlongDimensionArrays.length; i++)
    retAlongDimensionArrays[i] = ret.tensorAlongDimension(i, dimension);

long arrTensorLength = -1;
if (arr.tensorssAlongDimension(dimension) != ret.tensorssAlongDimension(dimension))
    throw new IllegalStateException("Illegal concatenation. Tensors along dimension must be same length.");

for (int i = 0; i < arr.tensorssAlongDimension(dimension); i++) {
    INDArray retLinear = retAlongDimensionArrays[i];
    INDArray arrTensor = arr.tensorAlongDimension(i, dimension);
/**
 * @param accum the accumulation operation to collapse
 * @param originalDimension the dimensions of the original (larger) problem
 * @param smallerDimension the dimensions of the smaller sub-problem
 * @param performSmallerDimension whether the accumulation along the smaller dimension should also be performed
 */
public TadCollapseAccumulation(Op accum, int[] originalDimension, int[] smallerDimension,
                boolean performSmallerDimension) {
    this.accum = accum;
    this.performSmallerDimension = performSmallerDimension;
    this.originalDimension = originalDimension;
    this.smallerDimension = smallerDimension;
    tadsForSmallerDimension = accum.x().tensorssAlongDimension(smallerDimension);
    tadsForLargerDimension = accum.x().tensorssAlongDimension(originalDimension);
}
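The idea being set up here is that an accumulation over a larger dimension set can be computed as successive accumulations over smaller ones. A minimal sketch of that equivalence using plain ND4J reductions (the shapes and the use of sum are illustrative assumptions, not this class's API):

    INDArray x = Nd4j.linspace(1, 24, 24).reshape(2, 3, 4);
    INDArray direct = x.sum(1, 2);          // accumulate over the "original" dimensions {1, 2}
    INDArray collapsed = x.sum(2).sum(1);   // same values via the "smaller" dimension 2, then 1
    // Both hold the same values; whether the shapes match exactly depends on the
    // reduce-shape conventions of the ND4J version in use.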
@Override
public Map<Integer, Double> labelCounts() {
    Map<Integer, Double> ret = new HashMap<>();
    if (labels == null)
        return ret;
    int nTensors = labels.tensorssAlongDimension(1);
    for (int i = 0; i < nTensors; i++) {
        INDArray row = labels.tensorAlongDimension(i, 1);
        int maxIdx = Nd4j.getBlasWrapper().iamax(row);
        if (maxIdx < 0)
            throw new IllegalStateException("Please check the iamax implementation for "
                            + Nd4j.getBlasWrapper().getClass().getName());
        if (ret.get(maxIdx) == null)
            ret.put(maxIdx, 1.0);
        else
            ret.put(maxIdx, ret.get(maxIdx) + 1.0);
    }
    return ret;
}
@Override
public void exec(int... dimension) {
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn((JaccardDistance) opForDimension(i, dimension))
                        .getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
@Override
public void exec(int... dimension) {
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn((Bias) opForDimension(i, dimension))
                        .getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
@Override
public void exec(int... dimension) {
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn((CosineDistance) opForDimension(i, dimension))
                        .getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
@Override
public void exec(int... dimension) {
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn((HammingDistance) opForDimension(i, dimension))
                        .getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
@Override
public void exec(int... dimension) {
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn((CosineSimilarity) opForDimension(i, dimension))
                        .getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
public static INDArray tailor4d2d(@NonNull INDArray data) {
    int instances = data.size(0);
    int channels = data.size(1);
    int height = data.size(2);
    int width = data.size(3);

    INDArray in2d = Nd4j.create(channels, height * width * instances);
    int tads = data.tensorssAlongDimension(3, 2, 0);
    for (int i = 0; i < tads; i++) {
        INDArray thisTAD = data.tensorAlongDimension(i, 3, 2, 0);
        in2d.putRow(i, Nd4j.toFlattened(thisTAD));
    }
    return in2d.transposei();
}
@Override
public void exec(int... dimension) {
    // Integer.MAX_VALUE as the only dimension means "reduce over the whole array"
    if (dimension.length == 1 && dimension[0] == Integer.MAX_VALUE) {
        exec();
        return;
    }
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn(opForDimension(i, dimension)).getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
@Override
public void exec(int... dimension) {
    if (dimension.length == 1 && dimension[0] == Integer.MAX_VALUE) {
        exec();
        this.z = Nd4j.scalar(this.finalResult);
        return;
    }
    int[] retShape = ArrayUtil.removeIndex(x.shape(), dimension);
    int nOps = x.tensorssAlongDimension(dimension);
    z = Nd4j.create(retShape);
    for (int i = 0; i < nOps; i++) {
        double d = Nd4j.getExecutioner().execAndReturn(opForDimension(i, dimension)).getFinalResult().doubleValue();
        z.putScalar(i, d);
    }
}
/**
 * Tensor1DStats, used to efficiently iterate through tensors on a matrix (2d NDArray) for element-wise ops.
 * For example, the offset of each 1d tensor can be calculated using only a single tensorAlongDimension method call,
 * hence is potentially faster than approaches requiring multiple tensorAlongDimension calls.<br>
 * Note that this can only (generally) be used for 2d NDArrays. For certain 3+d NDArrays, the tensor starts may not
 * be in increasing order.
 */
public static Tensor1DStats get1DTensorStats(INDArray array, int... dimension) {
    int tensorLength = array.size(dimension[0]);

    //As per tensorssAlongDimension:
    int numTensors = array.tensorssAlongDimension(dimension);

    //First tensor always starts with the first element in the NDArray, regardless of dimension
    long firstTensorOffset = array.offset();

    //Next: Need to work out the separation between the start (first element) of each 1d tensor
    long tensorStartSeparation;
    int elementWiseStride; //Separation in buffer between elements in the tensor
    if (numTensors == 1) {
        tensorStartSeparation = -1; //Not applicable
        elementWiseStride = array.elementWiseStride();
    } else {
        INDArray secondTensor = array.tensorAlongDimension(1, dimension);
        tensorStartSeparation = secondTensor.offset() - firstTensorOffset;
        elementWiseStride = secondTensor.elementWiseStride();
    }
    return new Tensor1DStats(firstTensorOffset, tensorStartSeparation, numTensors, tensorLength, elementWiseStride);
}
for (int i = 0; i < op.x().tensorssAlongDimension(dimension); i++) {
    Op op2 = op.opForDimension(i, dimension);
    IComplexNumber result = execAndReturn((Accumulation) op2).getFinalResultComplex();