/**
 * Subtracts every element of this vector from the given scalar, i.e. computes
 * {@code v - this[i]} per index.
 *
 * @param v the scalar each element is subtracted from.
 * @return a new vector holding {@code v - vector[i]} at every index.
 */
@Override
public final DoubleVector subtractFrom(double v) {
  final int length = vector.length;
  DenseDoubleVector result = new DenseDoubleVector(length);
  int index = 0;
  while (index < length) {
    result.set(index, v - vector[index]);
    index++;
  }
  return result;
}
/**
 * Subtracts the given scalar from every element of this vector, i.e. computes
 * {@code this[i] - v} per index.
 *
 * @param v the scalar to subtract from each element.
 * @return a new vector holding {@code vector[i] - v} at every index.
 */
@Override
public final DoubleVector subtract(double v) {
  final int length = vector.length;
  DenseDoubleVector result = new DenseDoubleVector(length);
  for (int index = 0; index < length; ++index) {
    double difference = vector[index] - v;
    result.set(index, difference);
  }
  return result;
}
split.length - 1); for (int i = 1; i < split.length; i++) { featureVector.set(i - 1, Integer.parseInt(split[i]));
/**
 * Creates a vector filled with values starting at {@code from}, advancing by
 * {@code stepsize} between consecutive entries, up to {@code to}.
 *
 * @return a new vector filled from index, to index, with a given stepsize.
 */
public static DenseDoubleVector fromUpTo(double from, double to, double stepsize) {
  // Number of steps; the +0.5 before rounding lets an exact multiple of
  // stepsize include the endpoint value.
  final int length = (int) (FastMath.round(((to - from) / stepsize) + 0.5));
  DenseDoubleVector result = new DenseDoubleVector(length);
  for (int index = 0; index < result.getLength(); index++) {
    result.set(index, from + index * stepsize);
  }
  return result;
}
/**
 * Divides every element of this vector by the given scalar.
 *
 * @param scalar the divisor; must not be zero.
 * @return a new vector with each element divided by {@code scalar}.
 * @throws ArithmeticException if {@code scalar} is zero.
 */
@Override
public DoubleVector divide(double scalar) {
  if (scalar == 0d) {
    // Fail fast: floating-point division by zero would otherwise silently
    // produce Infinity/NaN entries. (java.lang types need no qualification.)
    throw new ArithmeticException("/ by zero");
  }
  DenseDoubleVector v = new DenseDoubleVector(this.getLength());
  for (int i = 0; i < v.getLength(); i++) {
    v.set(i, this.get(i) / scalar);
  }
  return v;
}
/**
 * Looks up the transition probability for every consecutive pair of states in
 * the given sequence.
 *
 * @param stateSequence the observed sequence of state indices.
 * @return the transition probabilities for the states.
 */
public DoubleVector getTransitionProbabilities(int[] stateSequence) {
  // One transition per consecutive state pair, hence length - 1 entries.
  DenseDoubleVector distribution = new DenseDoubleVector(
      stateSequence.length - 1);
  final int pairs = distribution.getDimension();
  for (int pair = 0; pair < pairs; pair++) {
    double probability = transitionProbabilities.get(stateSequence[pair],
        stateSequence[pair + 1]);
    distribution.set(pair, probability);
  }
  return distribution;
}
@Override public DoubleVector pow(double x) { DenseDoubleVector v = new DenseDoubleVector(this.getLength()); for (int i = 0; i < v.getLength(); i++) { double value = 0.0d; // it is faster to multiply when we having ^2 if (x == 2d) { value = vector[i] * vector[i]; } else { value = FastMath.pow(vector[i], x); } v.set(i, value); } return v; }
/**
 * Applies the guarded logarithm to every element of the input vector.
 *
 * @param input the vector to transform element-wise.
 * @return a log'd matrix that was guarded against edge cases of the
 *         logarithm.
 */
public static DoubleVector logVector(DoubleVector input) {
  final int dimension = input.getDimension();
  DenseDoubleVector log = new DenseDoubleVector(dimension);
  for (int index = 0; index < dimension; index++) {
    log.set(index, guardedLogarithm(input.get(index)));
  }
  return log;
}
private DenseDoubleVector getProbabilityDistribution(DoubleVector document) { int numClasses = classPriorProbability.getLength(); DenseDoubleVector distribution = new DenseDoubleVector(numClasses); // loop through all classes and get the max probable one for (int i = 0; i < numClasses; i++) { double probability = getProbabilityForClass(document, i); distribution.set(i, probability); } double maxProbability = distribution.max(); double probabilitySum = 0.0d; // we normalize it back for (int i = 0; i < numClasses; i++) { double probability = distribution.get(i); double normalizedProbability = FastMath.exp(probability - maxProbability + classPriorProbability.get(i)); distribution.set(i, normalizedProbability); probabilitySum += normalizedProbability; } // since the sum is sometimes not 1, we need to divide by the sum distribution = (DenseDoubleVector) distribution.divide(probabilitySum); return distribution; }
/**
 * Computes the relevance for each term in U (universe of entities) to the
 * terms in the seedset.
 *
 * @param seedSet S a subset of U, this are the indices where to find the
 *          items in the similarity matrix.
 * @return a vector of length of the universe of entities. Which index
 *         encapsulates the relevance described in the paper as
 *         S_rel(TERM_AT_INDEX_i,S)
 */
private DenseDoubleVector computeRelevanceScore(int[] seedSet) {
  final int termsLength = termNodes.length;
  final DenseDoubleVector relevanceScores = new DenseDoubleVector(termsLength);
  final double constantLoss = 1.0d / seedSet.length;
  for (int i = 0; i < termsLength; i++) {
    // Hoisted out of the inner loop: the column for term i does not depend
    // on the seed index j, so fetch it once per term instead of per pair.
    final DoubleVector columnVectorI = weightMatrix.getColumnVector(i);
    double sum = 0.0d;
    for (int j : seedSet) {
      DoubleVector columnVectorJ = weightMatrix.getColumnVector(j);
      // Missing columns (null) contribute zero similarity.
      double similarity = 0.0d;
      if (columnVectorI != null && columnVectorJ != null) {
        similarity = similarityMeasurer.measureSimilarity(columnVectorI,
            columnVectorJ);
      }
      sum += similarity;
    }
    relevanceScores.set(i, constantLoss * sum);
  }
  return relevanceScores;
}
private DenseDoubleVector getProbabilityDistribution(DoubleVector document) { int numClasses = model.getClassPriorProbability().getLength(); DenseDoubleVector distribution = new DenseDoubleVector(numClasses); // loop through all classes and get the max probable one for (int i = 0; i < numClasses; i++) { double probability = getProbabilityForClass(document, i); distribution.set(i, probability); } double maxProbability = distribution.max(); double probabilitySum = 0.0d; // we normalize it back for (int i = 0; i < numClasses; i++) { double probability = distribution.get(i); double normalizedProbability = FastMath.exp(probability - maxProbability + model.getClassPriorProbability().get(i)); distribution.set(i, normalizedProbability); probabilitySum += normalizedProbability; } // since the sum is sometimes not 1, we need to divide by the sum distribution = (DenseDoubleVector) distribution.divide(probabilitySum); return distribution; }
/**
 * Folds the given matrices column-wise into a single vector.
 *
 * @param matrices the matrices to fold, concatenated in argument order.
 * @return a vector containing every matrix's entries in column-major order.
 */
public static DoubleVector foldMatrices(DoubleMatrix... matrices) {
  // First pass: total number of entries across all matrices.
  int totalLength = 0;
  for (DoubleMatrix matrix : matrices) {
    totalLength += matrix.getRowCount() * matrix.getColumnCount();
  }
  DenseDoubleVector folded = new DenseDoubleVector(totalLength);
  // Second pass: copy each matrix column by column into the vector.
  int cursor = 0;
  for (DoubleMatrix matrix : matrices) {
    final int rows = matrix.getRowCount();
    final int columns = matrix.getColumnCount();
    for (int column = 0; column < columns; column++) {
      for (int row = 0; row < rows; row++) {
        folded.set(cursor++, matrix.get(row, column));
      }
    }
  }
  return folded;
}
// Performs one particle-swarm-optimization update sweep over this worker's
// assigned particle range and returns the (possibly improved) global best.
// NOTE(review): globalCost and globalBestPosition appear to be shared fields
// read and written here without synchronization — confirm that concurrent
// callers operate on disjoint ranges or tolerate this race.
@Override public Tuple<Double, DoubleVector> call() throws Exception {
  // loop over all particles and calculate new positions
  for (int particleIndex = range.getStart(); particleIndex < range.getEnd(); particleIndex++) {
    DoubleVector currentPosition = particlePositions[particleIndex];
    DoubleVector currentBest = particlePersonalBestPositions[particleIndex];
    // New velocity-blended position: inertia + personal memory + group memory,
    // each memory term scaled by a fresh random factor per dimension.
    DenseDoubleVector vec = new DenseDoubleVector(dim);
    for (int index = 0; index < vec.getDimension(); index++) {
      double value = (phi * currentPosition.get(index)) // inertia
          + (alpha * random.nextDouble() * (currentBest.get(index) - currentPosition
              .get(index))) // personal memory
          + (beta * random.nextDouble() * (globalBestPosition.get(index) - currentPosition
              .get(index))); // group memory
      vec.set(index, value);
    }
    particlePositions[particleIndex] = vec;
    // Evaluate the cost function at the particle's new position.
    double cost = f.evaluateCost(vec).getCost();
    // check if we have a personal best
    if (cost < particlePersonalBestCost[particleIndex]) {
      particlePersonalBestCost[particleIndex] = cost;
      particlePersonalBestPositions[particleIndex] = vec;
      // if we had a personal best, do we have a better global?
      if (cost < globalCost) {
        globalCost = cost;
        globalBestPosition = vec;
      }
    }
  }
  return new Tuple<>(globalCost, globalBestPosition);
}
/**
 * Predicts the outcome for the given features by voting among the k nearest
 * neighbours.
 *
 * @return If the number of outcomes is 2 (binary prediction) the returned
 *         vector contains the class id (0 or 1) at the first index. If not, a
 *         histogram of the classes that were predicted.
 */
@Override
public DoubleVector predict(DoubleVector features) {
  List<VectorDistanceTuple<DoubleVector>> neighbours = getNearestNeighbours(
      features, k);
  final boolean binary = (numOutcomes == 2);
  DenseDoubleVector histogram = new DenseDoubleVector(numOutcomes);
  for (VectorDistanceTuple<DoubleVector> neighbour : neighbours) {
    // Binary labels store the class id directly at index 0; otherwise the
    // predicted class is the argmax of the neighbour's outcome vector.
    int classIndex = binary
        ? (int) neighbour.getValue().get(0)
        : neighbour.getValue().maxIndex();
    histogram.set(classIndex, histogram.get(classIndex) + 1);
  }
  return binary
      ? new SingleEntryDoubleVector(histogram.maxIndex())
      : histogram;
}