/**
 * Returns the gradient, re-computing it if the cached gradient is stale. <p>
 *
 * <b>Note</b>: Assumes that <tt>buffer</tt> is already initialized.
 */
public void getValueGradient(double[] buffer) {
  if (cacheIndicator.isGradientStale()) {
    // compute values again if required
    this.getValue();
    // compute gradients again
    try {
      // run all gradient tasks and wait for them to finish
      executor.invokeAll(gradientTasks);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
    }
  }
  // merge the per-batch gradients into the caller's buffer
  optimizable.combineGradients(batchCachedGradient, buffer);
}
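/*
 * A minimal sketch of a CacheStaleIndicator, assuming the interface declares
 * isGradientStale() (called above) and a matching isValueStale() (an
 * assumption; it does not appear in this excerpt). This toy implementation
 * reports everything stale, forcing a full re-computation on every call;
 * a real indicator would track parameter changes instead.
 *
 *   public class AlwaysStaleIndicator implements CacheStaleIndicator {
 *     public boolean isValueStale() { return true; }      // always re-compute values
 *     public boolean isGradientStale() { return true; }   // always re-compute gradients
 *   }
 */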
/**
 * Computes the gradient for one batch; always returns true.
 */
public Boolean call() {
  optimizable.getBatchValueGradient(batchCachedGradient.get(batchIndex),
      batchIndex, batchAssignments);
  return true;
}
}
/**
 * Creates the tasks to be executed in parallel; each task works on one batch
 * of data.
 */
protected void createTasks() {
  int numBatches = optimizable.getNumBatches();
  valueTasks = new ArrayList<Callable<Double>>(numBatches);
  gradientTasks = new ArrayList<Callable<Boolean>>(numBatches);
  // number of instances per batch
  int numBatchInstances = trainingSet.size() / numBatches;
  // batch assignments, as half-open index ranges [start, end)
  int start = -1, end = -1;
  for (int i = 0; i < numBatches; ++i) {
    // compute the index range of this batch
    if (i == 0) {
      start = 0;
      end = start + numBatchInstances;
    } else if (i == numBatches - 1) {
      // the last batch absorbs any remaining instances
      start = end;
      end = trainingSet.size();
    } else {
      start = end;
      end = start + numBatchInstances;
    }
    valueTasks.add(new ValueHandler(i, new int[]{start, end}));
    gradientTasks.add(new GradientHandler(i, new int[]{start, end}));
  }
}
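/*
 * For example, with trainingSet.size() == 10 and numBatches == 3,
 * numBatchInstances == 3 and the loop above produces the half-open ranges
 * [0, 3), [3, 6) and [6, 10): the last batch picks up the remainder left
 * over by the integer division.
 */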
/**
 * Initializes the optimizable and sets up the worker thread pool.
 *
 * @param optimizable Optimizable to be parallelized
 * @param trainingSet Training instances, split into batches across the tasks
 * @param numFactors Number of factors in the model's parameters, used to
 *        initialize the per-batch gradients
 * @param cacheIndicator Determines when the value/gradient become stale
 */
public ThreadedOptimizable(Optimizable.ByCombiningBatchGradient optimizable,
    InstanceList trainingSet, int numFactors,
    CacheStaleIndicator cacheIndicator) {
  // set up
  this.trainingSet = trainingSet;
  this.optimizable = optimizable;

  int numBatches = optimizable.getNumBatches();
  assert(numBatches > 0) : "Invalid number of batches: " + numBatches;
  batchCachedValue = new double[numBatches];
  batchCachedGradient = new ArrayList<double[]>(numBatches);
  for (int i = 0; i < numBatches; ++i) {
    batchCachedGradient.add(new double[numFactors]);
  }

  this.cacheIndicator = cacheIndicator;

  logger.info("Creating " + numBatches + " threads for updating gradient...");
  executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numBatches);
  this.createTasks();
}
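/*
 * A minimal usage sketch, under the assumption that this class implements
 * Optimizable.ByGradientValue (as the delegating methods below suggest) and
 * that the caller already has an Optimizable.ByCombiningBatchGradient
 * implementation. MyBatchOptimizable is hypothetical; AlwaysStaleIndicator is
 * the toy indicator sketched earlier. Passing getNumParameters() as numFactors
 * assumes one factor per model parameter.
 *
 *   Optimizable.ByCombiningBatchGradient batchOpt =
 *       new MyBatchOptimizable(trainingSet, numBatches);        // hypothetical
 *   ThreadedOptimizable threadedOpt = new ThreadedOptimizable(
 *       batchOpt, trainingSet, batchOpt.getNumParameters(),
 *       new AlwaysStaleIndicator());
 *   Optimizer optimizer = new LimitedMemoryBFGS(threadedOpt);
 *   optimizer.optimize();
 */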
public int getNumParameters() {
  return optimizable.getNumParameters();
}
public void setParameter(int index, double value) {
  optimizable.setParameter(index, value);
}
public void getParameters(double[] buffer) {
  optimizable.getParameters(buffer);
}
public void setParameters(double[] buff) {
  optimizable.setParameters(buff);
}
public double getParameter(int index) {
  return optimizable.getParameter(index);
}