/**
 * <p>Default constructor: wraps a newly allocated native {@code CvANN_MLP} object.</p>
 *
 * <p>The network has no topology yet; define one via {@code create} before
 * training or prediction. See "CvANN_MLP.create" for details.</p>
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-cvann-mlp">org.opencv.ml.CvANN_MLP.CvANN_MLP</a>
 */
public CvANN_MLP()
{
    super(CvANN_MLP_0());
}
/**
 * <p>Advanced constructor: wraps a native {@code CvANN_MLP} created with the
 * specified topology and activation function. See "CvANN_MLP.create" for details.</p>
 *
 * @param layerSizes a layerSizes
 * @param activateFunc a activateFunc
 * @param fparam1 a fparam1
 * @param fparam2 a fparam2
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-cvann-mlp">org.opencv.ml.CvANN_MLP.CvANN_MLP</a>
 */
public CvANN_MLP(Mat layerSizes, int activateFunc, double fparam1, double fparam2)
{
    long nativeHandle = CvANN_MLP_1(layerSizes.nativeObj, activateFunc, fparam1, fparam2);
    super(nativeHandle);
}
/**
 * <p>Constructs an MLP wrapper around a native object created with the given
 * layer topology. See "CvANN_MLP.create" for details.</p>
 *
 * @param layerSizes a layerSizes
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-cvann-mlp">org.opencv.ml.CvANN_MLP.CvANN_MLP</a>
 */
public CvANN_MLP(Mat layerSizes)
{
    super(CvANN_MLP_2(layerSizes.nativeObj));
}
/**
 * Clears the underlying native model state by delegating to the
 * native {@code clear_0} implementation.
 */
public void clear()
{
    clear_0(nativeObj);
}
/**
 * Releases the native object backing this instance when the wrapper is
 * garbage-collected.
 *
 * <p>Chains to {@code super.finalize()} in a {@code finally} block so that
 * superclass cleanup still runs even if the native {@code delete} call
 * throws (finalizers that do not chain can silently skip inherited
 * cleanup).</p>
 *
 * @throws Throwable if finalization of this or a superclass fails
 */
@Override
protected void finalize() throws Throwable {
    try {
        delete(nativeObj);
    } finally {
        super.finalize();
    }
}
/**
 * <p>Constructs MLP with the specified topology.</p>
 *
 * <p>Creates an MLP network with the given layer layout; every neuron
 * receives the same (default) activation function.</p>
 *
 * @param layerSizes Integer vector specifying the number of neurons in each
 * layer including the input and output layers.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-create">org.opencv.ml.CvANN_MLP.create</a>
 */
public void create(Mat layerSizes)
{
    create_1(nativeObj, layerSizes.nativeObj);
}
/**
 * <p>Predicts responses for input samples.</p>
 *
 * <p>The returned float is a dummy value and should be ignored; the real
 * results are written into {@code outputs}.</p>
 *
 * <p>If you are using the default <code>cvANN_MLP.SIGMOID_SYM</code> activation
 * function with the default parameter values fparam1=0 and fparam2=0 then the
 * function used is y = 1.7159*tanh(2/3 * x), so the output will range from
 * [-1.7159, 1.7159], instead of [0,1].</p>
 *
 * @param inputs Input samples.
 * @param outputs Predicted responses for corresponding samples.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-predict">org.opencv.ml.CvANN_MLP.predict</a>
 */
public float predict(Mat inputs, Mat outputs)
{
    return predict_0(nativeObj, inputs.nativeObj, outputs.nativeObj);
}
/**
 * <p>Constructs MLP with the specified topology.</p>
 *
 * <p>Creates an MLP network with the given layer layout and assigns the same
 * activation function (with the given free parameters) to every neuron.</p>
 *
 * @param layerSizes Integer vector specifying the number of neurons in each
 * layer including the input and output layers.
 * @param activateFunc Parameter specifying the activation function for each
 * neuron: one of <code>CvANN_MLP.IDENTITY</code>, <code>CvANN_MLP.SIGMOID_SYM</code>,
 * and <code>CvANN_MLP.GAUSSIAN</code>.
 * @param fparam1 Free parameter of the activation function, <em>alpha</em>. See
 * the formulas in the introduction section.
 * @param fparam2 Free parameter of the activation function, <em>beta</em>. See
 * the formulas in the introduction section.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-create">org.opencv.ml.CvANN_MLP.create</a>
 */
public void create(Mat layerSizes, int activateFunc, double fparam1, double fparam2)
{
    long layersHandle = layerSizes.nativeObj;
    create_0(nativeObj, layersHandle, activateFunc, fparam1, fparam2);
}
/**
 * <p>Trains/updates MLP.</p>
 *
 * <p>Applies the configured training algorithm to compute/adjust the network
 * weights and returns the number of iterations performed.</p>
 *
 * <p>The RPROP training algorithm is parallelized with the TBB library.</p>
 *
 * <p>If you are using the default <code>cvANN_MLP.SIGMOID_SYM</code> activation
 * function then the output should be in the range [-1,1], instead of [0,1], for
 * optimal results.</p>
 *
 * @param inputs Floating-point matrix of input vectors, one vector per row.
 * @param outputs Floating-point matrix of the corresponding output vectors, one
 * vector per row.
 * @param sampleWeights (RPROP only) Optional floating-point vector of weights
 * for each sample. Some samples may be more important than others for training.
 * You may want to raise the weight of certain classes to find the right balance
 * between hit-rate and false-alarm rate, and so on.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-train">org.opencv.ml.CvANN_MLP.train</a>
 */
public int train(Mat inputs, Mat outputs, Mat sampleWeights)
{
    return train_1(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj);
}
// NOTE(review): orphaned statement — appears to be a fragment of the full
// train(inputs, outputs, sampleWeights, sampleIdx, params, flags) overload;
// the enclosing method declaration and return are not visible in this chunk,
// so the line is left byte-identical. TODO: confirm against the full file.
int retVal = train_0(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj, sampleIdx.nativeObj, params.nativeObj, flags);
/**
 * Resets the wrapped native model via the {@code clear_0} JNI call.
 */
public void clear()
{
    clear_0(nativeObj);
}
/**
 * Releases the native object backing this instance when the wrapper is
 * garbage-collected.
 *
 * <p>Chains to {@code super.finalize()} in a {@code finally} block so that
 * superclass cleanup still runs even if the native {@code delete} call
 * throws (finalizers that do not chain can silently skip inherited
 * cleanup).</p>
 *
 * @throws Throwable if finalization of this or a superclass fails
 */
@Override
protected void finalize() throws Throwable {
    try {
        delete(nativeObj);
    } finally {
        super.finalize();
    }
}
/**
 * <p>Constructs MLP with the specified topology.</p>
 *
 * <p>Builds the network from the given layer-size vector; all neurons share
 * the same (default) activation function.</p>
 *
 * @param layerSizes Integer vector specifying the number of neurons in each
 * layer including the input and output layers.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-create">org.opencv.ml.CvANN_MLP.create</a>
 */
public void create(Mat layerSizes)
{
    long layersHandle = layerSizes.nativeObj;
    create_1(nativeObj, layersHandle);
}
/**
 * <p>Predicts responses for input samples.</p>
 *
 * <p>The returned float is a dummy value and should be ignored; predictions
 * are written into {@code outputs}.</p>
 *
 * <p>If you are using the default <code>cvANN_MLP.SIGMOID_SYM</code> activation
 * function with the default parameter values fparam1=0 and fparam2=0 then the
 * function used is y = 1.7159*tanh(2/3 * x), so the output will range from
 * [-1.7159, 1.7159], instead of [0,1].</p>
 *
 * @param inputs Input samples.
 * @param outputs Predicted responses for corresponding samples.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-predict">org.opencv.ml.CvANN_MLP.predict</a>
 */
public float predict(Mat inputs, Mat outputs)
{
    float dummy = predict_0(nativeObj, inputs.nativeObj, outputs.nativeObj);
    return dummy;
}
/**
 * <p>Constructs MLP with the specified topology.</p>
 *
 * <p>Builds the network from the given layer-size vector and assigns the same
 * activation function (with the supplied free parameters) to every neuron.</p>
 *
 * @param layerSizes Integer vector specifying the number of neurons in each
 * layer including the input and output layers.
 * @param activateFunc Parameter specifying the activation function for each
 * neuron: one of <code>CvANN_MLP.IDENTITY</code>, <code>CvANN_MLP.SIGMOID_SYM</code>,
 * and <code>CvANN_MLP.GAUSSIAN</code>.
 * @param fparam1 Free parameter of the activation function, <em>alpha</em>. See
 * the formulas in the introduction section.
 * @param fparam2 Free parameter of the activation function, <em>beta</em>. See
 * the formulas in the introduction section.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-create">org.opencv.ml.CvANN_MLP.create</a>
 */
public void create(Mat layerSizes, int activateFunc, double fparam1, double fparam2)
{
    create_0(nativeObj, layerSizes.nativeObj, activateFunc, fparam1, fparam2);
}
/**
 * <p>Trains/updates MLP.</p>
 *
 * <p>Runs the configured training algorithm to compute/adjust the network
 * weights; the return value is the number of iterations performed.</p>
 *
 * <p>The RPROP training algorithm is parallelized with the TBB library.</p>
 *
 * <p>If you are using the default <code>cvANN_MLP.SIGMOID_SYM</code> activation
 * function then the output should be in the range [-1,1], instead of [0,1], for
 * optimal results.</p>
 *
 * @param inputs Floating-point matrix of input vectors, one vector per row.
 * @param outputs Floating-point matrix of the corresponding output vectors, one
 * vector per row.
 * @param sampleWeights (RPROP only) Optional floating-point vector of weights
 * for each sample. Some samples may be more important than others for training.
 * You may want to raise the weight of certain classes to find the right balance
 * between hit-rate and false-alarm rate, and so on.
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-train">org.opencv.ml.CvANN_MLP.train</a>
 */
public int train(Mat inputs, Mat outputs, Mat sampleWeights)
{
    int iterations = train_1(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj);
    return iterations;
}
// NOTE(review): orphaned statement — appears to be a fragment of the full
// train(inputs, outputs, sampleWeights, sampleIdx, params, flags) overload;
// the enclosing method declaration and return are not visible in this chunk,
// so the line is left byte-identical. TODO: confirm against the full file.
int retVal = train_0(nativeObj, inputs.nativeObj, outputs.nativeObj, sampleWeights.nativeObj, sampleIdx.nativeObj, params.nativeObj, flags);
/**
 * Delegates to the native {@code clear_0} call to reset the wrapped model.
 */
public void clear()
{
    clear_0(nativeObj);
}
/**
 * Releases the native object backing this instance when the wrapper is
 * garbage-collected.
 *
 * <p>Chains to {@code super.finalize()} in a {@code finally} block so that
 * superclass cleanup still runs even if the native {@code delete} call
 * throws (finalizers that do not chain can silently skip inherited
 * cleanup).</p>
 *
 * @throws Throwable if finalization of this or a superclass fails
 */
@Override
protected void finalize() throws Throwable {
    try {
        delete(nativeObj);
    } finally {
        super.finalize();
    }
}
/**
 * <p>Default constructor: allocates a native {@code CvANN_MLP} with no
 * topology defined. Configure the network via {@code create} before use.
 * See "CvANN_MLP.create" for details.</p>
 *
 * @see <a href="http://docs.opencv.org/modules/ml/doc/neural_networks.html#cvann-mlp-cvann-mlp">org.opencv.ml.CvANN_MLP.CvANN_MLP</a>
 */
public CvANN_MLP()
{
    super(CvANN_MLP_0());
}