.updater(new Nesterovs(0.1)) // To configure: .updater(Nesterovs.builder().momentum(0.9).build())
.l2(1e-4)
.list()
.layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(500).build())
.layer(1, new DenseLayer.Builder().nIn(500).nOut(100).build())
.weightInit(WeightInit.XAVIER)
.updater(new RmsProp(0.1))
.list()
.layer(0, new LSTM.Builder().nIn(CHAR_TO_INT.size()).nOut(lstmLayerSize).activation(Activation.TANH).build())
.layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize).activation(Activation.TANH).build())
.updater(new Nesterovs(0.02)) // To configure: .updater(Nesterovs.builder().momentum(0.9).build())
.l2(1e-4)
.list()
.layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(500).build())
.layer(1, new DenseLayer.Builder().nIn(500).nOut(100).build())
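Dense-layer fragments like the two above only compile inside a full NeuralNetConfiguration.Builder call. A minimal sketch of the surrounding context they typically sit in; the seed, activations, and the 10-class MNIST-style output layer are illustrative assumptions, not part of the fragments:

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .seed(12345)                 // assumed seed, for reproducibility
        .updater(new Nesterovs(0.1))
        .l2(1e-4)
        .list()
        .layer(0, new DenseLayer.Builder().nIn(28 * 28).nOut(500).activation(Activation.RELU).build())
        .layer(1, new DenseLayer.Builder().nIn(500).nOut(100).activation(Activation.RELU).build())
        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .activation(Activation.SOFTMAX)
                .nIn(100).nOut(10).build()) // assumed 10-class output
        .build();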
public void buildModel() {
    if (model == null) {
        int iterations = 1000;
        long seed = 6;
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(iterations)
                .activation(Activation.TANH)
                .weightInit(WeightInit.XAVIER)
                .learningRate(0.1)
                .regularization(true).l2(1e-4)
                .list()
                .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(3).build())
                .layer(1, new DenseLayer.Builder().nIn(3).nOut(3).build())
                .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MEAN_SQUARED_LOGARITHMIC_ERROR)
                        .activation(Activation.SOFTMAX)
                        .nIn(3).nOut(numClasses).build())
                .backprop(true).pretrain(false)
                .build();

        // Build and initialize the model
        model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(iterationListener);
    }
}
public static void main(String[] args) {
    //Generate the training data
    DataSetIterator iterator = getTrainingData(batchSize, rng);

    //Create the network
    int numInput = 2;
    int numOutputs = 1;
    MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.XAVIER)
            .updater(new Sgd(learningRate))
            .list()
            .layer(0, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numInput).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build()
    );
    net.init();
    net.setListeners(new ScoreIterationListener(1));

    for (int i = 0; i < nEpochs; i++) {
        iterator.reset();
        net.fit(iterator);
    }

    final INDArray input = Nd4j.create(new double[]{0.111111, 0.3333333333333}, new int[]{1, 2});
    INDArray out = net.output(input, false);
    System.out.println(out);
}
public static void main(String[] args) throws Exception {
    //Define a simple MultiLayerNetwork:
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(0.01, 0.9))
            .list()
            .layer(0, new DenseLayer.Builder().nIn(4).nOut(3).activation(Activation.TANH).build())
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation(Activation.SOFTMAX).nIn(3).nOut(3).build())
            .backprop(true).pretrain(false).build();
    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.init();

    //Save the model
    File locationToSave = new File("model/MyMultiLayerNetwork.zip"); //Where to save the network. Note: the file is in .zip format - can be opened externally
    /**
     * Controls whether the model's updater state is saved.
     * If you intend to keep training the model after saving it, set this to true so that
     * incremental updates can be made with later data.
     * If no further training is planned once the model is finalized, set it to false.
     */
    boolean saveUpdater = true; //Updater: i.e., the state for Momentum, RMSProp, Adagrad etc. Save this if you want to train your network more in the future
    ModelSerializer.writeModel(net, locationToSave, saveUpdater);

    //Load the model
    MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(locationToSave);

    System.out.println("Saved and loaded parameters are equal: " + net.params().equals(restored.params()));
    System.out.println("Saved and loaded configurations are equal: " + net.getLayerWiseConfigurations().equals(restored.getLayerWiseConfigurations()));
}
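As a follow-on to the snippet above, a minimal sketch of reloading the network together with its updater state and continuing training; the trainingData iterator is a hypothetical placeholder, not part of the original example:

// Load both the parameters and the updater state (second argument = true)
MultiLayerNetwork toTrainFurther = ModelSerializer.restoreMultiLayerNetwork(locationToSave, true);

// Continue fitting with new data; trainingData is an assumed DataSetIterator
toTrainFurther.fit(trainingData);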
public MultiLayerConfiguration createNetwork() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(1)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate).regularization(regularization).l2(regularizationRate)
            .updater(Updater.ADAGRAD)
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .weightInit(WEIGHT_INIT)
                    .activation("relu").learningRateDecayPolicy(learningRatePolicy)
                    .build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .weightInit(WEIGHT_INIT)
                    .activation("relu").learningRateDecayPolicy(learningRatePolicy)
                    .build())
            .layer(2, new OutputLayer.Builder(lossFunction)
                    .weightInit(WEIGHT_INIT)
                    .activation("softmax").learningRateDecayPolicy(learningRatePolicy)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
    return conf;
}
/** Returns the network configuration: 2 hidden DenseLayers of size 100. */
private static MultiLayerConfiguration getDeepDenseLayerNetworkConfiguration() {
    final int numHiddenNodes = 100;
    return new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .weightInit(WeightInit.XAVIER)
            .updater(new Nesterovs(learningRate, 0.9))
            .list()
            .layer(0, new DenseLayer.Builder().nIn(numInputs).nOut(numHiddenNodes)
                    .activation(Activation.RELU).build())
            .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes)
                    .activation(Activation.RELU).build())
            .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .activation(Activation.IDENTITY)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false).backprop(true).build();
}
public MultiLayerConfiguration conf() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
            .learningRate(0.01).seed(12345).regularization(true).l2(0.001).weightInit(WeightInit.XAVIER)
            .updater(new RmsProp()).list()
            .layer(0, new GravesLSTM.Builder().nIn(inputShape[1]).nOut(256).activation(Activation.TANH)
                    .build())
            .layer(1, new GravesLSTM.Builder().nOut(256).activation(Activation.TANH).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .activation(Activation.SOFTMAX) //MCXENT + softmax for classification
                    .nOut(totalUniqueCharacters).build())
            .backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(50).tBPTTBackwardLength(50)
            .pretrain(false).backprop(true).build();
    return conf;
}
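The conf() method above uses truncated BPTT with forward and backward lengths of 50, so gradients propagate at most 50 time steps back. A minimal usage sketch under assumed names (trainIter as a DataSetIterator of sequences and numEpochs as an epoch count, neither of which appears in the original):

MultiLayerNetwork net = new MultiLayerNetwork(conf());
net.init();
net.setListeners(new ScoreIterationListener(10)); // log the score every 10 iterations

// With BackpropType.TruncatedBPTT, fit() segments each sequence into
// tBPTT-length chunks internally; no extra handling is needed here.
for (int epoch = 0; epoch < numEpochs; epoch++) {
    net.fit(trainIter);
    trainIter.reset();
}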
.updater(new Nesterovs.Builder().learningRate(.01).build())
.biasUpdater(new Nesterovs.Builder().learningRate(0.02).build())
.list()
.layer(0, new ConvolutionLayer.Builder(5, 5)
.l2(0.001) //l2 regularization on all layers
.updater(new AdaGrad.Builder().learningRate(0.04).build())
.list()
.layer(0, new ConvolutionLayer.Builder(10, 10)
        .nIn(3) //3 channels: RGB
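The convolutional fragments above break off before the chain is closed. A minimal sketch of how such a chain is typically completed; the pooling and output layers, layer sizes, and the 28x28 single-channel input type are illustrative assumptions, not taken from the fragments:

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .l2(0.001)
        .updater(new Nesterovs.Builder().learningRate(0.01).build())
        .list()
        .layer(0, new ConvolutionLayer.Builder(5, 5)
                .nIn(1).nOut(20).activation(Activation.RELU).build())
        .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                .kernelSize(2, 2).stride(2, 2).build())
        .layer(2, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                .nOut(10).activation(Activation.SOFTMAX).build())
        .setInputType(InputType.convolutional(28, 28, 1)) // lets DL4J infer nIn for later layers
        .build();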
.weightInit(WeightInit.XAVIER)
.updater(new RmsProp.Builder().learningRate(0.1).build())
.list()
.layer(0, new LSTM.Builder().nIn(iter.inputColumns()).nOut(lstmLayerSize)
        .activation(Activation.TANH).build())
.weightInit(WeightInit.XAVIER)
.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
.gradientNormalizationThreshold(1.0)
.list()
.layer(0, new LSTM.Builder().nIn(vectorSize).nOut(256)
        .activation(Activation.TANH).build())
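The fragment above clips each gradient element to [-1.0, 1.0], a common guard against exploding gradients in recurrent networks. A sketch of how the rest of such a chain might look for a two-class sequence classifier; the updater choice and the RnnOutputLayer are illustrative assumptions:

MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
        .updater(new Adam(2e-3)) // assumed updater and learning rate
        .weightInit(WeightInit.XAVIER)
        .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
        .gradientNormalizationThreshold(1.0)
        .list()
        .layer(0, new LSTM.Builder().nIn(vectorSize).nOut(256)
                .activation(Activation.TANH).build())
        .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                .activation(Activation.SOFTMAX).nIn(256).nOut(2).build()) // assumed 2 classes
        .build();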
.regularization(true)
.l2(lambdaReg)
.list()
.layer(0, new CDAELayer.Builder().nIn(inputDim).nOut(hiddenDim)
        .activation(Activation.fromString(hiddenActivation))
public static MultiLayerNetwork lenetModel() {
    /**
     * Revised LeNet model approach developed by ramgo2; achieves accuracy slightly above random.
     * Reference: https://gist.github.com/ramgo2/833f12e92359a2da9e5c2fb6333351c5
     **/
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .l2(0.005) // tried 0.0001, 0.0005
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Nesterovs(0.0001, 0.9))
            .list()
            .layer(0, new ConvolutionLayer.Builder(new int[]{5, 5}, new int[]{1, 1}, new int[]{0, 0}).name("cnn1")
                    .nIn(channels).nOut(50).biasInit(0).build())
            .layer(1, new SubsamplingLayer.Builder(new int[]{2, 2}, new int[]{2, 2}).name("maxpool1").build())
            .layer(2, new ConvolutionLayer.Builder(new int[]{5, 5}, new int[]{5, 5}, new int[]{1, 1}).name("cnn2")
                    .nOut(100).biasInit(0).build())
            .layer(3, new SubsamplingLayer.Builder(new int[]{2, 2}, new int[]{2, 2}).name("maxpool2").build())
            .layer(4, new DenseLayer.Builder().nOut(500).build())
            .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(4)
                    .activation(Activation.SOFTMAX)
                    .build())
            .backprop(true).pretrain(false)
            .setInputType(InputType.convolutional(height, width, channels))
            .build();
    return new MultiLayerNetwork(conf);
}
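A minimal sketch of putting the returned network to work; the trainIter and testIter iterators and the numEpochs constant are assumed placeholders not defined in the original:

MultiLayerNetwork network = lenetModel();
network.init();
network.setListeners(new ScoreIterationListener(50));

for (int epoch = 0; epoch < numEpochs; epoch++) {
    network.fit(trainIter);   // trainIter: assumed training DataSetIterator
    trainIter.reset();
}

// Evaluate on held-out data; testIter is an assumed test DataSetIterator
Evaluation eval = network.evaluate(testIter);
System.out.println(eval.stats());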
.regularization(true)
.l2(lambdaReg)
.list()
.layer(0, new DenseLayer.Builder().nIn(inputDim).nOut(hiddenDim)
public static MultiLayerConfiguration lenetModelConf() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .l2(0.005)
            .activation(Activation.RELU)
            .weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .updater(new Nesterovs(0.0001, 0.9))
            .list()
            .layer(0, new ConvolutionLayer.Builder(new int[]{5, 5}, new int[]{1, 1}, new int[]{0, 0}).name("cnn1")
                    .nIn(channels).nOut(50).biasInit(0).build())
            .layer(1, new SubsamplingLayer.Builder(new int[]{2, 2}, new int[]{2, 2}).name("maxpool1").build())
            .layer(2, new ConvolutionLayer.Builder(new int[]{5, 5}, new int[]{5, 5}, new int[]{1, 1}).name("cnn2")
                    .nOut(100).biasInit(0).build())
            .layer(3, new SubsamplingLayer.Builder(new int[]{2, 2}, new int[]{2, 2}).name("maxpool2").build())
            .layer(4, new DenseLayer.Builder().nOut(500).build())
            .layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nOut(4)
                    .activation(Activation.SOFTMAX)
                    .build())
            .backprop(true).pretrain(false)
            .setInputType(InputType.convolutional(height, width, channels))
            .build();
    return conf;
}

public static void saveModel(FileSystem fs, Model model) throws Exception {
private static MultiLayerConfiguration getConfiguration() {
    int lstmLayerSize = 200; //Number of units in each LSTM layer
    int tbpttLength = 50;    //Length for truncated backpropagation through time, i.e., do parameter updates every 50 characters

    Map<Character, Integer> CHAR_TO_INT = SparkLSTMCharacterExample.getCharToInt();
    int nIn = CHAR_TO_INT.size();
    int nOut = CHAR_TO_INT.size();

    //Set up network configuration:
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .updater(new Nesterovs(0.1))
            .seed(12345)
            .l2(0.001)
            .weightInit(WeightInit.XAVIER)
            .list()
            .layer(0, new LSTM.Builder().nIn(nIn).nOut(lstmLayerSize).activation(Activation.TANH).build())
            .layer(1, new LSTM.Builder().nIn(lstmLayerSize).nOut(lstmLayerSize).activation(Activation.TANH).build())
            .layer(2, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MCXENT).activation(Activation.SOFTMAX) //MCXENT + softmax for classification
                    .nIn(lstmLayerSize).nOut(nOut).build())
            .backpropType(BackpropType.TruncatedBPTT).tBPTTForwardLength(tbpttLength).tBPTTBackwardLength(tbpttLength)
            .pretrain(false).backprop(true)
            .build();
    return conf;
}
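Since this configuration comes from SparkLSTMCharacterExample, it is presumably handed to a Spark training wrapper. A minimal sketch of that pattern; the JavaSparkContext sc, the JavaRDD<DataSet> trainingData, and the batch size of 32 per worker are all assumptions for illustration:

// Parameter averaging training master; 32 examples per worker per fit
ParameterAveragingTrainingMaster tm = new ParameterAveragingTrainingMaster.Builder(32)
        .averagingFrequency(5) // average parameters every 5 minibatches
        .build();

SparkDl4jMultiLayer sparkNet = new SparkDl4jMultiLayer(sc, getConfiguration(), tm);
MultiLayerNetwork trained = sparkNet.fit(trainingData); // trainingData: assumed JavaRDD<DataSet>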