.layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 4))).nOut(numOutputs).build()) .pretrain(false).backprop(true);
.layer(5, new OutputLayer.Builder(lossFunction) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 4))).nOut(numOutputs).build()) .pretrain(false).backprop(true);
.activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 4))).nOut(numOutputs).build()) .pretrain(false).backprop(true);
.addLayer("isMutated", new OutputLayer.Builder(domainDescriptor.getOutputLoss("isMutated")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(nOut4).nOut(numOutputsIsMutated).build(), "dense5") .addLayer("somaticFrequency", new OutputLayer.Builder(domainDescriptor.getOutputLoss("somaticFrequency")) .weightInit(WEIGHT_INIT) .activation("identity").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(nOut4).nOut(numOutputsSomaticFrequency).build(), "dense5") .setOutputs(getOutputNames())
.addLayer("isMutated", new OutputLayer.Builder(domainDescriptor.getOutputLoss("isMutated")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(nOut4).nOut(numOutputsIsMutated).build(), "dense5") .addLayer("isMutated", new OutputLayer.Builder(domainDescriptor.getOutputLoss("isMutated")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(nOut4).nOut(numOutputsIsMutated).build(), "dense5") .setOutputs(getOutputNames())
.addLayer("isBaseMutated", new OutputLayer.Builder(domainDescriptor.getOutputLoss("isBaseMutated")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 4))).nOut(numOutputsIsMutated).build(), "dense5") .addLayer("somaticFrequency", new OutputLayer.Builder(domainDescriptor.getOutputLoss("somaticFrequency")) .weightInit(WEIGHT_INIT) .activation("identity").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 4))).nOut(numOutputsSomaticFrequency).build(), "dense5") .setOutputs(getOutputNames())
domainDescriptor.getOutputLoss("softmaxGenotype")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs("softmaxGenotype")[0]).build(), lastDenseLayerName).addInputs(); .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT) .learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs("numDistinctAlleles")[0]) build.addLayer("homozygous", new OutputLayer.Builder(domainDescriptor.getOutputLoss("homozygous")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn).nOut(11).build(), lastDenseLayerName); break; build.addLayer(combined, new OutputLayer.Builder(domainDescriptor.getOutputLoss(combined)) .weightInit(WEIGHT_INIT) .activation(combined).weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs(combined)[0]).build(), lastDenseLayerName); build.addLayer(outputName, new OutputLayer.Builder(domainDescriptor.getOutputLoss(outputName)) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn).nOut(2).build(), lastDenseLayerName);
.weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT) .learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs("numDistinctAlleles")[0]) build.addLayer("homozygous", new OutputLayer.Builder(domainDescriptor.getOutputLoss("homozygous")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn).nOut(11).build(), lastDenseLayerName); build.addLayer(outputName, new OutputLayer.Builder(domainDescriptor.getOutputLoss(outputName)) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn).nOut(2).build(), lastDenseLayerName); build.addLayer(combined, new OutputLayer.Builder(domainDescriptor.getOutputLoss(combined)) .weightInit(WEIGHT_INIT) .activation(combined).weightInit(WEIGHT_INIT).learningRateDecayPolicy(LEARNING_RATE_POLICY) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs(combined)[0]).build(), lastDenseLayerName);
.layer(3, new OutputLayer.Builder(lossFunction) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes * Math.pow(reduction, 2))).nOut(numOutputs).build()) .pretrain(false).backprop(true);
.layer(5, new OutputLayer.Builder(lossFunction) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn((int) (numHiddenNodes)).nOut(numOutputs).build()) .pretrain(false).backprop(true);
domainDescriptor.getOutputLoss("numDistinctAlleles")) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(numIn) .nOut(domainDescriptor.getNumOutputs("numDistinctAlleles")[0]).build(), lastDenseLayerName).addInputs(); domainDescriptor.getOutputLoss(outputNames[i])) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(numIn).nOut(domainDescriptor.getNumOutputs(outputNames[i])[0]).build(), lastDenseLayerName);
@Override
public ComputationGraph createComputationalGraph(DomainDescriptor domainDescriptor) {
    // Polynomial learning-rate decay is applied to every output layer.
    LearningRatePolicy learningRatePolicy = LearningRatePolicy.Poly;
    layerAssembler.setLearningRatePolicy(learningRatePolicy);
    layerAssembler.initializeBuilder();
    int numInputs = domainDescriptor.getNumInputs("input")[0];
    int numHiddenNodes = domainDescriptor.getNumHiddenNodes("firstDense");
    // Delegate construction of the shared dense trunk to the layer assembler.
    ComputationGraphConfiguration.GraphBuilder build =
            layerAssembler.assemble(numInputs, numHiddenNodes, numLayers);
    int numIn = layerAssembler.getNumOutputs();
    WeightInit WEIGHT_INIT = WeightInit.XAVIER;
    String lastDenseLayerName = layerAssembler.lastLayerName();
    // "homozygous" head: 11-way softmax classification.
    // Fix: .weightInit(WEIGHT_INIT) was previously called twice on this
    // builder chain; the duplicate call was redundant and has been removed.
    build.addLayer("homozygous", new OutputLayer.Builder(
            domainDescriptor.getOutputLoss("homozygous"))
            .weightInit(WEIGHT_INIT)
            .activation("softmax")
            .learningRateDecayPolicy(learningRatePolicy)
            .nIn(numIn).nOut(11).build(), lastDenseLayerName);
    // Remaining named outputs are binary (nOut == 2) softmax heads,
    // all attached to the last dense layer of the trunk.
    for (int i = 1; i < outputNames.length; i++) {
        build.addLayer(outputNames[i], new OutputLayer.Builder(
                domainDescriptor.getOutputLoss(outputNames[i]))
                .weightInit(WEIGHT_INIT)
                .activation("softmax")
                .learningRateDecayPolicy(learningRatePolicy)
                .nIn(numIn).nOut(2).build(), lastDenseLayerName);
    }
    appendMetaDataLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    appendIsVariantLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    ComputationGraphConfiguration conf = build
            .setOutputs(outputNames)
            .build();
    return new ComputationGraph(conf);
}
.layer(5, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .weightInit(WEIGHT_INIT) .activation("softmax").weightInit(WEIGHT_INIT).learningRateDecayPolicy(learningRatePolicy) .nIn(numHiddenNodes).nOut(numOutputs).build()) .pretrain(false).backprop(true);
/**
 * Builds a feed-forward network: three ReLU dense layers of equal width
 * followed by a sigmoid output layer trained with MSE loss.
 * SGD with ADAGRAD updates, optional L2 regularization, single iteration
 * per fit call, and a fixed seed for reproducibility.
 *
 * @return the assembled {@link MultiLayerConfiguration}
 */
public MultiLayerConfiguration createNetwork() {
    NeuralNetConfiguration.ListBuilder listBuilder = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(1)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .regularization(regularization)
            .l2(regularizationRate)
            .updater(Updater.ADAGRAD)
            .list();
    // Three identically configured hidden layers; only the first takes
    // the raw input width, the rest are hidden-to-hidden.
    for (int layerIndex = 0; layerIndex < 3; layerIndex++) {
        int fanIn = (layerIndex == 0) ? numInputs : numHiddenNodes;
        listBuilder = listBuilder.layer(layerIndex, new DenseLayer.Builder()
                .nIn(fanIn).nOut(numHiddenNodes)
                .weightInit(WEIGHT_INIT)
                .activation("relu")
                .learningRateDecayPolicy(learningRatePolicy)
                .build());
    }
    // Regression-style head: sigmoid activation with MSE loss.
    return listBuilder
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                    .weightInit(WEIGHT_INIT)
                    .activation("sigmoid")
                    .learningRateDecayPolicy(learningRatePolicy)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false)
            .backprop(true)
            .build();
} }
@Override
public ComputationGraph createComputationalGraph(DomainDescriptor domainDescriptor) {
    // Polynomial learning-rate decay is applied to every output layer.
    LearningRatePolicy learningRatePolicy = LearningRatePolicy.Poly;
    layerAssembler.setLearningRatePolicy(learningRatePolicy);
    layerAssembler.initializeBuilder();
    int numInputs = domainDescriptor.getNumInputs("input")[0];
    int numHiddenNodes = domainDescriptor.getNumHiddenNodes("firstDense");
    ComputationGraphConfiguration.GraphBuilder build =
            layerAssembler.assemble(numInputs, numHiddenNodes, numLayers);
    int numIn = layerAssembler.getNumOutputs();
    WeightInit WEIGHT_INIT = WeightInit.XAVIER;
    String lastDenseLayerName = layerAssembler.lastLayerName();
    // The combined genotype head is renamed when the reference allele is fixed.
    String combined = fixRef ? "combinedRef" : "combined";
    // Fix: .weightInit(WEIGHT_INIT) was previously called twice on this
    // builder chain; the duplicate call was redundant and has been removed.
    build.addLayer(combined, new OutputLayer.Builder(
            domainDescriptor.getOutputLoss(combined))
            .weightInit(WEIGHT_INIT)
            .activation("softmax")
            .learningRateDecayPolicy(learningRatePolicy)
            .nIn(numIn)
            .nOut(domainDescriptor.getNumOutputs(combined)[0]).build(), lastDenseLayerName);
    appendMetaDataLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    appendIsVariantLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    ComputationGraphConfiguration conf = build
            .setOutputs(getOutputNames())
            .build();
    return new ComputationGraph(conf);
}
/**
 * Builds a feed-forward classifier: two ReLU dense layers of equal width
 * followed by a softmax output layer using the configured loss function.
 * SGD with ADAGRAD updates, optional L2 regularization, single iteration
 * per fit call, and a fixed seed for reproducibility.
 *
 * @return the assembled {@link MultiLayerConfiguration}
 */
public MultiLayerConfiguration createNetwork() {
    NeuralNetConfiguration.ListBuilder listBuilder = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(1)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(learningRate)
            .regularization(regularization)
            .l2(regularizationRate)
            .updater(Updater.ADAGRAD)
            .list();
    // Two identically configured hidden layers; only the first takes
    // the raw input width, the second is hidden-to-hidden.
    for (int layerIndex = 0; layerIndex < 2; layerIndex++) {
        int fanIn = (layerIndex == 0) ? numInputs : numHiddenNodes;
        listBuilder = listBuilder.layer(layerIndex, new DenseLayer.Builder()
                .nIn(fanIn).nOut(numHiddenNodes)
                .weightInit(WEIGHT_INIT)
                .activation("relu")
                .learningRateDecayPolicy(learningRatePolicy)
                .build());
    }
    // Classification head: softmax over numOutputs classes.
    return listBuilder
            .layer(2, new OutputLayer.Builder(lossFunction)
                    .weightInit(WEIGHT_INIT)
                    .activation("softmax")
                    .learningRateDecayPolicy(learningRatePolicy)
                    .nIn(numHiddenNodes).nOut(numOutputs).build())
            .pretrain(false)
            .backprop(true)
            .build();
} }
@Override
public ComputationGraph createComputationalGraph(DomainDescriptor domainDescriptor) {
    // Polynomial learning-rate decay is applied to every output layer.
    LearningRatePolicy learningRatePolicy = LearningRatePolicy.Poly;
    layerAssembler.setLearningRatePolicy(learningRatePolicy);
    layerAssembler.initializeBuilder();
    int numInputs = domainDescriptor.getNumInputs("input")[0];
    int numHiddenNodes = domainDescriptor.getNumHiddenNodes("firstDense");
    ComputationGraphConfiguration.GraphBuilder build =
            layerAssembler.assemble(numInputs, numHiddenNodes, numLayers);
    int numIn = layerAssembler.getNumOutputs();
    WeightInit WEIGHT_INIT = WeightInit.XAVIER;
    String lastDenseLayerName = layerAssembler.lastLayerName();
    // "softmaxGenotype" head: softmax over the genotype output dimension.
    // Fixes: (1) .weightInit(WEIGHT_INIT) was previously called twice on this
    // builder chain — the duplicate was redundant and has been removed;
    // (2) a zero-argument .addInputs() was chained after addLayer — with no
    // arguments it is a no-op and has been removed.
    build.addLayer("softmaxGenotype", new OutputLayer.Builder(
            domainDescriptor.getOutputLoss("softmaxGenotype"))
            .weightInit(WEIGHT_INIT)
            .activation("softmax")
            .learningRateDecayPolicy(learningRatePolicy)
            .nIn(numIn)
            .nOut(domainDescriptor.getNumOutputs("softmaxGenotype")[0]).build(), lastDenseLayerName);
    appendMetaDataLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    appendIsVariantLayer(domainDescriptor, learningRatePolicy, build, numIn, WEIGHT_INIT, lastDenseLayerName);
    ComputationGraphConfiguration conf = build
            .setOutputs(outputNames)
            .build();
    return new ComputationGraph(conf);
}
/**
 * Attaches the "metaData" softmax output head to the graph, connected to the
 * last dense layer. Loss and output width come from the domain descriptor.
 *
 * @param domainDescriptor   source of the output loss and output count
 * @param learningRatePolicy decay policy applied to this layer
 * @param build              graph builder to add the layer to
 * @param numIn              fan-in (outputs of the last dense layer)
 * @param WEIGHT_INIT        weight initialization scheme
 * @param lastDenseLayerName name of the layer this head connects to
 */
protected void appendMetaDataLayer(DomainDescriptor domainDescriptor, LearningRatePolicy learningRatePolicy,
                                   ComputationGraphConfiguration.GraphBuilder build, int numIn,
                                   WeightInit WEIGHT_INIT, String lastDenseLayerName) {
    // Fix: .weightInit(WEIGHT_INIT) was previously called twice on this
    // builder chain; the duplicate call was redundant and has been removed.
    build.addLayer("metaData", new OutputLayer.Builder(
            domainDescriptor.getOutputLoss("metaData"))
            .weightInit(WEIGHT_INIT)
            .activation("softmax")
            .learningRateDecayPolicy(learningRatePolicy)
            .nIn(numIn)
            .nOut(domainDescriptor.getNumOutputs("metaData")[0]).build(), lastDenseLayerName);
}
/**
 * Conditionally attaches the "isVariant" softmax output head to the graph,
 * connected to the last dense layer. The layer is added only when the model
 * is configured with an is-variant output ({@code hasIsVariant}).
 *
 * @param domainDescriptor   source of the output loss and output count
 * @param learningRatePolicy decay policy applied to this layer
 * @param build              graph builder to add the layer to
 * @param numIn              fan-in (outputs of the last dense layer)
 * @param WEIGHT_INIT        weight initialization scheme
 * @param lastDenseLayerName name of the layer this head connects to
 */
protected void appendIsVariantLayer(DomainDescriptor domainDescriptor, LearningRatePolicy learningRatePolicy,
                                    ComputationGraphConfiguration.GraphBuilder build, int numIn,
                                    WeightInit WEIGHT_INIT, String lastDenseLayerName) {
    if (hasIsVariant) {
        // Fix: .weightInit(WEIGHT_INIT) was previously called twice on this
        // builder chain; the duplicate call was redundant and has been removed.
        build.addLayer("isVariant", new OutputLayer.Builder(
                domainDescriptor.getOutputLoss("isVariant"))
                .weightInit(WEIGHT_INIT)
                .activation("softmax")
                .learningRateDecayPolicy(learningRatePolicy)
                .nIn(numIn)
                .nOut(domainDescriptor.getNumOutputs("isVariant")[0]).build(), lastDenseLayerName);
    }
}