/**
 * Builds the CNN-for-text computation graph: one 1D convolution per n-gram size in
 * {@code ngramFilters}, each followed by global max pooling, all branches merged and
 * fed through a softmax prediction layer into an MCXENT loss layer.
 *
 * @return the assembled {@code ComputationGraphConfiguration}
 */
public static ComputationGraphConfiguration getConf() {
    ComputationGraphConfiguration.GraphBuilder graph = new NeuralNetConfiguration.Builder()
            .seed(12345)
            .updater(new Adam(0.01))
            .weightInit(WeightInit.RELU)
            .graphBuilder()
            .addInputs("in");

    // One conv + global-max-pool branch per n-gram size; remember pool names for the merge vertex.
    String[] poolNames = new String[ngramFilters.length];
    int idx = 0;
    for (int ngram : ngramFilters) {
        String convName = String.format("ngram%d", ngram);
        poolNames[idx] = String.format("pool%d", ngram);
        graph = graph
                .addLayer(convName,
                        new Convolution1DLayer.Builder()
                                .nOut(numFilters)
                                .kernelSize(ngram)
                                .activation(Activation.RELU)
                                .build(),
                        "in")
                .addLayer(poolNames[idx],
                        new GlobalPoolingLayer.Builder(PoolingType.MAX).build(),
                        convName);
        idx++;
    }

    // Merge every pooled branch, then classify over numClasses outputs.
    return graph
            .addVertex("concat", new MergeVertex(), poolNames)
            .addLayer("predict",
                    new DenseLayer.Builder()
                            .nOut(numClasses)
                            .dropOut(dropoutRetain)
                            .activation(Activation.SOFTMAX)
                            .build(),
                    "concat")
            .addLayer("loss",
                    new LossLayer.Builder(LossFunctions.LossFunction.MCXENT).build(),
                    "predict")
            .setOutputs("loss")
            .setInputTypes(InputType.recurrent(W2V_VECTOR_SIZE, 1000))
            .build();
}
}
// Register the collected input types on the graph builder.
// toArray(new InputType[0]) sizes the result itself — clearer and no slower than the
// pre-sized allocate-then-fill two-step form it replaces.
InputType[] inputTypes = inputTypeList.toArray(new InputType[0]);
graphBuilder.setInputTypes(inputTypes);
// Recurrent input: numLSTMInputs features per time step, numTimeSteps steps.
// NOTE(review): the two names appear to track the layer currently wired as input to the
// next LSTM layer; "no layer" presumably marks that none has been added yet — confirm
// against the layer-building loop that follows this fragment.
build.setInputTypes(InputType.recurrent(numLSTMInputs, numTimeSteps)); String lstmInputName = "input"; String lstmLayerName = "no layer";
"embeddings") .setOutputs("lossLayer").backprop(true).pretrain(false) .setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0]));
graph.addInputs("input").setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0]))
// Register convolutional input; the index reordering (1, 2, 0) suggests shape is stored
// channels-first — NOTE(review): confirm shape ordering matches InputType.convolutional's
// (height, width, depth) parameter order.
builder.setInputTypes(InputType.convolutional(shape[1], shape[2], shape[0]));
// Let DL4J infer the graph's input type (feed-forward, convolutional, ...) from a sample feature batch.
gb.setInputTypes(InputType.inferInputType(features));
@Override public GraphConfiguration getValue(double[] values) { //Create ComputationGraphConfiguration... NeuralNetConfiguration.Builder builder = randomGlobalConf(values); ComputationGraphConfiguration.GraphBuilder graphBuilder = builder.graphBuilder(); graphBuilder.addInputs(this.networkInputs); graphBuilder.setOutputs(this.networkOutputs); if (inputTypes != null) graphBuilder.setInputTypes(inputTypes.getValue(values)); //Build/add our layers and vertices: for (LayerConf c : layerSpaces) { org.deeplearning4j.nn.conf.layers.Layer l = c.layerSpace.getValue(values); graphBuilder.addLayer(c.getLayerName(), l, c.getInputs()); } for (VertexConf gv : vertices) { graphBuilder.addVertex(gv.getVertexName(), gv.getGraphVertex(), gv.getInputs()); } if (backprop != null) graphBuilder.backprop(backprop.getValue(values)); if (pretrain != null) graphBuilder.pretrain(pretrain.getValue(values)); if (backpropType != null) graphBuilder.backpropType(backpropType.getValue(values)); if (tbpttFwdLength != null) graphBuilder.tBPTTForwardLength(tbpttFwdLength.getValue(values)); if (tbpttBwdLength != null) graphBuilder.tBPTTBackwardLength(tbpttBwdLength.getValue(values)); ComputationGraphConfiguration configuration = graphBuilder.build(); return new GraphConfiguration(configuration, earlyStoppingConfiguration, numEpochs); }
/** * Build the multilayer network defined by the networkconfiguration and the list of layers. */ protected void createModel() throws Exception { final INDArray features = getFirstBatchFeatures(trainData); ComputationGraphConfiguration.GraphBuilder gb = netConfig.builder().seed(getSeed()).graphBuilder(); // Set ouput size final Layer lastLayer = layers[layers.length - 1]; final int nOut = trainData.numClasses(); if (lastLayer instanceof FeedForwardLayer) { ((FeedForwardLayer) lastLayer).setNOut(nOut); } if (getInstanceIterator() instanceof CnnTextEmbeddingInstanceIterator) { makeCnnTextLayerSetup(gb); } else { makeDefaultLayerSetup(gb); } gb.setInputTypes(InputType.inferInputType(features)); ComputationGraphConfiguration conf = gb.pretrain(false).backprop(true).build(); ComputationGraph model = new ComputationGraph(conf); model.init(); this.model = model; }
@Override public ComputationGraph init() { int embeddingSize = 128; ComputationGraphConfiguration.GraphBuilder graph = graphBuilder("input1"); graph.addInputs("input1").setInputTypes(InputType.convolutional(inputShape[2], inputShape[1], inputShape[0])) // Logits .addLayer("bottleneck", new DenseLayer.Builder().nIn(5376).nOut(embeddingSize).build(), "avgpool") // Embeddings .addVertex("embeddings", new L2NormalizeVertex(new int[] {1}, 1e-10), "bottleneck") // Output .addLayer("outputLayer", new CenterLossOutputLayer.Builder() .lossFunction(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD) .activation(Activation.SOFTMAX).alpha(0.9).lambda(1e-4) .nIn(embeddingSize).nOut(numClasses).build(), "embeddings") .setOutputs("outputLayer").backprop(true).pretrain(false); ComputationGraphConfiguration conf = graph.build(); ComputationGraph model = new ComputationGraph(conf); model.init(); return model; }
/**
 * Sets the input type for each of the graph's inputs, delegating to the wrapped builder.
 *
 * @param inputTypes one {@link InputType} per graph input, in input order
 */
public void setInputTypes(InputType... inputTypes) { build.setInputTypes(inputTypes); }
/**
 * Sets the input type of the corresponding graph inputs (one per input, in order),
 * forwarding to the underlying edited configuration builder.
 *
 * @param inputTypes the type of each input (such as convolutional or recurrent).
 * @return this {@code GraphBuilder} instance, for call chaining.
 */
public GraphBuilder setInputTypes(InputType... inputTypes) { editedConfigBuilder.setInputTypes(inputTypes); return this; }