Skip to content

Instantly share code, notes, and snippets.

View ravithejaburugu's full-sized avatar

Ravi theja burugu ravithejaburugu

  • Hyderabad, Telangana, India
View GitHub Profile
@ravithejaburugu
ravithejaburugu / readme.md
Created April 18, 2017 07:51 — forked from baraldilorenzo/readme.md
VGG-16 pre-trained model for Keras

## VGG16 model for Keras

This is the Keras model of the 16-layer network used by the VGG team in the ILSVRC-2014 competition.

It has been obtained by directly converting the Caffe model provided by the authors.

Details about the network architecture can be found in the following arXiv paper:

Very Deep Convolutional Networks for Large-Scale Image Recognition

K. Simonyan, A. Zisserman

package org.deeplearning4j.rl4j;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.iterations(1)
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.learningRate(0.1)
.updater(Updater.NESTEROVS)
.momentum(0.9)
.list()
.layer(0, new DenseLayer.Builder().nIn(258).nOut(1000)
.weightInit(WeightInit.XAVIER)
Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
LayerFactory layerFactory = LayerFactories.getFactory(ConvolutionDownSampleLayer.class);
int batchSize = 1000;
/**
*
*/
Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.LBFGS).momentum(0.9)
@ravithejaburugu
ravithejaburugu / mnnlConfig
Created January 21, 2017 19:12 — forked from Tachyon5/mnnlConfig
Explanation of Hidden and Visible in a layer.
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.seed(seed)
.gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
.gradientNormalizationThreshold(1.0)
.iterations(iterations)
.momentum(0.5)
.momentumAfter(Collections.singletonMap(3, 0.9))
.optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
.list(4)
.layer(0, new RBM.Builder().nIn(numRows*numColumns).nOut(500)
MultiLayerNetwork net;
//two hidden layers of 3 neurons each
final int[] LSTMLayers = new int[]{3,3};
NeuralNetConfiguration.ListBuilder list = new NeuralNetConfiguration.Builder()
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT).iterations(1)
.learningRate(learningRate)
.regularization(true).l2(0.0000001)
.seed(76692)
.weightInit(WeightInit.XAVIER)
.updater(Updater.ADAM).adamMeanDecay(0.99).adamVarDecay(0.9999)
RandomGenerator gen = new MersenneTwister(123);
MultiLayerConfiguration encoderConf = new NeuralNetConfiguration.Builder().weightInit(WeightInit.VI)
.layerFactory(LayerFactories.getFactory(RBM.class))
.iterations(5)
.lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).rng(gen)
.learningRate(1e-1).nIn(784).nOut(30).list(3)
.hiddenLayerSizes(new int[]{2000, 1000, 500})
.build();
package org.deeplearning4j.models.featuredetectors.rbm;
import static org.junit.Assert.*;
import org.deeplearning4j.linalg.api.ndarray.INDArray;
import org.deeplearning4j.linalg.factory.NDArrays;
import org.deeplearning4j.linalg.lossfunctions.LossFunctions;
import org.deeplearning4j.nn.api.Model;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.junit.Test;
MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
.iterations(1)
.weightInit(WeightInit.XAVIER)
.activation("relu")
.optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
.learningRate(0.05)
// ... other hyperparameters
.backprop(true)
.build();