package org.mydeeplearning4j.pic.detection;

import org.canova.api.records.reader.RecordReader;
import org.canova.api.split.FileSplit;
import org.canova.image.recordreader.ImageRecordReader;
import org.deeplearning4j.datasets.canova.RecordReaderDataSetIterator;
import org.deeplearning4j.datasets.iterator.DataSetIterator;
import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.layers.ConvolutionLayer;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.conf.layers.SubsamplingLayer;
import org.deeplearning4j.nn.conf.layers.setup.ConvolutionLayerSetup;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.deeplearning4j.optimize.listeners.ScoreIterationListener;
import org.nd4j.linalg.lossfunctions.LossFunctions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

/**
 * Created by bottles on 3/31/2016.
 */
public class convoltest {

    private static final Logger log = LoggerFactory.getLogger(convoltest.class);

    public static void main(String[] args) throws Exception {
        int nChannels = 1;        // single-channel (grayscale) input
        int outputNum = 10;       // number of output classes
        int batchSize = 1000;
        int nEpochs = 10;
        int iterations = 1;
        int seed = 123;
        final int numRows = 28;
        final int numColumns = 28;

        // Path to the labeled images. The "world cup" folder contains a single 28x28 pixel JPG image.
        String labeledPath = "C:\\Users\\Public\\Pictures\\world cup";

        // Traverse the dataset directory to collect a label for each entry.
        List<String> labels = new ArrayList<>();
        for (File f : new File(labeledPath).listFiles()) {
            labels.add(f.getName());
        }

        // Instantiate the RecordReader with the image height and width, then point it at the data path.
        RecordReader recordReader = new ImageRecordReader(numRows, numColumns, true, labels);
        recordReader.initialize(new FileSplit(new File(labeledPath)));
        DataSetIterator mnistTrain = new RecordReaderDataSetIterator(recordReader, numRows * numColumns, labels.size());

        log.info("Build model....");
        MultiLayerConfiguration.Builder builder = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(iterations)
                .regularization(true).l2(0.0005)
                .learningRate(0.01)
                .weightInit(WeightInit.XAVIER)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .updater(Updater.NESTEROVS).momentum(0.9)
                .list(4)
                .layer(0, new ConvolutionLayer.Builder(5, 5)
                        .nIn(nChannels)
                        .stride(1, 1)
                        .nOut(20).dropOut(0.5)
                        .activation("relu")
                        .build())
                .layer(1, new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX)
                        .kernelSize(2, 2)
                        .stride(2, 2)
                        .build())
                .layer(2, new DenseLayer.Builder().activation("relu")
                        .nOut(500).build())
                .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                        .nOut(outputNum)
                        .activation("softmax")
                        .build())
                .backprop(true).pretrain(false);
        // Configure input pre-processing for the convolutional layers (height, width, channels).
        new ConvolutionLayerSetup(builder, numRows, numColumns, 1);

        MultiLayerConfiguration conf = builder.build();
        MultiLayerNetwork model = new MultiLayerNetwork(conf);
        model.init();

        log.info("Train model....");
        model.setListeners(new ScoreIterationListener(1));
        model.fit(mnistTrain); // exception occurs here
        log.info("****************Example finished********************");
    }
}