Package: com.github.neuralnetworks.input

Examples of com.github.neuralnetworks.input.ScalingInputFunction


  assertEquals(2, dbn.getNeuralNetworks().size(), 0);

  dbn.setLayerCalculator(NNFactory.lcSigmoid(dbn, null));

  IrisInputProvider trainInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  trainInputProvider.addInputModifier(new ScalingInputFunction(trainInputProvider));

  IrisInputProvider testInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  testInputProvider.addInputModifier(new ScalingInputFunction(testInputProvider));

  // rbm trainers for each layer
  AparapiCDTrainer firstTrainer = TrainerFactory.cdSigmoidBinaryTrainer(dbn.getFirstNeuralNetwork(), null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.01f, 0.5f, 0f, 0f, 1, 150, 1000, true);
  AparapiCDTrainer lastTrainer = TrainerFactory.cdSigmoidBinaryTrainer(dbn.getLastNeuralNetwork(), null, null, null, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.01f, 0.5f, 0f, 0f, 1, 150, 1000, true);
View Full Code Here


  Environment.getInstance().setUseDataSharedMemory(true);
      Autoencoder ae = NNFactory.autoencoderSigmoid(4, 3, true);

      // training, testing and error
      IrisInputProvider trainInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  trainInputProvider.addInputModifier(new ScalingInputFunction(trainInputProvider));

  IrisInputProvider testInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  testInputProvider.addInputModifier(new ScalingInputFunction(testInputProvider));

      MultipleNeuronsOutputError error = new MultipleNeuronsOutputError();

      // backpropagation autoencoder training
      BackPropagationAutoencoder bae = TrainerFactory.backPropagationAutoencoder(ae, trainInputProvider, testInputProvider, error, new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.7f, 0f, 0f, 0f, 1, 1, 100);
View Full Code Here

  Environment.getInstance().setUseWeightsSharedMemory(true);
  StackedAutoencoder sae = NNFactory.saeSigmoid(new int[] { 4, 4, 3 }, true);

  // data and error providers
  IrisInputProvider trainInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  trainInputProvider.addInputModifier(new ScalingInputFunction(trainInputProvider));

  IrisInputProvider testInputProvider = new IrisInputProvider(new IrisTargetMultiNeuronOutputConverter(), false);
  testInputProvider.addInputModifier(new ScalingInputFunction(testInputProvider));

  // stacked networks
  Autoencoder firstNN = sae.getFirstNeuralNetwork();
  firstNN.setLayerCalculator(NNFactory.lcSigmoid(firstNN, null));
View Full Code Here

  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 3072, 10 }, true);

  CIFAR10TrainingInputProvider trainInputProvider = new CIFAR10TrainingInputProvider("cifar-10-batches-bin"); // specify your own path
  trainInputProvider.getProperties().setGroupByChannel(true);
  trainInputProvider.getProperties().setScaleColors(true);
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));

  // specify your own path
  CIFAR10TestingInputProvider testInputProvider = new CIFAR10TestingInputProvider("cifar-10-batches-bin"); // specify your own path
  testInputProvider.getProperties().setGroupByChannel(true);
  testInputProvider.getProperties().setScaleColors(true);
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(mlp, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new RandomInitializerImpl(new Random(), -0.01f, 0.01f)), 0.02f, 0.5f, 0f, 0f, 0f, 1, 1000, 1);

  bpt.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), false, true));
View Full Code Here

  Environment.getInstance().setUseDataSharedMemory(false);
  Environment.getInstance().setUseWeightsSharedMemory(false);
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 784, 10 }, true);

  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(mlp, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.5f, 0f, 0f, 0f, 1, 1000, 1);

  bpt.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), false, true));
View Full Code Here

    @Test
    public void testSigmoidHiddenBP() {
  NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 784, 300, 100, 10 }, true);

  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(mlp, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.01f, 0.5f, 0f, 0f, 0f, 1, 1000, 2);

  bpt.addEventListener(new LogTrainingListener(Thread.currentThread().getStackTrace()[1].getMethodName(), false, true));
View Full Code Here

  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // Mnist dataset provider
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  // Backpropagation trainer that also works for convolutional and subsampling layers
  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(nn, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f), 0.5f), 0.01f, 0.5f, 0f, 0f, 0f, 1, 1000, 1);

  // log data
View Full Code Here

  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // MNIST dataset
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  // Backpropagation trainer that also works for convolutional and subsampling layers
  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(nn, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.5f, 0f, 0f, 0f, 1, 1, 1);

  // log data
View Full Code Here

  nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));
  NNFactory.lcMaxPooling(nn);

  // MNIST dataset
  MnistInputProvider trainInputProvider = new MnistInputProvider("train-images.idx3-ubyte", "train-labels.idx1-ubyte");
  trainInputProvider.addInputModifier(new ScalingInputFunction(255));
  MnistInputProvider testInputProvider = new MnistInputProvider("t10k-images.idx3-ubyte", "t10k-labels.idx1-ubyte");
  testInputProvider.addInputModifier(new ScalingInputFunction(255));

  // Backpropagation trainer that also works for convolutional and subsampling layers
  BackPropagationTrainer<?> bpt = TrainerFactory.backPropagation(nn, trainInputProvider, testInputProvider, new MultipleNeuronsOutputError(), new NNRandomInitializer(new MersenneTwisterRandomInitializer(-0.01f, 0.01f)), 0.02f, 0.5f, 0f, 0f, 0f, 1, 1, 1);

  // log data
View Full Code Here

    }

    @Test
    public void testScaling() {
  float[][] input = new float[][] { { 1, 3 }, { -2, 1.5f } };
  ScalingInputFunction si = new ScalingInputFunction(new SimpleInputProvider(input));
  Matrix m = TensorFactory.matrix(input);
  si.value(m);

  assertEquals(0.5f, m.get(0, 0), 0);
  assertEquals(-1f, m.get(0, 1), 0);
  assertEquals(1f, m.get(1, 0), 0);
  assertEquals(0.5f, m.get(1, 1), 0);
View Full Code Here

TOP

Related Classes of com.github.neuralnetworks.input.ScalingInputFunction

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware#gmail.com.