Package com.github.neuralnetworks.util

Examples of com.github.neuralnetworks.util.Properties, the typed parameter map used throughout the library to configure networks and trainers.
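Before the snippets, a minimal sketch of the class's contract, assuming only the two accessors that appear in every example below: setParameter stores a value under a String key from Constants, and getParameter returns it with the type inferred at the call site (the package path for Constants is assumed to match Properties):

    import com.github.neuralnetworks.util.Constants;
    import com.github.neuralnetworks.util.Properties;

    public class PropertiesSketch {
        public static void main(String[] args) {
            Properties p = new Properties();

            // Store a hyperparameter under a well-known Constants key.
            p.setParameter(Constants.LEARNING_RATE, 0.01f);

            // getParameter infers the target type from the assignment,
            // as the getLayerCalculator() example further down relies on.
            float learningRate = p.getParameter(Constants.LEARNING_RATE);
            System.out.println("learning rate = " + learningRate);
        }
    }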


protected static Properties backpropProperties(NeuralNetwork nn, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error, NNRandomInitializer rand, float learningRate, float momentum, float l1weightDecay, float l2weightDecay) {
    Properties p = new Properties();
    p.setParameter(Constants.NEURAL_NETWORK, nn);
    p.setParameter(Constants.TRAINING_INPUT_PROVIDER, trainingSet);
    p.setParameter(Constants.TESTING_INPUT_PROVIDER, testingSet);
    p.setParameter(Constants.LEARNING_RATE, learningRate);
    p.setParameter(Constants.MOMENTUM, momentum);
    p.setParameter(Constants.L1_WEIGHT_DECAY, l1weightDecay);
    p.setParameter(Constants.L2_WEIGHT_DECAY, l2weightDecay);
    p.setParameter(Constants.OUTPUT_ERROR_DERIVATIVE, new MSEDerivative());
    p.setParameter(Constants.OUTPUT_ERROR, error);
    p.setParameter(Constants.RANDOM_INITIALIZER, rand);

    return p;
}
View Full Code Here
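Because the returned Properties is an ordinary mutable map, a caller (within the declaring factory class, since the method is protected) can overwrite any default before building a trainer. A hedged sketch with made-up hyperparameter values:

    Properties p = backpropProperties(nn, trainingSet, testingSet, error, rand,
            0.01f,   // learningRate
            0.5f,    // momentum
            0f, 0f); // l1weightDecay, l2weightDecay

    // Override a default before handing p to a trainer:
    p.setParameter(Constants.MOMENTUM, 0.9f);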


    return new AparapiCDTrainer(rbmProperties(rbm, lc, trainingSet, testingSet, error, rand, learningRate, momentum, l1weightDecay, l2weightDecay, gibbsSampling, isPersistentCD));
}

protected static Properties rbmProperties(RBM rbm, RBMLayerCalculator lc, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error, NNRandomInitializer rand, float learningRate, float momentum, float l1weightDecay, float l2weightDecay, int gibbsSampling, boolean resetRBM) {
    Properties p = new Properties();
    p.setParameter(Constants.NEURAL_NETWORK, rbm);
    p.setParameter(Constants.TRAINING_INPUT_PROVIDER, trainingSet);
    p.setParameter(Constants.TESTING_INPUT_PROVIDER, testingSet);
    p.setParameter(Constants.LEARNING_RATE, learningRate);
    p.setParameter(Constants.MOMENTUM, momentum);
    p.setParameter(Constants.L1_WEIGHT_DECAY, l1weightDecay);
    p.setParameter(Constants.L2_WEIGHT_DECAY, l2weightDecay);
    p.setParameter(Constants.GIBBS_SAMPLING_COUNT, gibbsSampling);
    p.setParameter(Constants.OUTPUT_ERROR, error);
    p.setParameter(Constants.RANDOM_INITIALIZER, rand);
    p.setParameter(Constants.RESET_RBM, resetRBM);
    p.setParameter(Constants.LAYER_CALCULATOR, lc);

    return p;
}
View Full Code Here
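The caller above funnels everything through rbmProperties in one expression; spelled out with concrete (made-up) values it reads:

    // Hypothetical hyperparameters; the argument types follow the
    // rbmProperties signature above.
    AparapiCDTrainer t = new AparapiCDTrainer(rbmProperties(
            rbm, lc, trainingSet, testingSet, error, rand,
            0.01f,   // learningRate
            0.5f,    // momentum
            0f, 0f,  // l1weightDecay, l2weightDecay
            1,       // gibbsSampling: a single Gibbs step, i.e. plain CD-1
            false)); // resetRBM (the caller passes its isPersistentCD flag here)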

public static DBNTrainer dbnTrainer(DNN<?> dnn, Map<NeuralNetwork, OneStepTrainer<?>> layerTrainers, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error) {
    return new DBNTrainer(layerTrainerProperties(dnn, layerTrainers, trainingSet, testingSet, error));
}

protected static Properties layerTrainerProperties(DNN<?> dnn, Map<NeuralNetwork, OneStepTrainer<?>> layerTrainers, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error) {
    Properties p = new Properties();
    p.setParameter(Constants.NEURAL_NETWORK, dnn);
    p.setParameter(Constants.TRAINING_INPUT_PROVIDER, trainingSet);
    p.setParameter(Constants.TESTING_INPUT_PROVIDER, testingSet);
    p.setParameter(Constants.OUTPUT_ERROR, error);
    p.setParameter(Constants.LAYER_TRAINERS, layerTrainers);

    return p;
}
View Full Code Here
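layerTrainers maps each layer network in the stack to the trainer that pre-trains it. A minimal sketch of assembling that map, assuming two RBMs and their contrastive-divergence trainers already exist:

    Map<NeuralNetwork, OneStepTrainer<?>> layerTrainers = new HashMap<>();
    layerTrainers.put(rbm1, cdTrainer1); // first RBM in the stack
    layerTrainers.put(rbm2, cdTrainer2); // second RBM in the stack

    DBNTrainer trainer = dbnTrainer(dbn, layerTrainers, trainingSet, testingSet, error);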

public LayerCalculator getLayerCalculator() {
    return properties != null ? properties.getParameter(Constants.LAYER_CALCULATOR) : null;
}

public void setLayerCalculator(LayerCalculator layerCalculator) {
    if (properties == null) {
        properties = new Properties();
    }

    properties.setParameter(Constants.LAYER_CALCULATOR, layerCalculator);
}
View Full Code Here
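Usage is symmetric, and the lazy initialization in the setter makes it safe on a freshly constructed network (assuming these accessors live on NeuralNetworkImpl, which matches the getProperties()/setProperties() calls in the builder snippets below):

    NeuralNetworkImpl nn = new NeuralNetworkImpl();
    nn.setLayerCalculator(lc);                      // creates the Properties bag on first use
    LayerCalculator same = nn.getLayerCalculator(); // reads it back; null if never set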

      throw new IllegalArgumentException("first layer must be convolutional");
  }

  NeuralNetworkImpl result = new NeuralNetworkImpl();
  ConnectionFactory cf = new ConnectionFactory();
  result.setProperties(new Properties());
  result.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

  Layer prev = null;
  int prevUnitCount = layers[0][0] * layers[0][1] * layers[0][2];
  result.addLayer(prev = new Layer());
View Full Code Here
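The prevUnitCount expression flattens the three-dimensional input descriptor into a single unit count. For example, assuming layers[0] describes a 32x32 RGB input:

    int[][] layers = { { 32, 32, 3 } /* , ... subsequent layer descriptors */ };
    int prevUnitCount = layers[0][0] * layers[0][1] * layers[0][2]; // 32 * 32 * 3 = 3072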

    if (layers.length <= 1) {
        throw new IllegalArgumentException("more than one layer is required");
    }

    if (nn.getProperties() == null) {
        nn.setProperties(new Properties());
    }
    nn.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

    addFullyConnectedLayer(nn, new Layer(), cf, layers[0], layers[0], addBias);
    for (int i = 1; i < layers.length; i++) {
View Full Code Here
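A hedged usage sketch of this builder. The enclosing method's name is cut off above, so mlp is an assumption; the argument shapes follow from the guard and the loop:

    // Hypothetical call: fully connected layers of 784, 300 and 10 units,
    // with bias connections added when the flag is true.
    NeuralNetworkImpl nn = NNFactory.mlp(new int[] { 784, 300, 10 }, true);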

      throw new IllegalArgumentException("more than one layer is required");
  }

  DBN result = new DBN();
  ConnectionFactory cf = new ConnectionFactory();
  result.setProperties(new Properties());
  result.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

  result.addLayer(new Layer());
  for (int i = 1; i < layers.length; i++) {
      RBM rbm = new RBM();
      rbm.setProperties(new Properties());
      rbm.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

      rbm.addConnections(cf.fullyConnected(result.getOutputLayer(), new Layer(), layers[i - 1], layers[i]));

      if (addBias) {
View Full Code Here
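As with the MLP builder, a hedged sketch of invoking this DBN builder (the method name dbn is an assumption; the sizes are made up):

    // Each consecutive pair of sizes becomes one RBM that shares the
    // stack's layers via the common ConnectionFactory.
    DBN dbn = NNFactory.dbn(new int[] { 784, 500, 200 }, true);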

    if (layers == null || layers.length <= 1) {
        throw new IllegalArgumentException("more than one layer is required");
    }

    ConnectionFactory cf = new ConnectionFactory();
    Properties properties = new Properties();
    properties.setParameter(Constants.CONNECTION_FACTORY, cf);
    StackedAutoencoder result = new StackedAutoencoder(new Layer());
    result.setProperties(properties);

    for (int i = 1; i < layers.length; i++) {
        Autoencoder ae = new Autoencoder();
        ae.setProperties(new Properties());
        ae.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

        ae.addLayer(result.getOutputLayer());
        NNFactory.addFullyConnectedLayer(ae, new Layer(), cf, layers[i - 1], layers[i], addBias);
        NNFactory.addFullyConnectedLayer(ae, new Layer(), cf, layers[i], layers[i - 1], addBias);
View Full Code Here
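A hedged sketch of calling the stacked-autoencoder builder (the method name sae is an assumption). Note how the two addFullyConnectedLayer calls above give each Autoencoder a mirrored encoder/decoder pair:

    // Hypothetical call: hidden sizes 400 and 100, each wrapped in an
    // Autoencoder of shape n -> h -> n.
    StackedAutoencoder sae = NNFactory.sae(new int[] { 784, 400, 100 }, true);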

     * @param momentum
     * @param l1weightDecay
     * @param l2weightDecay
     * @param dropoutRate
     * @param trainingBatchSize
     * @param testBatchSize
     * @param epochs
     * @return configured backpropagation trainer
     */
public static BackPropagationTrainer<?> backPropagation(NeuralNetworkImpl nn, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error, NNRandomInitializer rand, float learningRate, float momentum, float l1weightDecay, float l2weightDecay, float dropoutRate, int trainingBatchSize, int testBatchSize, int epochs) {
    Properties p = backpropProperties(nn, trainingSet, testingSet, error, rand, learningRate, momentum, l1weightDecay, l2weightDecay, dropoutRate, trainingBatchSize, testBatchSize, epochs);
    p.setParameter(Constants.BACKPROPAGATION, bplc(nn, p));

    return new BackPropagationTrainer<NeuralNetwork>(p);
}
View Full Code Here
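A hedged end-to-end sketch of this factory method with made-up hyperparameters (train() is assumed from the library's Trainer API):

    BackPropagationTrainer<?> bpt = backPropagation(nn, trainingSet, testingSet,
            error, rand,
            0.01f, 0.5f, // learningRate, momentum
            0f, 0f,      // l1weightDecay, l2weightDecay
            0f,          // dropoutRate
            1, 1000,     // trainingBatchSize, testBatchSize
            20);         // epochs
    bpt.train();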

    return blc;
}

public static BackPropagationAutoencoder backPropagationAutoencoder(NeuralNetworkImpl nn, TrainingInputProvider trainingSet, TrainingInputProvider testingSet, OutputError error, NNRandomInitializer rand, float learningRate, float momentum, float l1weightDecay, float l2weightDecay, float inputCorruptionRate, int trainingBatchSize, int testBatchSize, int epochs) {
    Properties p = backpropProperties(nn, trainingSet, testingSet, error, rand, learningRate, momentum, l1weightDecay, l2weightDecay, 0F, trainingBatchSize, testBatchSize, epochs);
    p.setParameter(Constants.CORRUPTION_LEVEL, inputCorruptionRate);
    p.setParameter(Constants.BACKPROPAGATION, bplc(nn, p));

    return new BackPropagationAutoencoder(p);
}
View Full Code Here
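The autoencoder variant reuses backpropProperties with dropout pinned to 0F and layers the corruption level on top, which is what makes it a denoising autoencoder trainer. A hedged sketch (values made up):

    BackPropagationAutoencoder t = backPropagationAutoencoder(nn, trainingSet, testingSet,
            error, rand, 0.01f, 0.5f, 0f, 0f,
            0.3f,         // inputCorruptionRate: corrupt 30% of each input
            1, 1000, 20); // trainingBatchSize, testBatchSize, epochs
    t.train();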
