Package org.encog.neural.networks.training.propagation

Examples of org.encog.neural.networks.training.propagation.Propagation
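
The excerpts below are partial and assume that a BasicNetwork and an MLDataSet already exist. For orientation, here is a minimal, self-contained sketch of the usual Propagation training loop on the XOR toy problem (Encog 3.x is assumed; the class name, the 2-3-1 topology and the 0.01 error target are illustrative choices, not values taken from the excerpts):

import org.encog.Encog;
import org.encog.engine.network.activation.ActivationSigmoid;
import org.encog.ml.data.MLDataSet;
import org.encog.ml.data.basic.BasicMLDataSet;
import org.encog.neural.networks.BasicNetwork;
import org.encog.neural.networks.layers.BasicLayer;
import org.encog.neural.networks.training.propagation.Propagation;
import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;

public class XorPropagationSketch {

  public static void main(final String[] args) {
    // XOR truth table used as a toy training set.
    final double[][] input = { { 0, 0 }, { 1, 0 }, { 0, 1 }, { 1, 1 } };
    final double[][] ideal = { { 0 }, { 1 }, { 1 }, { 0 } };
    final MLDataSet trainingSet = new BasicMLDataSet(input, ideal);

    // Build a small 2-3-1 feedforward network.
    final BasicNetwork network = new BasicNetwork();
    network.addLayer(new BasicLayer(null, true, 2));
    network.addLayer(new BasicLayer(new ActivationSigmoid(), true, 3));
    network.addLayer(new BasicLayer(new ActivationSigmoid(), false, 1));
    network.getStructure().finalizeStructure();
    network.reset();

    // Propagation is the abstract base class; ResilientPropagation (RPROP)
    // is one of its concrete trainers.
    final Propagation train = new ResilientPropagation(network, trainingSet);

    int epoch = 1;
    do {
      train.iteration();
      System.out.println("Epoch #" + epoch + " Error: " + train.getError());
      epoch++;
    } while (train.getError() > 0.01);
    train.finishTraining();

    Encog.getInstance().shutdown();
  }
}

Depending on the Encog 3.x version, the thread count is set with setNumThreads or setThreadCount; both names appear in the excerpts below. A value of 0 lets Encog choose the number of threads from the available processors, while 1 forces single-threaded training.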


    // train the neural network

    double error = Double.POSITIVE_INFINITY;
    for (int z = 0; z < this.weightTries; z++) {
      network.reset();
      final Propagation train = new ResilientPropagation(network,
          useTraining);
      final StopTrainingStrategy strat = new StopTrainingStrategy(0.001,
          5);

      train.addStrategy(strat);
      train.setNumThreads(1); // force single thread mode

      for (int i = 0; (i < this.iterations) && !getShouldStop()
          && !strat.shouldStop(); i++) {
        train.iteration();
      }

      error = Math.min(error, train.getError());
    }

    if (buffer != null) {
      buffer.close();
    }


  /**
   * Train the network with RPROP, showing progress in a training dialog.
   *
   * @param network
   *            The network to train.
   * @param trainingSet
   *            The training set to use.
   */
  public static void trainDialog(final BasicNetwork network,
      final MLDataSet trainingSet) {
    final Propagation train = new ResilientPropagation(network, trainingSet);
    train.setNumThreads(0);
    TrainingDialog.trainDialog(train, network, trainingSet);
  }

  /**
   * Train the network with RPROP, logging progress to the console.
   *
   * @param network
   *            The network to train.
   * @param trainingSet
   *            The training set to use.
   * @param minutes
   *            The number of minutes to train for.
   */
  public static void trainConsole(final BasicNetwork network,
      final MLDataSet trainingSet, final int minutes) {
    final Propagation train = new ResilientPropagation(network, trainingSet);
    train.setNumThreads(0);
    EncogUtility.trainConsole(train, network, trainingSet, minutes);
  }

    for (int i = 0; i < TRIES; i++) {

      MLMethod method = EncogUtility.simpleFeedForward(INPUT_OUTPUT_COUNT,
          HIDDEN_COUNT, 0, INPUT_OUTPUT_COUNT, false);

      Propagation train = new Backpropagation((ContainsFlat) method,
          trainingData, 1.7, 0);
      // Propagation train = new ResilientPropagation((ContainsFlat) method, trainingData);
      train.fixFlatSpot(true);

      int iteration = 0;
      do {
        train.iteration();
        iteration++;
      } while (train.getError() > 0.01);
      count[i] = iteration;
      System.out.println("Try #" + (i + 1) + " took " + iteration + " iterations.");
    }
   
    System.out.println("Tries: " + TRIES);
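
In the example above, fixFlatSpot(true) enables Encog's flat-spot compensation, which (roughly speaking) adds a small constant to the sigmoid derivative so that plain backpropagation does not stall on saturated neurons. As a hedged sketch, the same trainer can be wired directly to a BasicNetwork (the 1.7 learning rate and 0 momentum mirror the excerpt; the network and trainingData variables are assumptions, as in the complete sketch near the top of this page):

      // Backpropagation(network, training, learningRate, momentum)
      final Propagation train = new Backpropagation(network, trainingData, 1.7, 0);
      train.fixFlatSpot(true); // compensate for near-zero sigmoid derivatives

      do {
        train.iteration();
      } while (train.getError() > 0.01);
      train.finishTraining();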

  /**
   * {@inheritDoc}
   */
  @Override
  public final void createTrainer(final boolean singleThreaded) {
    final Propagation train = new ResilientPropagation(getNetwork(),
        getTraining(), getInitialUpdate(), getMaxStep());

    if (singleThreaded) {
      train.setNumThreads(1);
    } else {
      train.setNumThreads(0);
    }

    for (final Strategy strategy : getStrategies()) {
      train.addStrategy(strategy);
    }

    setTrain(train);
  }
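
The four-argument ResilientPropagation constructor used above exposes RPROP's initial update value and maximum step size directly. A hedged sketch of an equivalent direct call, combined with the StopTrainingStrategy from the first excerpt (the values 0.1 and 50 are the conventional RPROP defaults and are assumptions here, as are the network and trainingSet variables):

    // ResilientPropagation(network, training, initialUpdate, maxStep)
    final Propagation train = new ResilientPropagation(network, trainingSet, 0.1, 50);
    train.setNumThreads(1); // single-threaded, matching the singleThreaded branch above

    // Stop once the error has improved by less than 0.001 for 5 consecutive cycles.
    final StopTrainingStrategy stop = new StopTrainingStrategy(0.001, 5);
    train.addStrategy(stop);

    while (!stop.shouldStop()) {
      train.iteration();
    }
    train.finishTraining();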

  /**
   * {@inheritDoc}
   */
  @Override
  public final void createTrainer(final boolean singleThreaded) {
    final Propagation train = new Backpropagation(getNetwork(),
        getTraining(), getLearningRate(), getMomentum());

    if (singleThreaded) {
      train.setNumThreads(1);
    } else {
      train.setNumThreads(0);
    }

    for (final Strategy strategy : getStrategies()) {
      train.addStrategy(strategy);
    }

    setTrain(train);
  }

  /**
   * Train the network with RPROP, logging progress to the console.
   *
   * @param network
   *            The network to train.
   * @param trainingSet
   *            The training set to use.
   * @param minutes
   *            The number of minutes to train for.
   */
  public static void trainConsole(final BasicNetwork network,
      final MLDataSet trainingSet, final int minutes) {
    final Propagation train = new ResilientPropagation(network, trainingSet);
    train.setThreadCount(0);
    EncogUtility.trainConsole(train, network, trainingSet, minutes);
  }

    // train the neural network

    double error = Double.POSITIVE_INFINITY;
    for (int z = 0; z < this.weightTries; z++) {
      network.reset();
      final Propagation train = new ResilientPropagation(network,
          useTraining);
      final StopTrainingStrategy strat = new StopTrainingStrategy(0.001,
          5);

      train.addStrategy(strat);
      train.setThreadCount(1); // force single thread mode

      for (int i = 0; (i < this.iterations) && !getShouldStop()
          && !strat.shouldStop(); i++) {
        train.iteration();
      }

      error = Math.min(error, train.getError());
    }

    if (buffer != null) {
      buffer.close();
    }

  /**
   * Train the network with RPROP, showing progress in a training dialog.
   *
   * @param network
   *            The network to train.
   * @param trainingSet
   *            The training set to use.
   */
  public static void trainDialog(final BasicNetwork network,
      final MLDataSet trainingSet) {
    final Propagation train = new ResilientPropagation(network, trainingSet);
    train.setThreadCount(0);
    TrainingDialog.trainDialog(train, network, trainingSet);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void createTrainer(final boolean singleThreaded) {
    final Propagation train = new ResilientPropagation(getNetwork(),
        getTraining(), getInitialUpdate(), getMaxStep());

    if (singleThreaded) {
      train.setThreadCount(1);
    } else {
      train.setThreadCount(0);
    }

    for (final Strategy strategy : getStrategies()) {
      train.addStrategy(strategy);
    }

    setTrain(train);
  }
