Package org.encog.neural.networks.training.propagation.resilient

Examples of org.encog.neural.networks.training.propagation.resilient.ResilientPropagation
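The snippets below are excerpts from larger Encog programs, so most of them begin or end mid-method. For orientation, here is a minimal self-contained sketch of the usual training pattern, assuming the standard Encog 3 API (the class name XorRpropSketch is ours):

    import org.encog.ml.data.MLData;
    import org.encog.ml.data.MLDataPair;
    import org.encog.ml.data.MLDataSet;
    import org.encog.ml.data.basic.BasicMLDataSet;
    import org.encog.neural.networks.BasicNetwork;
    import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;
    import org.encog.util.simple.EncogUtility;

    public class XorRpropSketch {
      public static void main(String[] args) {
        double[][] input = { { 0.0, 0.0 }, { 1.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 1.0 } };
        double[][] ideal = { { 0.0 }, { 1.0 }, { 1.0 }, { 0.0 } };
        MLDataSet trainingSet = new BasicMLDataSet(input, ideal);

        // 2 inputs, one hidden layer of 4 neurons, 1 output, sigmoid activations
        BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);

        ResilientPropagation train = new ResilientPropagation(network, trainingSet);
        do {
          train.iteration();
        } while (train.getError() > 0.01);
        train.finishTraining();

        for (MLDataPair pair : trainingSet) {
          MLData output = network.compute(pair.getInput());
          System.out.println(pair.getInput().getData(0) + "," + pair.getInput().getData(1)
              + " -> " + output.getData(0));
        }
      }
    }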


        // (fragment begins mid-statement: these are the trailing arguments of a
        // SQL-backed training-set constructor: JDBC URL, user id, and password)
        XORSQL.SQL_URL,
        XORSQL.SQL_UID,
        XORSQL.SQL_PWD);
   
    // train the neural network
    final MLTrain train = new ResilientPropagation(network, trainingSet);
    // reset if improvement is less than 1% over 5 cycles
    train.addStrategy(new RequiredImprovementStrategy(5));
   
    int epoch = 1;

    do {
      train.iteration();
      System.out.println("Epoch #" + epoch + " Error:" + train.getError());
      epoch++;
    } while(train.getError() > 0.01);

    // test the neural network
    System.out.println("Neural Network Results:");
    for(MLDataPair pair: trainingSet ) {
      final MLData output = network.compute(pair.getInput());
View Full Code Here


    network = EncogUtility.simpleFeedForward(INPUT_WINDOW, PREDICT_WINDOW*2, 0, 1, true);
    network.reset();
    graph.setNetwork(network);
   
    this.trainingData = generateTraining();
    this.train = new ResilientPropagation(this.network,this.trainingData);
    btnTrain = new JButton("Train");
    this.btnTrain.addActionListener(this);
    content.add(btnTrain,BorderLayout.SOUTH);
    graph.setError(network.calculateError(this.trainingData));
  }
View Full Code Here
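The constructor above wires a Train button, but the handler is not shown. A plausible self-contained sketch of that wiring, with the example's custom graph component replaced by console output (everything here is our assumption, not the original class):

    import java.awt.BorderLayout;
    import java.awt.event.ActionEvent;
    import java.awt.event.ActionListener;
    import javax.swing.JButton;
    import javax.swing.JFrame;
    import org.encog.ml.data.MLDataSet;
    import org.encog.ml.data.basic.BasicMLDataSet;
    import org.encog.neural.networks.BasicNetwork;
    import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;
    import org.encog.util.simple.EncogUtility;

    public class TrainButtonSketch extends JFrame implements ActionListener {
      private final ResilientPropagation train;
      private final JButton btnTrain = new JButton("Train");

      public TrainButtonSketch() {
        super("RPROP demo");
        double[][] in = { { 0.0, 0.0 }, { 1.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 1.0 } };
        double[][] out = { { 0.0 }, { 1.0 }, { 1.0 }, { 0.0 } };
        MLDataSet data = new BasicMLDataSet(in, out);
        BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);
        this.train = new ResilientPropagation(network, data);
        this.btnTrain.addActionListener(this);
        getContentPane().add(this.btnTrain, BorderLayout.SOUTH);
        setSize(200, 100);
        setDefaultCloseOperation(EXIT_ON_CLOSE);
      }

      public void actionPerformed(ActionEvent e) {
        this.train.iteration(); // one RPROP step per button click
        System.out.println("Error: " + this.train.getError());
      }

      public static void main(String[] args) {
        new TrainButtonSketch().setVisible(true);
      }
    }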

  }
 
  public static double evaluateRPROP(BasicNetwork network,MLDataSet data)
  {

    ResilientPropagation train = new ResilientPropagation(network,data);
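    // a thread count of 1 forces plain single-threaded RPROP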
    train.setNumThreads(1);
    long start = System.currentTimeMillis();
    System.out.println("Training 20 Iterations with RPROP");
    for(int i=1;i<=20;i++)
    {
      train.iteration();
      System.out.println("Iteration #" + i + " Error:" + train.getError());
    }
    train.finishTraining();
    long stop = System.currentTimeMillis();
    double diff = ((double)(stop - start))/1000.0;
    System.out.println("RPROP Result:" + diff + " seconds." );
    System.out.println("Final RPROP error: " + network.calculateError(data));
    return diff;
View Full Code Here

  }
 
  public static double evaluateMPROP(BasicNetwork network,MLDataSet data)
  {

    ResilientPropagation train = new ResilientPropagation(network,data);
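    // a thread count of 0 lets Encog pick one thread per available processor core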
    train.setNumThreads(0);
    long start = System.currentTimeMillis();
    System.out.println("Training 20 Iterations with MPROP");
    for(int i=1;i<=20;i++)
    {
      train.iteration();
      System.out.println("Iteration #" + i + " Error:" + train.getError());
    }
    train.finishTraining();
    long stop = System.currentTimeMillis();
    double diff = ((double)(stop - start))/1000.0;
    System.out.println("MPROP Result:" + diff + " seconds." );
    System.out.println("Final MPROP error: " + network.calculateError(data));
    return diff;
View Full Code Here
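These two benchmarks differ only in threading: setNumThreads(1) forces a single worker, while setNumThreads(0) tells Encog to choose a thread count from the available cores (the "MPROP" label refers to this multithreaded mode). A hedged harness for comparing them, reusing the XOR_INPUT/XOR_IDEAL constants shown further down this page (XOR is far too small to show a real speedup; a larger data set would be needed in practice):

    BasicNetwork network = EncogUtility.simpleFeedForward(2, 6, 0, 1, false);
    MLDataSet data = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
    double single = evaluateRPROP(network, data);
    network.reset(); // restart from fresh random weights for a fair comparison
    double multi = evaluateMPROP(network, data);
    System.out.println("Single/multi ratio: " + (single / multi));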

    return network;
  }

  public void train(BasicNetwork network, MLDataSet training) {
    final FoldedDataSet folded = new FoldedDataSet(training);
    final MLTrain train = new ResilientPropagation(network, folded);
    final CrossValidationKFold trainFolded = new CrossValidationKFold(train,4);

    int epoch = 1;

    do {
View Full Code Here
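The epoch loop is cut off above. Since CrossValidationKFold wraps the RPROP trainer, the remainder presumably drives trainFolded with the same loop pattern used by the other examples on this page (our reconstruction):

    do {
      trainFolded.iteration();
      System.out.println("Epoch #" + epoch + " Error:" + trainFolded.getError());
      epoch++;
    } while (trainFolded.getError() > 0.01);
    trainFolded.finishTraining();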

 
  public static void main(String[] args)
  {
    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
    BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);
    ResilientPropagation train = new ResilientPropagation(network, trainingSet);
    train.addStrategy(new RequiredImprovementStrategy(5));
   
    System.out.println("Perform initial train.");
    EncogUtility.trainToError(train,0.01);
    TrainingContinuation cont = train.pause();
    System.out.println(Arrays.toString((double[])cont.getContents().get(ResilientPropagation.LAST_GRADIENTS)));
    System.out.println(Arrays.toString((double[])cont.getContents().get(ResilientPropagation.UPDATE_VALUES)));
   
    try
    {
      // the full example presumably saves the paused state here; without this
      // save the load below would have nothing to read
      SerializeObject.save(new File("resume.ser"), cont);
      cont = (TrainingContinuation)SerializeObject.load(new File("resume.ser"));
    }
    catch(Exception ex)
    {
      ex.printStackTrace();
    }
   
    System.out.println("Now trying a second train, with continue from the first.  Should stop after one iteration");
    ResilientPropagation train2 = new ResilientPropagation(network, trainingSet);
    train2.resume(cont);
    EncogUtility.trainToError(train2,0.01);
  }
View Full Code Here
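Note that pause() captures only trainer state; for RPROP that is the last gradients and the per-weight update values printed above. The network's weights live in the network object itself, so a full checkpoint must persist both. A sketch, assuming Encog 3's EncogDirectoryPersistence (file names are arbitrary; imports and exception handling omitted):

    // save both halves of the training state
    EncogDirectoryPersistence.saveObject(new File("net.eg"), network);
    SerializeObject.save(new File("state.ser"), train.pause());

    // later: restore and continue where training left off
    BasicNetwork net2 = (BasicNetwork) EncogDirectoryPersistence.loadObject(new File("net.eg"));
    ResilientPropagation t2 = new ResilientPropagation(net2, trainingSet);
    t2.resume((TrainingContinuation) SerializeObject.load(new File("state.ser")));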

  public static int evaluateTrain(
      final BasicNetwork network, final MLDataSet training) {
    // train the neural network
    MLTrain train;
   
    train = new ResilientPropagation(network, training);

    final long start = System.currentTimeMillis();
    final long stop = start + (10 * MILIS);

    int iterations = 0;
View Full Code Here
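The benchmark is cut off after computing its ten-second deadline. Our reconstruction of the remainder, assuming MILIS is a milliseconds-per-second constant of 1000:

    while (System.currentTimeMillis() < stop) {
      train.iteration();
      iterations++;
    }
    return iterations;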

        // (fragment begins mid-statement: the tail of a string concatenation,
        // apparently logging the network's input/output counts)
        + this.outputCount);

    final double strategyError = Double.parseDouble(strStrategyError);
    final int strategyCycles = Integer.parseInt(strStrategyCycles);

    final ResilientPropagation train = new ResilientPropagation(this.network, this.training);
    train.addStrategy(new ResetStrategy(strategyError, strategyCycles));

    if (strMode.equalsIgnoreCase("gui")) {
      TrainingDialog.trainDialog(train, this.network, this.training);
    } else {
      final int minutes = Integer.parseInt(strMinutes);
View Full Code Here
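The console branch ends mid-statement after parsing the minute count. It presumably hands the trainer to a timed console loop; EncogUtility.trainConsole is the obvious candidate (our assumption):

      EncogUtility.trainConsole(train, this.network, this.training, minutes);
    }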

  public static double XOR_INPUT[][] = { { 0.0, 0.0 }, { 1.0, 0.0 },
      { 0.0, 1.0 }, { 1.0, 1.0 } };

  public static double XOR_IDEAL[][] = { { 0.0 }, { 1.0 }, { 1.0 }, { 0.0 } };

  public static double evaluate(BasicNetwork network, MLDataSet training) {
    ResilientPropagation rprop = new ResilientPropagation(network, training);
    double startingError = network.calculateError(training);
    for (int i = 0; i < ITERATIONS; i++) {
      rprop.iteration();
    }
    double finalError = network.calculateError(training);
    return startingError - finalError;
  }
View Full Code Here
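evaluate() returns the drop in training error after ITERATIONS epochs of RPROP (a positive result means the error improved; ITERATIONS is a constant of the surrounding class). A hypothetical caller, reusing the XOR constants above:

    BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);
    MLDataSet training = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
    System.out.println("Error reduced by " + evaluate(network, training));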

    // create training data
    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
   
    // train the neural network
    final ResilientPropagation train = new ResilientPropagation(network, trainingSet);

    int epoch = 1;

    do {
      train.iteration();
      System.out.println("Epoch #" + epoch + " Error:" + train.getError());
      epoch++;
    } while(train.getError() > 0.01);

    // test the neural network
    System.out.println("Neural Network Results:");
    for(MLDataPair pair: trainingSet ) {
      final MLData output = network.compute(pair.getInput());
View Full Code Here
