Package org.encog.ml.train

Examples of org.encog.ml.train.MLTrain.iteration()


  {
    int epoch = 1;

    // Train an outstar layer with learning rate 0.1, iterating until the
    // training error drops to 1% or below.
    // NOTE(review): there is no iteration cap — this loops forever if the
    // error never reaches 0.01.
    MLTrain train = new TrainOutstar(network,training,0.1);
    do {
      train.iteration();
      System.out
          .println("Training outstar, Epoch #" + epoch + ", error=" + train.getError() );
      epoch++;
    } while(train.getError()>0.01);
  }
View Full Code Here


    // train the neural network
    final MLTrain train = new ResilientPropagation(network, trainingSet);

    do {
      train.iteration();
    } while (train.getError() > 0.009);

    double e = network.calculateError(trainingSet);
    System.out.println("Network traiined to error: " + e);
View Full Code Here

    // Hybrid training: attach a fallback trainer (used when the main one
    // stalls) plus a stop strategy that decides when training ends.
    trainMain.addStrategy(new HybridStrategy(trainAlt));
    trainMain.addStrategy(stop);

    int epoch = 0;
    while (!stop.shouldStop()) {
      trainMain.iteration();
      System.out.println("Training " + what + ", Epoch #" + epoch
          + " Error:" + trainMain.getError());
      epoch++;
    }
    // Final training error once the stop condition fired.
    return trainMain.getError();
View Full Code Here

    //randomizer.randomize(network);
    System.out.println(network.dumpWeights());
    // Train with RPROP for at most 5000 iterations or until the error
    // drops to 1% or below, logging the error each pass.
    MLTrain rprop = new ResilientPropagation(network, trainingData);
    int iteration = 0;
    do {
      rprop.iteration();
      System.out.println(rprop.getError());
      iteration++;
    } while( iteration<5000 && rprop.getError()>0.01);
    System.out.println(iteration);
    // The test expects convergence well before the 5000-iteration cap.
    Assert.assertTrue(iteration<40);
View Full Code Here

    }
   
    int epoch = 1;

    // Run exactly 50 training iterations, reporting the score after each.
    for(int i=0;i<50;i++) {
      train.iteration();
      System.out
          .println("Epoch #" + epoch + " Score:" + train.getError());
      epoch++;
    }
View Full Code Here

    // Train on the XOR data set with RPROP until the error reaches 1%
    // or 5000 epochs pass, whichever comes first.
    MLDataSet trainingSet = new BasicMLDataSet(XOR_INPUT, XOR_IDEAL);
    final MLTrain train = new ResilientPropagation(network, trainingSet);
    //
    int epoch = 1;
    do {
      train.iteration();
      System.out
          .println("Epoch #" + epoch + " Error:" + train.getError());
      epoch++;
    } while(train.getError() > 0.01 && epoch<5000);
   
View Full Code Here

    // Build a 2-5-7-1 feed-forward network with bias, and seed its
    // weights with a consistent randomizer so the test is deterministic.
    BasicNetwork network = EncogUtility.simpleFeedForward(2, 5, 7, 1, true);
    (new ConsistentRandomizer(-1,1)).randomize(network);
    MLTrain rprop = new ResilientPropagation(network, trainingData);
    int iteration = 0;
    do {
      rprop.iteration();
      iteration++;
    } while( iteration<5000 && rprop.getError()>0.01);
    // Expect fast convergence — far fewer than the 5000-iteration cap.
    Assert.assertTrue(iteration<40);
  }
 
View Full Code Here

      // Build the trainer for the current job (single- or multi-threaded)
      // and run it until the job signals completion.
      this.currentJob.createTrainer(this.manager.isSingleThreaded());
      final MLTrain train = this.currentJob.getTrain();
      // NOTE(review): "interation" is a misspelling of "iteration". It is
      // incremented but never read in the visible lines; confirm it is
      // unused later in the method before renaming.
      int interation = 1;

      while (this.currentJob.shouldContinue()) {
        train.iteration();
        interation++;
      }
      watch.stop();
    } catch (final Throwable t) {
      // Record any failure on the job rather than letting it propagate.
      this.currentJob.setError(t);
View Full Code Here

          fold.getValidation());
      // Stop training when the validation error stops improving.
      train.addStrategy(earlyStop);

      // Reusable buffer for the per-iteration progress line; cleared at
      // the top of each pass instead of reallocating.
      StringBuilder line = new StringBuilder();
      while (!train.isTrainingDone()) {
        train.iteration();
        line.setLength(0);
        line.append("Fold #");
        line.append(foldNum);
        line.append("/");
        line.append(k);
View Full Code Here

        report.report(k, foldNum, line.toString());
      }
      fold.setScore(earlyStop.getValidationError());
      fold.setMethod(method);
    } else if (train.getImplementationType() == TrainingImplementationType.OnePass) {
      train.iteration();
      double validationError = calculateError(method,
          fold.getValidation());
      this.report.report(k, k,
          "Trained, Training Error: " + train.getError()
              + ", Validatoin Error: " + validationError);
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.