Package org.encog.neural.networks.training.propagation.resilient

Examples of org.encog.neural.networks.training.propagation.resilient.ResilientPropagation.iteration()


    Assert.assertEquals(0.0, network.getStructure().getFlat().getWeights()[0], 0.01);
    Assert.assertEquals(0.0, network.getStructure().getFlat().getWeights()[1], 0.01);
    rprop.iteration();
    rprop.iteration();
    rprop.iteration();
    rprop.iteration();
    // these connections were removed, and should not have been "trained"
    Assert.assertEquals(0.0, network.getStructure().getFlat().getWeights()[0], 0.01);
    Assert.assertEquals(0.0, network.getStructure().getFlat().getWeights()[1], 0.01);   
    rprop.finishTraining();
  }
View Full Code Here


    MLDataSet training = EncoderTrainingFactory.generateTraining(4, false);
    BasicNetwork network = EncogUtility.simpleFeedForward(4, 2, 0, 4, true);
    (new ConsistentRandomizer(-1,1,50)).randomize(network);
    ResilientPropagation rprop = new ResilientPropagation(network,training);
    for(int i=0;i<5;i++) {
      rprop.iteration();
    }
    Assert.assertArrayEquals(EXPECTED_WEIGHTS1, network.getFlat().getWeights(),0.0001);
    for(int i=0;i<5;i++) {
      rprop.iteration();
    }
View Full Code Here

    for(int i=0;i<5;i++) {
      rprop.iteration();
    }
    Assert.assertArrayEquals(EXPECTED_WEIGHTS1, network.getFlat().getWeights(),0.0001);
    for(int i=0;i<5;i++) {
      rprop.iteration();
    }
    Assert.assertArrayEquals(EXPECTED_WEIGHTS2, network.getFlat().getWeights(),0.0001)
   
    double e = network.calculateError(training);
    Assert.assertEquals(0.0767386807494191, e, 0.00001);
View Full Code Here

    BasicNetwork network2 = NetworkUtil.createXORNetworkUntrained();
    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    // train network 1, no continue
    ResilientPropagation rprop1 = new ResilientPropagation(network1,trainingData);
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
View Full Code Here

    MLDataSet trainingData = new BasicMLDataSet(XOR.XOR_INPUT,XOR.XOR_IDEAL);
   
    // train network 1, no continue
    ResilientPropagation rprop1 = new ResilientPropagation(network1,trainingData);
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
    ResilientPropagation rprop2 = new ResilientPropagation(network2,trainingData);
View Full Code Here

   
    // train network 1, no continue
    ResilientPropagation rprop1 = new ResilientPropagation(network1,trainingData);
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
    ResilientPropagation rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.iteration();
View Full Code Here

    // train network 1, no continue
    ResilientPropagation rprop1 = new ResilientPropagation(network1,trainingData);
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
    ResilientPropagation rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.iteration();
    rprop2.iteration();
View Full Code Here

    rprop1.iteration();
    rprop1.iteration();
   
    // train network 2, continue
    ResilientPropagation rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.iteration();
    rprop2.iteration();
    TrainingContinuation state = rprop2.pause();
    rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.resume(state);
    rprop2.iteration();
View Full Code Here

    rprop1.iteration();
   
    // train network 2, continue
    ResilientPropagation rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.iteration();
    rprop2.iteration();
    TrainingContinuation state = rprop2.pause();
    rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.resume(state);
    rprop2.iteration();
    rprop2.iteration();
View Full Code Here

    rprop2.iteration();
    rprop2.iteration();
    TrainingContinuation state = rprop2.pause();
    rprop2 = new ResilientPropagation(network2,trainingData);
    rprop2.resume(state);
    rprop2.iteration();
    rprop2.iteration();
   
    // verify weights are the same
    double[] weights1 = NetworkCODEC.networkToArray(network1);
    double[] weights2 = NetworkCODEC.networkToArray(network2);
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.