Package: org.encog.neural.networks.training

Examples of org.encog.neural.networks.training.TrainingError


   */
  public MLTrain create(final MLMethod method,
      final MLDataSet training, final String argsStr) {

    if (!(method instanceof MLEncodable)) {
      throw new TrainingError(
          "Invalid method type, requires an encodable MLMethod");
    }

    final CalculateScore score = new TrainingSetScore(training);

View Full Code Here


   *            The training state to return to.
   */
  @Override
  public void resume(final TrainingContinuation state) {
    if (!isValidResume(state)) {
      throw new TrainingError("Invalid training resume data length");
    }

    this.lastDelta = ((double[]) state.get(Backpropagation.LAST_DELTA));

  }
View Full Code Here

   *            The training data to use. Must be indexable.
   */
  public SVDTraining(final RBFNetwork network, final MLDataSet training) {
    super(TrainingImplementationType.OnePass);
    if (network.getOutputCount() != 1) {
      throw new TrainingError(
          "SVD requires an output layer with a single neuron.");
    }

    setTraining(training);
    this.network = network;
View Full Code Here

      final CalculateScore calculateScore, final double startTemp,
      final double stopTemp, final int cycles) {
    super(TrainingImplementationType.Iterative);
   
    if( !(network instanceof MLRegression) ) {
      throw new TrainingError("Simulated annealing requires the MLMethod to support MLRegression.");
    }
   
    this.network = network;
    this.calculateScore = calculateScore;
    this.anneal = new NeuralSimulatedAnnealingHelper(this);
View Full Code Here

   *            The training state to return to.
   */
  @Override
  public void resume(final TrainingContinuation state) {
    if (!isValidResume(state)) {
      throw new TrainingError("Invalid training resume data length");
    }
    final double[] lastGradient = (double[]) state
        .get(ResilientPropagation.LAST_GRADIENTS);
    final double[] updateValues = (double[]) state
        .get(ResilientPropagation.UPDATE_VALUES);
View Full Code Here

        break;
      case iRPROPm:
        weightChange = updateiWeightMinus(gradients,lastGradient,index);
        break;
      default:
        throw new TrainingError("Unknown RPROP type: " + this.rpropType);
    }
   
    this.lastWeightChange[index] = weightChange;
    return weightChange;
  }
View Full Code Here

   *            The training state to return to.
   */
  @Override
  public void resume(final TrainingContinuation state) {
    if (!isValidResume(state)) {
      throw new TrainingError("Invalid training resume data length");
    }

    final double[] lastGradient = (double[]) state
        .get(QuickPropagation.LAST_GRADIENTS);

View Full Code Here

   * @param data1
   *            Not used.
   */
  @Override
  public void add(final MLData data1) {
    throw new TrainingError(FoldedDataSet.ADD_NOT_SUPPORTED);

  }
View Full Code Here

   * @param idealData
   *            Not used.
   */
  @Override
  public void add(final MLData inputData, final MLData idealData) {
    throw new TrainingError(FoldedDataSet.ADD_NOT_SUPPORTED);

  }
View Full Code Here

   * @param inputData
   *            Not used.
   */
  @Override
  public void add(final MLDataPair inputData) {
    throw new TrainingError(FoldedDataSet.ADD_NOT_SUPPORTED);

  }
View Full Code Here

TOP

Related Classes of org.encog.neural.networks.training.TrainingError

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.