Package org.neuroph.core.learning

Examples of org.neuroph.core.learning.SupervisedTrainingElement
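In the Neuroph releases shown below, a SupervisedTrainingElement pairs one input vector with the desired output vector for that input, and a TrainingSet collects such pairs for a supervised learning rule (later Neuroph versions renamed these classes DataSetRow and DataSet). Every example on this page builds on the same minimal pattern:

    // one supervised pattern: two inputs and one desired output
    TrainingSet trainingSet = new TrainingSet(2, 1);
    trainingSet.addElement(new SupervisedTrainingElement(
            new double[]{0, 1},   // input vector
            new double[]{1}));    // desired output vector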


    /**
     * Create and run MLP with XOR training set
     */
    public static void main(String[] args) {
        // create training set (logical XOR function)
        TrainingSet trainingSet = new TrainingSet(2, 1);
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{0}));

        MultiLayerPerceptron nnet = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);
        MatrixMultiLayerPerceptron mnet = new MatrixMultiLayerPerceptron(nnet);

        System.out.println("Training network...");
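The excerpt ends just after the "Training network..." message. A plausible continuation, assuming the NeuralNetwork API of these releases (a blocking learnInSameThread, varargs setInput, and getOutput returning double[]; older versions returned Vector<Double>). The matrix-based variant would be exercised the same way:

        // train on the XOR set, then query one pattern
        nnet.learnInSameThread(trainingSet);
        nnet.setInput(1, 0);
        nnet.calculate();
        System.out.println("Output for (1, 0): " + nnet.getOutput()[0]);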


    /**
     * Create and train a perceptron with the logical AND training set
     */
    public static void main(String[] args) {
            // create training set (logical AND function)
            TrainingSet trainingSet = new TrainingSet(2, 1);
            trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
            trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{0}));
            trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{0}));
            trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{1}));

            // create perceptron neural network
            NeuralNetwork myPerceptron = new Perceptron(2, 1);
            // learn the training set
            myPerceptron.learnInSameThread(trainingSet);
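To verify the learned AND function, a sketch under the same API assumptions as above (setInput accepting a double varargs array, getOutput returning double[]):

            // test the trained perceptron on all four input patterns
            double[][] patterns = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
            for (double[] p : patterns) {
                myPerceptron.setInput(p);
                myPerceptron.calculate();
                System.out.println(p[0] + " AND " + p[1] + " = " + myPerceptron.getOutput()[0]);
            }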

        input[index++] = this.normalizedSunspots[i];
      }

      ideal[0] = this.normalizedSunspots[year];

      result.addElement(new SupervisedTrainingElement(input, ideal));
    }
    return result;
  }
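This fragment comes from a sunspot-prediction sample: each element's inputs are a window of consecutive normalized values, and the ideal output is the value that follows the window. The same idea as a self-contained helper (series and windowSize are hypothetical names, not from the original source):

    // build a sliding-window training set from a normalized time series
    static TrainingSet windowedTrainingSet(double[] series, int windowSize) {
        TrainingSet result = new TrainingSet(windowSize, 1);
        for (int year = windowSize; year < series.length; year++) {
            double[] input = new double[windowSize];
            System.arraycopy(series, year - windowSize, input, 0, windowSize);
            double[] ideal = {series[year]}; // the next value after the window
            result.addElement(new SupervisedTrainingElement(input, ideal));
        }
        return result;
    }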

    /**
     * Create and train a multi layer perceptron for the logical XOR function
     */
    public static void main(String[] args) {
     
        // create training set (logical XOR function)
        TrainingSet trainingSet = new TrainingSet(2, 1);
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 0}, new double[]{0}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{0, 1}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 0}, new double[]{1}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{1, 1}, new double[]{0}));

        // create multi layer perceptron
        MultiLayerPerceptron myMlPerceptron = new MultiLayerPerceptron(TransferFunctionType.TANH, 2, 3, 1);

        // enable batch if using MomentumBackpropagation
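The truncated comment refers to batch weight updates. A sketch of what likely follows, assuming the learning rule exposes setBatchMode and setMomentum as the comment implies:

        // switch to momentum backpropagation with batch updates, then train
        MomentumBackpropagation learningRule = new MomentumBackpropagation();
        learningRule.setMomentum(0.7);
        learningRule.setBatchMode(true);
        myMlPerceptron.setLearningRule(learningRule);
        myMlPerceptron.learnInSameThread(trainingSet);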

            // the bounds check alone suffices; when outputsCount is 0 the
            // row is added as a plain (unsupervised) TrainingElement
            if (i + inputsCount + outputsCount <= values.length) {
                for (int j = i + inputsCount; j < i + inputsCount + outputsCount; j++) {
                    outputs.add(values[j]);
                }
                if (outputsCount > 0) {
                    trainingSet.addElement(new SupervisedTrainingElement(inputs, outputs));
                } else {
                    trainingSet.addElement(new TrainingElement(inputs));
                }
            }
        }
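The fragment splits each row of a flat value array into an input slice and an output slice. A self-contained sketch of the whole loop (addRows and its parameters are hypothetical, shown only to give the excerpt context; requires java.util.Arrays):

    // split a flat array into fixed-size rows of inputs followed by outputs
    static void addRows(TrainingSet trainingSet, double[] values,
                        int inputsCount, int outputsCount) {
        int rowSize = inputsCount + outputsCount;
        for (int i = 0; i + rowSize <= values.length; i += rowSize) {
            double[] inputs = Arrays.copyOfRange(values, i, i + inputsCount);
            if (outputsCount > 0) {
                double[] outputs = Arrays.copyOfRange(values, i + inputsCount, i + rowSize);
                trainingSet.addElement(new SupervisedTrainingElement(inputs, outputs));
            } else {
                trainingSet.addElement(new TrainingElement(inputs));
            }
        }
    }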

        ((LMS) neuralNet.getLearningRule()).setLearningRate(0.7); // learning rate in (0, 1]
        ((LMS) neuralNet.getLearningRule()).setMaxIterations(maxIterations);
        TrainingSet trainingSet = new TrainingSet();

        double daxmax = 10000.0D; // scale factor that maps DAX closing values into [0, 1]
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3710.0D / daxmax, 3690.0D / daxmax, 3890.0D / daxmax, 3695.0D / daxmax}, new double[]{3666.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3690.0D / daxmax, 3890.0D / daxmax, 3695.0D / daxmax, 3666.0D / daxmax}, new double[]{3692.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3890.0D / daxmax, 3695.0D / daxmax, 3666.0D / daxmax, 3692.0D / daxmax}, new double[]{3886.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3695.0D / daxmax, 3666.0D / daxmax, 3692.0D / daxmax, 3886.0D / daxmax}, new double[]{3914.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3666.0D / daxmax, 3692.0D / daxmax, 3886.0D / daxmax, 3914.0D / daxmax}, new double[]{3956.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3692.0D / daxmax, 3886.0D / daxmax, 3914.0D / daxmax, 3956.0D / daxmax}, new double[]{3953.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3886.0D / daxmax, 3914.0D / daxmax, 3956.0D / daxmax, 3953.0D / daxmax}, new double[]{4044.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3914.0D / daxmax, 3956.0D / daxmax, 3953.0D / daxmax, 4044.0D / daxmax}, new double[]{3987.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3956.0D / daxmax, 3953.0D / daxmax, 4044.0D / daxmax, 3987.0D / daxmax}, new double[]{3996.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3953.0D / daxmax, 4044.0D / daxmax, 3987.0D / daxmax, 3996.0D / daxmax}, new double[]{4043.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4044.0D / daxmax, 3987.0D / daxmax, 3996.0D / daxmax, 4043.0D / daxmax}, new double[]{4068.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3987.0D / daxmax, 3996.0D / daxmax, 4043.0D / daxmax, 4068.0D / daxmax}, new double[]{4176.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{3996.0D / daxmax, 4043.0D / daxmax, 4068.0D / daxmax, 4176.0D / daxmax}, new double[]{4187.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4043.0D / daxmax, 4068.0D / daxmax, 4176.0D / daxmax, 4187.0D / daxmax}, new double[]{4223.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4068.0D / daxmax, 4176.0D / daxmax, 4187.0D / daxmax, 4223.0D / daxmax}, new double[]{4259.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4176.0D / daxmax, 4187.0D / daxmax, 4223.0D / daxmax, 4259.0D / daxmax}, new double[]{4203.0D / daxmax}));
        trainingSet.addElement(new SupervisedTrainingElement(new double[]{4187.0D / daxmax, 4223.0D / daxmax, 4259.0D / daxmax, 4203.0D / daxmax}, new double[]{3989.0D / daxmax}));
        neuralNet.learnInSameThread(trainingSet);
        System.out.println("Time stamp N2:" + new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss:SSS").format(new Date()));

        TrainingSet testSet = new TrainingSet();
        testSet.addElement(new TrainingElement(new double[]{4223.0D / daxmax, 4259.0D / daxmax, 4203.0D / daxmax, 3989.0D / daxmax}));
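The test element holds the last four normalized closes. A plausible continuation (same API assumptions as above) feeds them to the trained network and rescales the prediction back to index points:

        neuralNet.setInput(4223.0D / daxmax, 4259.0D / daxmax, 4203.0D / daxmax, 3989.0D / daxmax);
        neuralNet.calculate();
        // multiply by daxmax to undo the normalization
        System.out.println("Predicted close: " + neuralNet.getOutput()[0] * daxmax);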

                double d2 = (Double.parseDouble(s2[1]) - minlevel) / normalizer;
                double d3 = (Double.parseDouble(s3[1]) - minlevel) / normalizer;
                double d4 = (Double.parseDouble(s4[1]) - minlevel) / normalizer;
                double d5 = (Double.parseDouble(s5[1]) - minlevel) / normalizer;
                System.out.println(i + " " + d1 + " " + d2 + " " + d3 + " " + d4 + " ->" + d5);
                trainingSet.addElement(new SupervisedTrainingElement(new double[]{d1, d2, d3, d4}, new double[]{d5}));
            }
        } catch (Exception e) {
            System.out.println(e.getMessage());
            return null;
        }
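The inputs here are min-max scaled: each raw level is shifted by minlevel and divided by normalizer, which maps the series into [0, 1] when normalizer is the level range. Predictions must be mapped back the same way. A sketch, assuming normalizer = max - min as the fragment suggests:

    static double normalize(double raw, double minlevel, double normalizer) {
        return (raw - minlevel) / normalizer;  // into [0, 1]
    }

    static double denormalize(double scaled, double minlevel, double normalizer) {
        return scaled * normalizer + minlevel; // back to raw water levels
    }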

        for (int i = 0; i < outputsCount; i++) {
            outputs[i] = Double.parseDouble(values[inputsCount + i]);
        }

        if (outputsCount > 0) {
            trainingSet.addElement(new SupervisedTrainingElement(inputs, outputs));
        } else {
            trainingSet.addElement(new TrainingElement(inputs));
        }
      }
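Only the output half of the per-line parse survives in this excerpt; the code just above it presumably filled the inputs array symmetrically (a hedged reconstruction, not the original lines):

        double[] inputs = new double[inputsCount];
        for (int i = 0; i < inputsCount; i++) {
            inputs[i] = Double.parseDouble(values[i]);
        }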

    TrainingSet trainingSet = new TrainingSet();

    for (Entry<String, FractionRgbData> entry : rgbDataMap.entrySet()) {
      double[] input = entry.getValue().getFlattenedRgbValues();
      double[] response = createResponse(entry.getKey(), imageLabels);
      trainingSet.addElement(new SupervisedTrainingElement(
          VectorParser.convertToVector(input),
          VectorParser.convertToVector(response)));
    }

    return trainingSet;
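createResponse is not shown in either image-recognition excerpt. A plausible reconstruction (an assumption, not the original code): it returns a one-hot style vector over imageLabels, with 1.0 at each label the image name matches:

  // hypothetical: desired output is 1.0 for the label(s) the image name carries
  static double[] createResponse(String imageName, List<String> imageLabels) {
    double[] response = new double[imageLabels.size()];
    for (int i = 0; i < imageLabels.size(); i++) {
      if (imageName.startsWith(imageLabels.get(i))) {
        response[i] = 1.0;
      }
    }
    return response;
  }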

    for (Entry<String, FractionRgbData> entry : rgbDataMap.entrySet()) {
      double[] inputRGB = entry.getValue().getFlattenedRgbValues();
      double[] inputBW = FractionRgbData.convertRgbInputToBinaryBlackAndWhite(inputRGB);
      double[] response = createResponse(entry.getKey(), imageLabels);
      trainingSet.addElement(new SupervisedTrainingElement(
          VectorParser.convertToVector(inputBW),
          VectorParser.convertToVector(response)));
    }

    return trainingSet;
