Package org.apache.hama.ml.math

Examples of org.apache.hama.ml.math.DenseDoubleVector$DefaultIterator
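The snippets below come from the Apache Hama ML codebase and exercise DenseDoubleVector mostly through construction, slice() and toArray(). The DefaultIterator named in the title is the element iterator DenseDoubleVector hands back for index-order traversal. A minimal sketch of that traversal, assuming the iterate() accessor and the DoubleVectorElement type declared on Hama's DoubleVector interface:

import java.util.Iterator;

import org.apache.hama.ml.math.DenseDoubleVector;
import org.apache.hama.ml.math.DoubleVector;
import org.apache.hama.ml.math.DoubleVector.DoubleVectorElement;

public class DefaultIteratorExample {
  public static void main(String[] args) {
    DoubleVector vec = new DenseDoubleVector(new double[] { 0.0, 1.5, 0.0, 3.0 });

    // iterate() returns DenseDoubleVector$DefaultIterator, which visits
    // every component in index order, zeros included
    Iterator<DoubleVectorElement> it = vec.iterate();
    while (it.hasNext()) {
      DoubleVectorElement e = it.next();
      System.out.printf("index=%d value=%.2f%n", e.getIndex(), e.getValue());
    }
  }
}

(iterateNonZero() returns a sibling iterator that skips zero components.)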


  @Test
  public void testWithRegularization() {
    // generate training data
    DoubleVector[] trainingData = new DenseDoubleVector[] {
        new DenseDoubleVector(new double[] { 0, 0, 0 }),
        new DenseDoubleVector(new double[] { 0, 1, 1 }),
        new DenseDoubleVector(new double[] { 1, 0, 1 }),
        new DenseDoubleVector(new double[] { 1, 1, 0 }) };

    // set parameters
    double learningRate = 0.3;
    double regularization = 0.02; // regularization should be a tiny number
    double momentum = 0; // no momentum
    String squashingFunctionName = "Sigmoid";
    String costFunctionName = "CrossEntropy";
    int[] layerSizeArray = new int[] { 2, 7, 1 };
    SmallMultiLayerPerceptron mlp = new SmallMultiLayerPerceptron(learningRate,
        regularization, momentum, squashingFunctionName, costFunctionName,
        layerSizeArray);

    try {
      // train by multiple instances
      Random rnd = new Random();
      for (int i = 0; i < 20000; ++i) {
        DenseDoubleMatrix[] weightUpdates = mlp
            .trainByInstance(trainingData[rnd.nextInt(4)]);
        mlp.updateWeightMatrices(weightUpdates);
      }

      // System.out.printf("Weight matrices: %s\n",
      // mlp.weightsToString(mlp.getWeightMatrices()));
      for (int i = 0; i < trainingData.length; ++i) {
        DenseDoubleVector testVec = (DenseDoubleVector) trainingData[i]
            .slice(2);
        double expected = trainingData[i].toArray()[2];
        double actual = mlp.output(testVec).toArray()[0];
        if (expected < 0.5 && actual >= 0.5 || expected >= 0.5 && actual < 0.5) {
          Log.info("Neural network fails to learn the XOR.");
        }
      }
    } catch (Exception e) { // closing structure restored; the original snippet was truncated here
      e.printStackTrace();
    }
  }


  @Test
  public void testWithMomentum() {
    // generate training data
    DoubleVector[] trainingData = new DenseDoubleVector[] {
        new DenseDoubleVector(new double[] { 0, 0, 0 }),
        new DenseDoubleVector(new double[] { 0, 1, 1 }),
        new DenseDoubleVector(new double[] { 1, 0, 1 }),
        new DenseDoubleVector(new double[] { 1, 1, 0 }) };

    // set parameters
    double learningRate = 0.3;
    double regularization = 0.02; // regularization should be a tiny number
    double momentum = 0.5; // with momentum
    String squashingFunctionName = "Sigmoid";
    String costFunctionName = "CrossEntropy";
    int[] layerSizeArray = new int[] { 2, 7, 1 };
    SmallMultiLayerPerceptron mlp = new SmallMultiLayerPerceptron(learningRate,
        regularization, momentum, squashingFunctionName, costFunctionName,
        layerSizeArray);

    try {
      // train by multiple instances
      Random rnd = new Random();
      for (int i = 0; i < 5000; ++i) {
        DenseDoubleMatrix[] weightUpdates = mlp
            .trainByInstance(trainingData[rnd.nextInt(4)]);
        mlp.updateWeightMatrices(weightUpdates);
      }

      // System.out.printf("Weight matrices: %s\n",
      // mlp.weightsToString(mlp.getWeightMatrices()));
      for (int i = 0; i < trainingData.length; ++i) {
        DenseDoubleVector testVec = (DenseDoubleVector) trainingData[i]
            .slice(2);
        double expected = trainingData[i].toArray()[2];
        double actual = mlp.output(testVec).toArray()[0];
        if (expected < 0.5 && actual >= 0.5 || expected >= 0.5 && actual < 0.5) {
          Log.info("Neural network fails to learn the XOR.");
        }
      }
    } catch (Exception e) { // closing structure restored; the original snippet was truncated here
      e.printStackTrace();
    }
  }

    String strDataPath = "/tmp/xor-training-by-xor";
    Path dataPath = new Path(strDataPath);

    // generate training data
    DoubleVector[] trainingData = new DenseDoubleVector[] {
        new DenseDoubleVector(new double[] { 0, 0, 0 }),
        new DenseDoubleVector(new double[] { 0, 1, 1 }),
        new DenseDoubleVector(new double[] { 1, 0, 1 }),
        new DenseDoubleVector(new double[] { 1, 1, 0 }) };

    try {
      URI uri = new URI(strDataPath);
      FileSystem fs = FileSystem.get(uri, conf);
      fs.delete(dataPath, true);
      if (!fs.exists(dataPath)) {
        fs.createNewFile(dataPath);
        SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf,
            dataPath, LongWritable.class, VectorWritable.class);

        for (int i = 0; i < 1000; ++i) {
          VectorWritable vecWritable = new VectorWritable(trainingData[i % 4]);
          writer.append(new LongWritable(i), vecWritable);
        }
        writer.close();
      }

    } catch (Exception e) {
      e.printStackTrace();
    }

    // begin training
    String modelPath = "/tmp/xorModel-training-by-xor.data";
    double learningRate = 0.6;
    double regularization = 0.02; // small regularization
    double momentum = 0.3; // with momentum
    String squashingFunctionName = "Tanh";
    String costFunctionName = "SquaredError";
    int[] layerSizeArray = new int[] { 2, 5, 1 };
    SmallMultiLayerPerceptron mlp = new SmallMultiLayerPerceptron(learningRate,
        regularization, momentum, squashingFunctionName, costFunctionName,
        layerSizeArray);

    Map<String, String> trainingParams = new HashMap<String, String>();
    trainingParams.put("training.iteration", "2000");
    trainingParams.put("training.mode", "minibatch.gradient.descent");
    trainingParams.put("training.batch.size", "100");
    trainingParams.put("tasks", "3");
    trainingParams.put("modelPath", modelPath);

    try {
      mlp.train(dataPath, trainingParams);
    } catch (Exception e) {
      e.printStackTrace();
    }

    // test the model
    for (int i = 0; i < trainingData.length; ++i) {
      DenseDoubleVector testVec = (DenseDoubleVector) trainingData[i].slice(2);
      try {
        double expected = trainingData[i].toArray()[2];
        double actual = mlp.output(testVec).toArray()[0];
        if (expected < 0.5 && actual >= 0.5 || expected >= 0.5 && actual < 0.5) {
          Log.info("Neural network fails to learn the XOR.");
        }
      } catch (Exception e) { // closing structure restored; the original snippet was truncated here
        e.printStackTrace();
      }
    }
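Because the run above persists the trained model to modelPath, a natural follow-up is reloading it for inference. A minimal sketch, assuming SmallMultiLayerPerceptron's single-argument constructor deserializes a previously written model from that path:

    try {
      // reload the model written by mlp.train(...) above (assumed constructor)
      SmallMultiLayerPerceptron trained = new SmallMultiLayerPerceptron(modelPath);
      DoubleVector query = new DenseDoubleVector(new double[] { 1, 0 });
      double prediction = trained.output(query).toArray()[0];
      System.out.printf("XOR(1, 0) ~= %f%n", prediction); // expect a value near 1.0
    } catch (Exception e) {
      e.printStackTrace();
    }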

  /**
   * Transform the input instance into its compressed representation.
   *
   * @param inputInstance
   * @return The compressed information.
   */
  private DoubleVector transform(DoubleVector inputInstance, int inputLayer) {
    DoubleVector internalInstance = new DenseDoubleVector(inputInstance.getDimension() + 1);
    internalInstance.set(0, 1);
    for (int i = 0; i < inputInstance.getDimension(); ++i) {
      internalInstance.set(i + 1, inputInstance.get(i));
    }
    DoubleFunction squashingFunction = model
        .getSquashingFunction(inputLayer);
    DoubleMatrix weightMatrix = null;
    if (inputLayer == 0) {
      // ... (snippet truncated in the original page)

        String[] tokens = line.trim().split(",");
        double[] vals = new double[tokens.length];
        for (int i = 0; i < tokens.length; ++i) {
          vals[i] = Double.parseDouble(tokens[i]);
        }
        DoubleVector instance = new DenseDoubleVector(vals);
        DoubleVector result = ann.getOutput(instance);
        double[] arrResult = result.toArray();
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < arrResult.length; ++i) {
          sb.append(arrResult[i]);
          if (i < arrResult.length - 1) {
            sb.append(','); // comma-separate the outputs (inferred completion)
          }
        }
        // ... (remainder of the snippet truncated in the original page)

  }

  public static DoubleVector readVector(DataInput in) throws IOException {
    int length = in.readInt();
    DoubleVector vector;
    vector = new DenseDoubleVector(length);
    for (int i = 0; i < length; i++) {
      vector.set(i, in.readDouble());
    }
    return vector;
  }
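readVector's wire format is simply a length prefix followed by the raw components, so the matching serializer is a direct mirror. A sketch written to match readVector (an assumption, not necessarily the exact counterpart in Hama's source), using java.io.DataOutput:

  public static void writeVector(DoubleVector vector, DataOutput out)
      throws IOException {
    out.writeInt(vector.getDimension());
    for (int i = 0; i < vector.getDimension(); i++) {
      out.writeDouble(vector.get(i));
    }
  }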

    Preconditions.checkArgument(this.layerSizeList.get(0) == instance
        .getDimension() + 1, String.format(
        "The dimension of input instance should be %d.",
        this.layerSizeList.get(0) - 1));
    // add bias feature
    DoubleVector instanceWithBias = new DenseDoubleVector(
        instance.getDimension() + 1);
    instanceWithBias.set(0, 0.99999); // set bias to be a little bit less than
                                      // 1.0
    for (int i = 1; i < instanceWithBias.getDimension(); ++i) {
      instanceWithBias.set(i, instance.get(i - 1));
    }

    List<DoubleVector> outputCache = getOutputInternal(instanceWithBias);
    // return the output of the last layer
    DoubleVector result = outputCache.get(outputCache.size() - 1);
    // ... (snippet truncated in the original page)

    DoubleVector vec = weightMatrix.multiplyVectorUnsafe(intermediateOutput);
    vec = vec.applyToElements(this.squashingFunctionList.get(fromLayer));

    // add bias
    DoubleVector vecWithBias = new DenseDoubleVector(vec.getDimension() + 1);
    vecWithBias.set(0, 1);
    for (int i = 0; i < vec.getDimension(); ++i) {
      vecWithBias.set(i + 1, vec.get(i));
    }
    return vecWithBias;
  }
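The method above is the per-layer forward step used throughout these snippets: multiply by the weight matrix, apply the squashing function element-wise, then prepend a constant-1 bias component for the next layer. A self-contained, array-based sketch of one such step (a hypothetical helper, not Hama's internals), with sigmoid as the squashing function:

  // One forward step: out = sigmoid(W * inputWithBias), where column 0 of W
  // multiplies the constant bias component inputWithBias[0] == 1.
  static double[] forwardStep(double[][] weights, double[] inputWithBias) {
    double[] out = new double[weights.length];
    for (int row = 0; row < weights.length; ++row) {
      double sum = 0;
      for (int col = 0; col < inputWithBias.length; ++col) {
        sum += weights[row][col] * inputWithBias[col];
      }
      out[row] = 1.0 / (1.0 + Math.exp(-sum)); // sigmoid squashing
    }
    return out;
  }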

              .format(
                  "The dimension of training instance is %d, but requires %d.",
                  trainingInstance.getDimension(), inputDimension
                      + outputDimension));

      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
      inputInstance.set(0, 1); // add bias
      for (int i = 0; i < inputDimension; ++i) {
        inputInstance.set(i + 1, trainingInstance.get(i));
      }

      labels = trainingInstance.sliceUnsafe(inputInstance.getDimension() - 1,
          trainingInstance.getDimension() - 1);
    } else if (this.learningStyle == LearningStyle.UNSUPERVISED) {
      // labels are identical to input features
      outputDimension = inputDimension;
      // validate training instance
      Preconditions.checkArgument(inputDimension == trainingInstance
          .getDimension(), String.format(
          "The dimension of training instance is %d, but requires %d.",
          trainingInstance.getDimension(), inputDimension));

      inputInstance = new DenseDoubleVector(this.layerSizeList.get(0));
      inputInstance.set(0, 1); // add bias
      for (int i = 0; i < inputDimension; ++i) {
        inputInstance.set(i + 1, trainingInstance.get(i));
      }
      labels = trainingInstance.deepCopy();
      // ... (snippet truncated in the original page)
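The two branches above encode the instance layout conventions: supervised instances concatenate features and labels in a single vector, while unsupervised (autoencoder-style) instances carry features only and reuse them as labels. A hypothetical illustration for a 2-input, 1-output network:

    // supervised: [x1, x2, y] -> features (x1, x2) plus the label y
    DoubleVector supervised = new DenseDoubleVector(new double[] { 1, 0, 1 });
    // unsupervised: [x1, x2] -> the labels become a deep copy of the features
    DoubleVector unsupervised = new DenseDoubleVector(new double[] { 1, 0 });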

        .size()];
    for (int m = 0; m < weightUpdateMatrices.length; ++m) {
      weightUpdateMatrices[m] = new DenseDoubleMatrix(this.weightMatrixList
          .get(m).getRowCount(), this.weightMatrixList.get(m).getColumnCount());
    }
    DoubleVector deltaVec = new DenseDoubleVector(
        this.layerSizeList.get(this.layerSizeList.size() - 1));

    DoubleFunction squashingFunction = this.squashingFunctionList
        .get(this.squashingFunctionList.size() - 1);

    DoubleMatrix lastWeightMatrix = this.weightMatrixList
        .get(this.weightMatrixList.size() - 1);
    for (int i = 0; i < deltaVec.getDimension(); ++i) {
      double costFuncDerivative = this.costFunction.applyDerivative(
          labels.get(i), output.get(i + 1));
      // add regularization
      costFuncDerivative += this.regularizationWeight
          * lastWeightMatrix.getRowVector(i).sum();
      deltaVec.set(i, costFuncDerivative);
      deltaVec.set(
          i,
          deltaVec.get(i)
              * squashingFunction.applyDerivative(output.get(i + 1)));
    }

    // start from previous layer of output layer
    for (int layer = this.layerSizeList.size() - 2; layer >= 0; --layer) {
      // ... (snippet truncated in the original page)
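The loop opened just above (truncated in the original page) propagates these output-layer deltas backwards. In generic backpropagation terms, each hidden unit's delta is the squashing derivative at its activation times the weighted sum of the next layer's deltas. A standalone sketch of that recursion (generic backprop with sigmoid activations, not Hama's exact code):

  // delta_l[i] = sigmoid'(out_l[i]) * sum_j W[j][i] * delta_{l+1}[j]
  static double[] hiddenDeltas(double[][] weightsToNextLayer,
      double[] layerOutput, double[] nextLayerDeltas) {
    double[] deltas = new double[layerOutput.length];
    for (int i = 0; i < layerOutput.length; ++i) {
      double sum = 0;
      for (int j = 0; j < nextLayerDeltas.length; ++j) {
        sum += weightsToNextLayer[j][i] * nextLayerDeltas[j];
      }
      // sigmoid derivative expressed in terms of the activation itself
      deltas[i] = sum * layerOutput[i] * (1.0 - layerOutput[i]);
    }
    return deltas;
  }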
