Package org.apache.hama.commons.math

Examples of org.apache.hama.commons.math.DenseDoubleMatrix
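
Common ways to build a DenseDoubleMatrix, collected from the excerpts below: from a double[][], from dimensions plus a constant fill value, and from dimensions plus a java.util.Random for random initialization. The sketch is illustrative; get(row, col) is assumed to mirror the set(row, col, value) call used in the excerpts.

    // Minimal sketch using the constructors and accessors seen below.
    double[][] values = { { 1, 2 }, { 3, 4 } };
    DenseDoubleMatrix fromArray = new DenseDoubleMatrix(values);
    DenseDoubleMatrix zeros = new DenseDoubleMatrix(3, 4, 0);  // constant fill
    DenseDoubleMatrix randomInit = new DenseDoubleMatrix(3, 4, new java.util.Random());
    zeros.set(0, 0, fromArray.get(1, 1));  // copy a single entry (assumed getter)
    int rows = zeros.getRowCount();        // 3
    int cols = zeros.getColumnCount();     // 4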


    peer.sync();

    // normalize: the master peer accumulates the feature matrices
    // received from every peer
    DoubleMatrix res = null;
    if (peer.getPeerName().equals(master)) {
      res = new DenseDoubleMatrix(featureMatrix.getRowCount(),
                                  featureMatrix.getColumnCount(), 0);
      int incomingMsgCount = 0;
      while ((msg = peer.getCurrentMessage()) != null) {
        MatrixWritable tmp = (MatrixWritable) msg.get(msgFeatureMatrix);
        // add() returns a new matrix, so the result must be reassigned
        res = res.add(tmp.getMatrix());
        incomingMsgCount++;
      }


        // store the user's feature vector and record its length
        VectorWritable userFeatures = (VectorWritable) msg.get(OnlineCF.Settings.MSG_VALUE);
        inpUsersFeatures.put(userId, userFeatures);
        userFeatureSize = userFeatures.getVector().getLength();
      }
    }
    // once the feature dimensions are known, initialize the factor
    // matrices with random entries
    if (inpItemsFeatures.size() > 0) {
      itemFeatureMatrix = new DenseDoubleMatrix(MATRIX_RANK, itemFeatureSize, rnd);
    }
    if (inpUsersFeatures.size() > 0) {
      userFeatureMatrix = new DenseDoubleMatrix(MATRIX_RANK, userFeatureSize, rnd);
    }
  }

      double[][] matrix23 = { // 4 x 3
          { 0.2, 0.5, 0.2 }, { 0.5, 0.1, 0.5 }, { 0.1, 0.2, 0.1 },
          { 0.1, 0.2, 0.5 } };

      DoubleMatrix[] matrices = { new DenseDoubleMatrix(matrix01),
          new DenseDoubleMatrix(matrix12), new DenseDoubleMatrix(matrix23) };
      // serialize each matrix to the DataOutput in turn
      for (DoubleMatrix mat : matrices) {
        MatrixWritable.write(mat, output);
      }

      // serialize the feature transformer

    double error = 0.22;
    double[][] matrix1 = new double[][] { { 0.1, 0.2, 0.8, 0.5 },
        { 0.3, 0.4, 0.6, 0.2 }, { 0.5, 0.6, 0.1, 0.5 } };
    double[][] matrix2 = new double[][] { { 0.8, 1.2, 0.5 } };
    DoubleMatrix[] matrices = new DoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(matrix1);
    matrices[1] = new DenseDoubleMatrix(matrix2);

    boolean isConverge = false;

    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
        error, isConverge, matrices, null);
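
Judging from the two constructor calls in these excerpts, the message carries the current training error, a convergence flag, the weight matrices, and optionally the previous weight matrices; here the last argument is null, while the next excerpt passes a non-null array.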

    double[][] matrix1 = new double[][] { { 0.1, 0.2, 0.8, 0.5 },
        { 0.3, 0.4, 0.6, 0.2 }, { 0.5, 0.6, 0.1, 0.5 } };
    double[][] matrix2 = new double[][] { { 0.8, 1.2, 0.5 } };
    DoubleMatrix[] matrices = new DoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(matrix1);
    matrices[1] = new DenseDoubleMatrix(matrix2);

    double[][] prevMatrix1 = new double[][] { { 0.1, 0.1, 0.2, 0.3 },
        { 0.2, 0.4, 0.1, 0.5 }, { 0.5, 0.1, 0.5, 0.2 } };
    double[][] prevMatrix2 = new double[][] { { 0.1, 0.2, 0.5, 0.9 },
        { 0.3, 0.5, 0.2, 0.6 }, { 0.6, 0.8, 0.7, 0.5 } };

    DoubleMatrix[] prevMatrices = new DoubleMatrix[2];
    prevMatrices[0] = new DenseDoubleMatrix(prevMatrix1);
    prevMatrices[1] = new DenseDoubleMatrix(prevMatrix2);

    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
        error, isConverge, matrices, prevMatrices);
    Configuration conf = new Configuration();
    String strPath = "/tmp/testReadWriteSmallLayeredNeuralNetworkMessageWithPrev";
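
The excerpt ends before the actual I/O. A plausible continuation, assuming SmallLayeredNeuralNetworkMessage implements Hadoop's Writable (the write/readFields calls below are that assumption, not something the excerpt confirms):

    // Hypothetical round trip through a file; assumes the message type
    // implements org.apache.hadoop.io.Writable.
    Path path = new Path(strPath);
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(path);
    message.write(out);            // assumed Writable.write(DataOutput)
    out.close();

    FSDataInputStream in = fs.open(path);
    SmallLayeredNeuralNetworkMessage readMessage =
        new SmallLayeredNeuralNetworkMessage(0, false, null, null);
    readMessage.readFields(in);    // assumed Writable.readFields(DataInput)
    in.close();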

    double[][] mat = { { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 } };
    double[][] mat2 = { { 10, 20 }, { 30, 40 }, { 50, 60 } };
    double[][][] mats = { mat, mat2 };

    DenseDoubleMatrix[] matrices = new DenseDoubleMatrix[] {
        new DenseDoubleMatrix(mat), new DenseDoubleMatrix(mat2) };

    SmallMLPMessage message = new SmallMLPMessage(owner, true, matrices);

    Configuration conf = new Configuration();
    String strPath = "/tmp/testSmallMLPMessage";

        { 0.7, 0.8, 0.9 } };
    double[][] prevMat2 = { { 1, 2 }, { 3, 4 }, { 5, 6 } };
    double[][][] prevMats = { prevMat, prevMat2 };

    DenseDoubleMatrix[] matrices = new DenseDoubleMatrix[] {
        new DenseDoubleMatrix(mat), new DenseDoubleMatrix(mat2) };

    DenseDoubleMatrix[] prevMatrices = new DenseDoubleMatrix[] {
        new DenseDoubleMatrix(prevMat), new DenseDoubleMatrix(prevMat2) };

    boolean terminated = false;
    SmallMLPMessage message = new SmallMLPMessage(owner, terminated, matrices,
        prevMatrices);

      }
    }
  }

  /**
   * Reads a matrix in the format produced by write(): row count, column
   * count, then the entries in row-major order.
   */
  public static DoubleMatrix read(DataInput in) throws IOException {
    DoubleMatrix mat = new DenseDoubleMatrix(in.readInt(), in.readInt());
    for (int row = 0; row < mat.getRowCount(); row++) {
      for (int col = 0; col < mat.getColumnCount(); col++) {
        mat.set(row, col, in.readDouble());
      }
    }
    return mat;
  }
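
Both halves of the serialization are visible at this point, write(DoubleMatrix, DataOutput) in an earlier excerpt and read(DataInput) here, so a self-contained round trip through in-memory streams can be sketched:

    // Round-trip a 2 x 2 matrix through MatrixWritable.
    java.io.ByteArrayOutputStream bytes = new java.io.ByteArrayOutputStream();
    java.io.DataOutputStream out = new java.io.DataOutputStream(bytes);
    MatrixWritable.write(new DenseDoubleMatrix(new double[][] { { 1, 2 }, { 3, 4 } }), out);
    out.close();

    java.io.DataInputStream in = new java.io.DataInputStream(
        new java.io.ByteArrayInputStream(bytes.toByteArray()));
    DoubleMatrix copy = MatrixWritable.read(in);  // 2 x 2, entries 1..4
    in.close();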

    ann.setMomemtumWeight(momentumWeight);
    double regularizationWeight = 0.05;
    ann.setRegularizationWeight(regularizationWeight);
    // intentionally initialize the weights to fixed values (0.2 and 0.8)
    DoubleMatrix[] matrices = new DenseDoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(5, 3, 0.2);
    matrices[1] = new DenseDoubleMatrix(1, 6, 0.8);
    ann.setWeightMatrices(matrices);
    ann.setLearningStyle(LearningStyle.UNSUPERVISED);
   
    FeatureTransformer defaultFeatureTransformer = new DefaultFeatureTransformer();
    ann.setFeatureTransformer(defaultFeatureTransformer);
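
Putting this excerpt together with the next one, a fully configured network reads roughly as follows. Every call appears somewhere in these excerpts; the layer sizes and constants are illustrative:

    // Sketch assembled from the configuration calls shown in these excerpts.
    SmallLayeredNeuralNetwork net = new SmallLayeredNeuralNetwork();
    net.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    net.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    net.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    net.setCostFunction(FunctionFactory.createDoubleDoubleFunction("SquaredError"));
    net.setLearningRate(0.3);
    net.setMomemtumWeight(0.1);          // note the spelling used by the API
    net.setRegularizationWeight(0.05);
    DoubleMatrix[] weights = new DenseDoubleMatrix[2];
    weights[0] = new DenseDoubleMatrix(3, 3, 0.5);  // hidden x (inputs + bias)
    weights[1] = new DenseDoubleMatrix(1, 4, 0.5);  // output x (hidden + bias)
    net.setWeightMatrices(weights);
    net.setFeatureTransformer(new DefaultFeatureTransformer());
    DoubleVector out = net.getOutput(new DenseDoubleVector(new double[] { 0, 1 }));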

    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann.setLearningRate(0.1);
    // intentionally initialize all weights to 0.5
    DoubleMatrix[] matrices = new DenseDoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(5, 3, 0.5);
    matrices[1] = new DenseDoubleMatrix(1, 6, 0.5);
    ann.setWeightMatrices(matrices);

    double[] arr = new double[] { 0, 1 };
    DoubleVector training = new DenseDoubleVector(arr);
    DoubleVector result = ann.getOutput(training);
    assertEquals(1, result.getDimension());
    // assertEquals(3, result.get(0), 0.000001);

    // second network
    SmallLayeredNeuralNetwork ann2 = new SmallLayeredNeuralNetwork();
    ann2.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann2.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann2.setLearningRate(0.3);
    // intentionally initialize all weights to 0.5
    DoubleMatrix[] matrices2 = new DenseDoubleMatrix[2];
    matrices2[0] = new DenseDoubleMatrix(3, 3, 0.5);
    matrices2[1] = new DenseDoubleMatrix(1, 4, 0.5);
    ann2.setWeightMatrices(matrices2);

    double[] test = { 0, 0 };
    double[] result2 = { 0.807476 };

    DoubleVector vec = ann2.getOutput(new DenseDoubleVector(test));
    assertArrayEquals(result2, vec.toArray(), 0.000001);

    SmallLayeredNeuralNetwork ann3 = new SmallLayeredNeuralNetwork();
    ann3.addLayer(2, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.addLayer(3, false, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann3.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("SquaredError"));
    ann3.setLearningRate(0.3);
    // intentionally initialize all weights to 0.5
    DoubleMatrix[] initMatrices = new DenseDoubleMatrix[2];
    initMatrices[0] = new DenseDoubleMatrix(3, 3, 0.5);
    initMatrices[1] = new DenseDoubleMatrix(1, 4, 0.5);
    ann3.setWeightMatrices(initMatrices);

    double[] instance = { 0, 1 };
    DoubleVector output = ann3.getOutput(new DenseDoubleVector(instance));
    assertEquals(0.8315410, output.get(0), 0.000001);
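
Both asserted constants can be checked by hand if the network prepends a bias unit of 1 to each layer's input: with all weights 0.5 and input (0, 0), every hidden neuron sees only the bias, sigmoid(0.5) ≈ 0.622459, the output neuron sees 0.5 · (1 + 3 · 0.622459) ≈ 1.433689, and sigmoid(1.433689) ≈ 0.807476; with input (0, 1) the hidden pre-activation is 1.0, sigmoid(1.0) ≈ 0.731059, the output pre-activation is 0.5 · (1 + 3 · 0.731059) ≈ 1.596588, and sigmoid(1.596588) ≈ 0.831541.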
