Package org.apache.hama.ml.math

Examples of org.apache.hama.ml.math.DenseDoubleMatrix
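
The snippets below are taken from Apache Hama's multilayer-perceptron implementation and its tests; together they show how DenseDoubleMatrix instances are allocated, filled, updated during back-propagation, and serialized.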


    this.weightMatrice = new DenseDoubleMatrix[this.numberOfLayers - 1];
    // each layer contains one bias neuron
    Random rnd = new Random();
    for (int i = 0; i < this.numberOfLayers - 1; ++i) {
      // add an extra row of weights for the bias neuron
      this.weightMatrice[i] = new DenseDoubleMatrix(this.layerSizeArray[i] + 1,
          this.layerSizeArray[i + 1]);
      int rowCount = this.weightMatrice[i].getRowCount();
      int colCount = this.weightMatrice[i].getColumnCount();
      for (int row = 0; row < rowCount; ++row) {
        for (int col = 0; col < colCount; ++col) {
          // the listing is truncated here; a plausible completion sets each
          // weight to a small random value centered at zero
          this.weightMatrice[i].set(row, col, rnd.nextDouble() - 0.5);
        }
      }
    }
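
The "+1" in the row dimension is the bias row. As a quick illustration with hypothetical layer sizes (only the constructor shown above is assumed):

    // hypothetical layer sizes {3, 4, 2}: two weight matrices, each with an
    // extra bias row, giving shapes (3 + 1) x 4 and (4 + 1) x 2
    int[] layerSizeArray = new int[] { 3, 4, 2 };
    DenseDoubleMatrix w0 = new DenseDoubleMatrix(layerSizeArray[0] + 1,
        layerSizeArray[1]); // 4 x 4
    DenseDoubleMatrix w1 = new DenseDoubleMatrix(layerSizeArray[1] + 1,
        layerSizeArray[2]); // 5 x 2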


  /** Allocates the previous-weight-update matrices, one per layer pair. */
  private void initializePrevWeightUpdateMatrix() {
    this.prevWeightUpdateMatrices = new DenseDoubleMatrix[this.numberOfLayers - 1];
    for (int i = 0; i < this.prevWeightUpdateMatrices.length; ++i) {
      int row = this.layerSizeArray[i] + 1; // +1 for the bias neuron
      int col = this.layerSizeArray[i + 1];
      this.prevWeightUpdateMatrices[i] = new DenseDoubleMatrix(row, col);
    }
  }
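
Note that new DenseDoubleMatrix(row, col) yields an all-zero matrix, since Java zero-initializes double arrays (the getZeroWeightMatrices helper further down relies on the same fact). The first training step therefore sees no momentum contribution:

    DenseDoubleMatrix m = new DenseDoubleMatrix(3, 2);
    assert m.get(0, 0) == 0.0; // freshly allocated matrices are all zeros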

  DenseDoubleMatrix[] trainByInstance(DoubleVector trainingInstance)
      throws Exception {
    // initialize weight update matrices
    DenseDoubleMatrix[] weightUpdateMatrices = new DenseDoubleMatrix[this.layerSizeArray.length - 1];
    for (int m = 0; m < weightUpdateMatrices.length; ++m) {
      weightUpdateMatrices[m] = new DenseDoubleMatrix(
          this.layerSizeArray[m] + 1, this.layerSizeArray[m + 1]);
    }

    if (trainingInstance == null) {
      return weightUpdateMatrices;
    }

    double[] trainingVec = trainingInstance.toArray();
    double[] trainingFeature = Arrays.copyOfRange(trainingVec, 0,
        this.layerSizeArray[0]);
    double[] trainingLabels = Arrays.copyOfRange(trainingVec,
        this.layerSizeArray[0], trainingVec.length);

    DoubleVector trainingFeatureVec = new DenseDoubleVector(trainingFeature);
    List<double[]> outputCache = this.outputInternal(trainingFeatureVec);

    // calculate the delta of output layer
    double[] delta = new double[this.layerSizeArray[this.layerSizeArray.length - 1]];
    double[] outputLayerOutput = outputCache.get(outputCache.size() - 1);
    double[] lastHiddenLayerOutput = outputCache.get(outputCache.size() - 2);

    DenseDoubleMatrix prevWeightUpdateMatrix = this.prevWeightUpdateMatrices[this.prevWeightUpdateMatrices.length - 1];
    for (int j = 0; j < delta.length; ++j) {
      delta[j] = this.costFunction.calculateDerivative(trainingLabels[j],
          outputLayerOutput[j]);
      // add regularization term
      if (this.regularization != 0.0) {
        double derivativeRegularization = 0.0;
        DenseDoubleMatrix weightMatrix = this.weightMatrice[this.weightMatrice.length - 1];
        for (int k = 0; k < this.layerSizeArray[this.layerSizeArray.length - 1]; ++k) {
          derivativeRegularization += weightMatrix.get(k, j);
        }
        derivativeRegularization /= this.layerSizeArray[this.layerSizeArray.length - 1];
        delta[j] += this.regularization * derivativeRegularization;
      }
    }

    // the listing is truncated here; in the full method the loop above also
    // fills in the output-layer weight updates, and the hidden layers are
    // then handled by back-propagation (see the next snippet)
    return weightUpdateMatrices;
  }
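
The output delta is the derivative of the cost with respect to the network output. As a sketch of what costFunction.calculateDerivative(target, actual) might compute for a squared-error cost (an assumption; the cost function is pluggable):

    // hypothetical squared-error derivative matching the call shape above:
    // for cost 0.5 * (target - actual)^2, d(cost)/d(actual) = actual - target
    static double calculateDerivative(double target, double actual) {
      return actual - target;
    }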

    // the enclosing method's signature is omitted in this listing; it
    // computes the delta for layer curLayerIdx from nextLayerDelta, the
    // delta of the layer above
    int prevLayerIdx = curLayerIdx - 1;
    double[] delta = new double[this.layerSizeArray[curLayerIdx]];
    double[] curLayerOutput = outputCache.get(curLayerIdx);
    double[] prevLayerOutput = outputCache.get(prevLayerIdx);

    DenseDoubleMatrix prevWeightUpdateMatrix = this.prevWeightUpdateMatrices[curLayerIdx - 1];
    // for each neuron j in the current layer, calculate the delta
    for (int j = 0; j < delta.length; ++j) {
      // aggregate delta from next layer
      for (int k = 0; k < nextLayerDelta.length; ++k) {
        double weight = this.weightMatrice[curLayerIdx].get(j, k);
        delta[j] += weight * nextLayerDelta[k];
      }
      // j + 1 skips the bias value stored at index 0 of the cached output
      delta[j] *= this.squashingFunction
          .calculateDerivative(curLayerOutput[j + 1]);

      // calculate the weight update matrix between the previous layer and the
      // current layer
      for (int i = 0; i < weightUpdateMatrices[prevLayerIdx].getRowCount(); ++i) {
        double updatedValue = -this.learningRate * delta[j]
            * prevLayerOutput[i];
        // add momentum
        updatedValue += this.momentum * prevWeightUpdateMatrix.get(i, j);
        weightUpdateMatrices[prevLayerIdx].set(i, j, updatedValue);
      }
    }

    return delta;
  }
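
Each stored entry combines a gradient step with a momentum term. With illustrative numbers:

    // illustrative values only
    double learningRate = 0.1, momentum = 0.9;
    double deltaJ = 0.2, prevOutputI = 0.5, prevUpdateIJ = 0.05;
    double updatedValue = -learningRate * deltaJ * prevOutputI // gradient step: -0.01
        + momentum * prevUpdateIJ;                             // momentum term: +0.045
    // updatedValue == 0.035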

  /**
   * Returns one all-zero weight matrix per pair of adjacent layers.
   */
  private DenseDoubleMatrix[] getZeroWeightMatrices() {
    DenseDoubleMatrix[] weightUpdateCache = new DenseDoubleMatrix[this.layerSizeArray.length - 1];
    // initialize a zero weight matrix for each layer pair
    for (int i = 0; i < weightUpdateCache.length; ++i) {
      weightUpdateCache[i] = new DenseDoubleMatrix(this.layerSizeArray[i] + 1,
          this.layerSizeArray[i + 1]);
    }
    return weightUpdateCache;
  }

      // matrix01 and matrix12 are defined earlier in the full test; only
      // matrix23 is shown in this listing
      double[][] matrix23 = { // 4 by 3
          { 0.2, 0.5, 0.2 }, { 0.5, 0.1, 0.5 }, { 0.1, 0.2, 0.1 },
          { 0.1, 0.2, 0.5 } };

      DoubleMatrix[] matrices = { new DenseDoubleMatrix(matrix01),
          new DenseDoubleMatrix(matrix12), new DenseDoubleMatrix(matrix23) };
      for (DoubleMatrix mat : matrices) {
        MatrixWritable.write(mat, output);
      }
      output.close();
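
The listing also omits how output is created; any java.io.DataOutput works, for example (a hypothetical setup, not the test's actual code):

    DataOutputStream output = new DataOutputStream(
        new FileOutputStream("/tmp/matrices")); // hypothetical path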

  public static DoubleMatrix read(DataInput in) throws IOException {
    // header: row count, then column count (constructor arguments are
    // evaluated left to right), followed by the values in row-major order
    DoubleMatrix mat = new DenseDoubleMatrix(in.readInt(), in.readInt());
    for (int row = 0; row < mat.getRowCount(); row++) {
      for (int col = 0; col < mat.getColumnCount(); col++) {
        mat.set(row, col, in.readDouble());
      }
    }
    return mat;
  }
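
A matching write method has to emit exactly what read consumes: row count, column count, then the values in row-major order. A minimal sketch (the actual MatrixWritable.write may differ in detail):

    public static void write(DoubleMatrix mat, DataOutput out)
        throws IOException {
      out.writeInt(mat.getRowCount());
      out.writeInt(mat.getColumnCount());
      for (int row = 0; row < mat.getRowCount(); row++) {
        for (int col = 0; col < mat.getColumnCount(); col++) {
          out.writeDouble(mat.get(row, col));
        }
      }
    }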

    double error = 0.22;
    double[][] matrix1 = new double[][] { { 0.1, 0.2, 0.8, 0.5 },
        { 0.3, 0.4, 0.6, 0.2 }, { 0.5, 0.6, 0.1, 0.5 } };
    double[][] matrix2 = new double[][] { { 0.8, 1.2, 0.5 } };
    DoubleMatrix[] matrices = new DoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(matrix1);
    matrices[1] = new DenseDoubleMatrix(matrix2);

    boolean isConverge = false;

    // the null fourth argument carries no previous-weight-update matrices
    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
        error, isConverge, matrices, null);

    // error and isConverge are set up as in the previous snippet
    double[][] matrix1 = new double[][] { { 0.1, 0.2, 0.8, 0.5 },
        { 0.3, 0.4, 0.6, 0.2 }, { 0.5, 0.6, 0.1, 0.5 } };
    double[][] matrix2 = new double[][] { { 0.8, 1.2, 0.5 } };
    DoubleMatrix[] matrices = new DoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(matrix1);
    matrices[1] = new DenseDoubleMatrix(matrix2);

    double[][] prevMatrix1 = new double[][] { { 0.1, 0.1, 0.2, 0.3 },
        { 0.2, 0.4, 0.1, 0.5 }, { 0.5, 0.1, 0.5, 0.2 } };
    double[][] prevMatrix2 = new double[][] { { 0.1, 0.2, 0.5, 0.9 },
        { 0.3, 0.5, 0.2, 0.6 }, { 0.6, 0.8, 0.7, 0.5 } };

    DoubleMatrix[] prevMatrices = new DoubleMatrix[2];
    prevMatrices[0] = new DenseDoubleMatrix(prevMatrix1);
    prevMatrices[1] = new DenseDoubleMatrix(prevMatrix2);

    SmallLayeredNeuralNetworkMessage message = new SmallLayeredNeuralNetworkMessage(
        error, isConverge, matrices, prevMatrices);
    Configuration conf = new Configuration();
    String strPath = "/tmp/testReadWriteSmallLayeredNeuralNetworkMessageWithPrev";
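
A plausible continuation of this test writes the message to strPath and reads it back. Assuming the message follows Hadoop's Writable convention (an assumption; the actual method names may differ):

    // hypothetical sketch, assuming SmallLayeredNeuralNetworkMessage
    // implements org.apache.hadoop.io.Writable
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream out = fs.create(new Path(strPath), true);
    message.write(out);
    out.close();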

    ann.setMomemtumWeight(momentumWeight);
    double regularizationWeight = 0.05;
    ann.setRegularizationWeight(regularizationWeight);
    // intentionally initialize the weights to fixed values (0.2 and 0.8)
    // so that the test is deterministic
    DoubleMatrix[] matrices = new DenseDoubleMatrix[2];
    matrices[0] = new DenseDoubleMatrix(5, 3, 0.2);
    matrices[1] = new DenseDoubleMatrix(1, 6, 0.8);
    ann.setWeightMatrices(matrices);
    ann.setLearningStyle(LearningStyle.UNSUPERVISED);

    // write to file
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkReadWrite";
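
The snippet stops just before the write itself; a plausible continuation, with hypothetical method names (the real API may differ):

    // hypothetical call names
    ann.setModelPath(modelPath);
    ann.writeModelToFile();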
