Package org.apache.hama.ml.math

Examples of org.apache.hama.ml.math.DoubleFunction
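The excerpts below are taken from Hama's neural-network code and use DoubleFunction the same way throughout: an implementation supplies apply(double) for the function itself and applyDerivative(double) for its derivative, and is either evaluated element by element or handed to applyToElements to transform a whole matrix in place. As a minimal, self-contained sketch of that contract (the sigmoid body, class name, and import layout are illustrative assumptions, not taken from the excerpts):

import org.apache.hama.ml.math.DenseDoubleMatrix;
import org.apache.hama.ml.math.DoubleFunction;
import org.apache.hama.ml.math.DoubleMatrix;

public class SigmoidSketch {

  public static void main(String[] args) {
    // a sigmoid squashing function: apply() evaluates it,
    // applyDerivative() evaluates its derivative at the same input
    DoubleFunction sigmoid = new DoubleFunction() {
      @Override
      public double apply(double value) {
        return 1.0 / (1.0 + Math.exp(-value));
      }

      @Override
      public double applyDerivative(double value) {
        double s = apply(value);
        return s * (1.0 - s);
      }
    };

    // squash every element of a 3x2 matrix in place
    DoubleMatrix matrix = new DenseDoubleMatrix(3, 2);
    matrix.applyToElements(sigmoid);
  }
}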


    // copy the input into a new vector with a leading bias term of 1
    DoubleVector internalInstance = new DenseDoubleVector(inputInstance.getDimension() + 1);
    internalInstance.set(0, 1);
    for (int i = 0; i < inputInstance.getDimension(); ++i) {
      internalInstance.set(i + 1, inputInstance.get(i));
    }
    // squashing function and weight matrix of the addressed layer
    DoubleFunction squashingFunction = model
        .getSquashingFunction(inputLayer);
    DoubleMatrix weightMatrix = null;
    if (inputLayer == 0) {
      weightMatrix = this.getEncodeWeightMatrix();
    } else {
      // remaining branch omitted in this excerpt


      // a non-final layer's bias neuron takes no incoming weights, hence one fewer row
      int row = isFinalLayer ? size : size - 1;
      int col = sizePrevLayer;
      DoubleMatrix weightMatrix = new DenseDoubleMatrix(row, col);
      // initialize weights with small random values centered at zero
      final Random rnd = new Random();
      weightMatrix.applyToElements(new DoubleFunction() {
        @Override
        public double apply(double value) {
          return rnd.nextDouble() - 0.5;
        }
        // remainder of the anonymous class omitted in this excerpt
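The excerpt above is cut off before the anonymous class and the applyToElements call are closed. Filled out as a self-contained fragment (assuming the same imports as in the sketch above plus java.util.Random; the applyDerivative body is a placeholder, since a random initializer has no meaningful derivative), the complete pattern looks roughly like this:

    int row = 4;  // neurons in the current layer
    int col = 3;  // neurons in the previous layer
    DoubleMatrix weightMatrix = new DenseDoubleMatrix(row, col);

    // fill the matrix with values drawn uniformly from [-0.5, 0.5)
    final Random rnd = new Random();
    weightMatrix.applyToElements(new DoubleFunction() {
      @Override
      public double apply(double value) {
        // the current element value is ignored; a fresh random number replaces it
        return rnd.nextDouble() - 0.5;
      }

      @Override
      public double applyDerivative(double value) {
        // never needed for initialization
        throw new UnsupportedOperationException("not used for initialization");
      }
    });

Centering the random values on zero keeps the initial weights small and breaks the symmetry between neurons, which is the usual motivation for this kind of initializer.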

    // (excerpt begins mid-statement)
          .get(m).getRowCount(), this.weightMatrixList.get(m).getColumnCount());
    }
    DoubleVector deltaVec = new DenseDoubleVector(
        this.layerSizeList.get(this.layerSizeList.size() - 1));

    DoubleFunction squashingFunction = this.squashingFunctionList
        .get(this.squashingFunctionList.size() - 1);

    DoubleMatrix lastWeightMatrix = this.weightMatrixList
        .get(this.weightMatrixList.size() - 1);
    // delta of the output layer: derivative of the cost function plus a
    // regularization term, scaled by the derivative of the squashing function
    for (int i = 0; i < deltaVec.getDimension(); ++i) {
      double costFuncDerivative = this.costFunction.applyDerivative(
          labels.get(i), output.get(i + 1));
      // add regularization
      costFuncDerivative += this.regularizationWeight
          * lastWeightMatrix.getRowVector(i).sum();
      deltaVec.set(i, costFuncDerivative
          * squashingFunction.applyDerivative(output.get(i + 1)));
    }

    // back-propagate layer by layer, starting just before the output layer
    for (int layer = this.layerSizeList.size() - 2; layer >= 0; --layer) {
      output = internalResults.get(layer);
      // remainder of the loop omitted in this excerpt

  private DoubleVector backpropagate(int curLayerIdx,
      DoubleVector nextLayerDelta, List<DoubleVector> outputCache,
      DenseDoubleMatrix weightUpdateMatrix) {

    // get layer-related information
    DoubleFunction squashingFunction = this.squashingFunctionList
        .get(curLayerIdx);
    DoubleVector curLayerOutput = outputCache.get(curLayerIdx);
    DoubleMatrix weightMatrix = this.weightMatrixList.get(curLayerIdx);
    DoubleMatrix prevWeightMatrix = this.prevWeightUpdatesList.get(curLayerIdx);

    // if the next layer is not the output layer, drop the delta of its bias neuron
    if (curLayerIdx != this.layerSizeList.size() - 2) {
      nextLayerDelta = nextLayerDelta.slice(1,
          nextLayerDelta.getDimension() - 1);
    }

    // propagate the delta backwards through the weights, then scale each
    // component by the derivative of this layer's squashing function
    DoubleVector delta = weightMatrix.transpose()
        .multiplyVector(nextLayerDelta);
    for (int i = 0; i < delta.getDimension(); ++i) {
      delta.set(
          i,
          delta.get(i)
              * squashingFunction.applyDerivative(curLayerOutput.get(i)));
    }

    // remainder of the method omitted in this excerpt

    for (int i = 0; i < this.numberOfLayers - 1; ++i) {
      // one extra row for the bias weights
      this.weightMatrice[i] = new DenseDoubleMatrix(this.layerSizeArray[i] + 1,
          this.layerSizeArray[i + 1]);

      // fill each weight matrix by applying a randomizing DoubleFunction
      this.weightMatrice[i].applyToElements(new DoubleFunction() {

        private Random rnd = new Random();

        @Override
        public double apply(double value) {
          // remainder of the anonymous class omitted in this excerpt
