Package de.jungblut.math.dense

Examples of de.jungblut.math.dense.DenseDoubleMatrix
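
The snippets below come from different parts of the library. As a quick orientation, here is a minimal, hypothetical construction sketch; the double[][] constructor and the get/set accessors are assumed from the library API rather than taken from the snippets:

    DenseDoubleMatrix zeros = new DenseDoubleMatrix(3, 2);    // 3x2, zero-filled
    DenseDoubleMatrix m = new DenseDoubleMatrix(new double[][] {
        { 1d, 2d },
        { 3d, 4d } });
    m.set(0, 1, 5d);                  // overwrite a single cell
    double topRight = m.get(0, 1);    // 5d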


    // add the bias
    if (features[0].isSparse()) {
      x = new SparseDoubleRowMatrix(DenseDoubleVector.ones(features.length),
          new SparseDoubleRowMatrix(features));
    } else {
      x = new DenseDoubleMatrix(DenseDoubleVector.ones(features.length),
          new DenseDoubleMatrix(features));
    }
    if (outcome[0].isSparse()) {
      y = new SparseDoubleRowMatrix(outcome);
    } else {
      y = new DenseDoubleMatrix(outcome);
    }
    // transpose y to get a faster lookup in the cost function
    y = y.transpose();

    LogisticRegressionCostFunction cnf = new LogisticRegressionCostFunction(x,
View Full Code Here
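
The two-argument constructors used above prepend the ones vector as an extra first column, so every row gains a bias feature of 1 before the matrix is handed to the cost function. A minimal sketch of the dense case, assuming the DenseDoubleVector(double[]) constructor and that the (vector, matrix) constructor really prepends the vector as the first column:

    DoubleVector[] features = new DoubleVector[] {
        new DenseDoubleVector(new double[] { 1d, 2d }),
        new DenseDoubleVector(new double[] { 3d, 4d }) };
    // prepend a column of ones; the result should be 2x3, e.g. [[1, 1, 2], [1, 3, 4]]
    DoubleMatrix x = new DenseDoubleMatrix(DenseDoubleVector.ones(features.length),
        new DenseDoubleMatrix(features));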


   */
  public static DoubleMatrix[] unfoldMatrices(DoubleVector vector,
      int[][] sizeArray) {
    DoubleMatrix[] arr = new DoubleMatrix[sizeArray.length];
    for (int i = 0; i < sizeArray.length; i++) {
      arr[i] = new DenseDoubleMatrix(sizeArray[i][0], sizeArray[i][1]);
    }

    int currentVectorIndex = 0;
    for (int i = 0; i < arr.length; i++) {
      final int numRows = sizeArray[i][0];
View Full Code Here
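
The loop above is cut off, but the calling pattern is clear: the size array lists (rows, cols) per target matrix and the vector is consumed sequentially via currentVectorIndex. A hedged usage sketch, assuming the element order inside each matrix is row-major as in unfoldMatrix below:

    int[][] sizes = new int[][] { { 2, 2 }, { 2, 3 } };    // 4 + 6 = 10 values in total
    DoubleVector params = new DenseDoubleVector(new double[] {
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    DoubleMatrix[] unfolded = unfoldMatrices(params, sizes);
    // unfolded[0] is 2x2 built from the first four values, unfolded[1] is 2x3 from the rest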

   * @param cols the number of columns the target matrix needs to have.
   * @return a matrix with the contents of the vector, row split.
   */
  public static DoubleMatrix unfoldMatrix(DoubleVector vector, int rows,
      int cols) {
    DoubleMatrix mat = new DenseDoubleMatrix(rows, cols);

    int index = 0;
    for (int i = 0; i < rows; i++) {
      for (int j = 0; j < cols; j++) {
        mat.set(i, j, vector.get(index++));
      }
    }

    return mat;
  }
View Full Code Here
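
unfoldMatrix fills the target matrix row by row, so it is the inverse of a row-major flattening. A small usage sketch (the DenseDoubleVector(double[]) constructor is assumed):

    DoubleVector v = new DenseDoubleVector(new double[] { 1, 2, 3, 4, 5, 6 });
    DoubleMatrix m = unfoldMatrix(v, 2, 3);
    // m is [[1, 2, 3], [4, 5, 6]]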

      boolean sparse = featureSubArray[0].isSparse();
      DoubleMatrix outcomeMat = null;
      if (outcomeMatrix != null) {
        DoubleVector[] outcomeSubArray = ArrayUtils.subArray(outcomeMatrix,
            start, end);
        outcomeMat = new DenseDoubleMatrix(outcomeSubArray);
      }
      DenseDoubleVector bias = DenseDoubleVector.ones(featureSubArray.length);
      DoubleMatrix featureMatrix = sparse ? new SparseDoubleRowMatrix(
          featureSubArray) : new DenseDoubleMatrix(featureSubArray);
      DoubleMatrix featuresWithBias = sparse ? new SparseDoubleRowMatrix(bias,
          featureMatrix) : new DenseDoubleMatrix(bias, featureMatrix);
      batches.add(new Tuple<>(featuresWithBias, outcomeMat));
    }
  }
View Full Code Here

  @Override
  public CostGradientTuple evaluateCost(DoubleVector theta) {

    DoubleVector activation = SIGMOID.get().apply(x.multiplyVectorRow(theta));
    DenseDoubleMatrix hypo = new DenseDoubleMatrix(Arrays.asList(activation));
    double error = ERROR_FUNCTION.calculateError(y, hypo);
    DoubleMatrix loss = hypo.subtract(y);
    double j = error / m;
    DoubleVector gradient = xTransposed.multiplyVectorRow(loss.getRowVector(0))
        .divide(m);
    if (lambda != 0d) {
      DoubleVector reg = theta.multiply(lambda / m);
View Full Code Here
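
For reference, the quantities above follow the usual regularized logistic regression formulation: the hypothesis is h = sigmoid(X * theta), the loss row is (h - y), and the gradient is (1/m) * X^T * (h - y), with (lambda / m) * theta added in the regularized branch. The bias weight is commonly left out of the regularization term, but the truncated snippet does not show how the library handles that detail.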

   * Seeds the values using LeCun's well-known uniform initialization formula,
   * which derives the deviation of the weights as SQRT(6) / SQRT((num units
   * left layer) + (num units right layer)) and distributes them with zero mean.
   */
  public WeightMatrix(int unitsLeftLayer, int unitsRightLayer) {
    this.weights = new DenseDoubleMatrix(unitsRightLayer, unitsLeftLayer + 1);
    double eInit = Math.sqrt(6) / Math.sqrt(unitsLeftLayer + unitsRightLayer);
    setWeightsUniformly(seedRandomGenerator(), eInit);
  }
View Full Code Here
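
A small usage sketch: a layer of 4 units feeding a layer of 3 units yields a 3x5 weight matrix, the extra column holding the bias weights; setWeightsUniformly presumably draws the values uniformly around zero with deviation eInit:

    WeightMatrix w = new WeightMatrix(4, 3);
    // the wrapped matrix has 3 rows and 4 + 1 = 5 columns;
    // eInit = sqrt(6) / sqrt(4 + 3) ~= 0.93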

    for (int i = 0; i < layerSizes.length; i++) {
      if (verbose) {
        LOG.info("Training stack at height: " + i);
      }
      // add the bias to hidden and visible layer, random init with 0.1*randn
      DenseDoubleMatrix start = new DenseDoubleMatrix(layerSizes[i] + 1,
          currentTrainingSet[0].getDimension() + 1, new Random(seed))
          .multiply(0.1d);
      DoubleVector folded = DenseMatrixFolder.foldMatrices(start);
      start = null;
      // now do the real training
View Full Code Here
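
The fold step above flattens the randomly initialized weight matrix into a single parameter vector for the optimizer. A minimal round-trip sketch, assuming the unfold helpers shown earlier live in DenseMatrixFolder alongside foldMatrices:

    DenseDoubleMatrix start = new DenseDoubleMatrix(3, 4, new Random(0L)).multiply(0.1d);
    DoubleVector folded = DenseMatrixFolder.foldMatrices(start);    // 3 * 4 = 12 values
    DoubleMatrix restored = DenseMatrixFolder.unfoldMatrix(folded, 3, 4);
    // restored is 3x4 again; it equals start only if fold and unfold use the same element order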

      DoubleMatrix[] zx, NetworkConfiguration conf) {
    for (int i = 1; i < conf.layerSizes.length; i++) {
      zx[i] = multiply(ax[i - 1], thetas[i - 1], false, true, conf);

      if (i < (conf.layerSizes.length - 1)) {
        ax[i] = new DenseDoubleMatrix(DenseDoubleVector.ones(zx[i]
            .getRowCount()), conf.activations[i].apply(zx[i]));
        if (conf.hiddenDropoutProbability > 0d) {
          // compute dropout for ax[i]
          dropout(conf.rnd, ax[i], conf.hiddenDropoutProbability);
        }
View Full Code Here

          dim.getLdB(), 0d, deviceResultPointer, dim.getLdC());
    }

    JCuda.cudaDeviceSynchronize();

    DenseDoubleMatrix matrix = getMatrix(deviceResultPointer, dim.getM(),
        dim.getN());

    freePointer(deviceResultPointer);

    return matrix;
View Full Code Here

   */
  public static DenseDoubleMatrix multiply(DenseDoubleMatrix a,
      DenseDoubleMatrix b, boolean transposeA, boolean transposeB) {
    Pointer matrixPointerA = memcpyMatrix(a);
    Pointer matrixPointerB = memcpyMatrix(b);
    DenseDoubleMatrix matrix = multiply(matrixPointerA, matrixPointerB,
        new MatrixDimension(a, b, transposeA, transposeB));
    freePointer(matrixPointerA);
    freePointer(matrixPointerB);
    return matrix;
  }
View Full Code Here
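
A hedged usage sketch of the wrapper above, written as if called from within the same class; it assumes the CUDA device and the CUBLAS handle have been set up elsewhere and that the double[][] constructor exists:

    DenseDoubleMatrix a = new DenseDoubleMatrix(new double[][] { { 1d, 2d }, { 3d, 4d } });
    DenseDoubleMatrix b = new DenseDoubleMatrix(new double[][] { { 5d }, { 6d } });
    DenseDoubleMatrix c = multiply(a, b, false, false);    // 2x1 result: [17, 39]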
