Package: de.jungblut.math.sparse

Usage examples of de.jungblut.math.sparse.SparseDoubleRowMatrix


        "Features and Outcomes need to match in length!");
    DoubleMatrix x = null;
    DoubleMatrix y = null;
    // add the bias
    if (features[0].isSparse()) {
      x = new SparseDoubleRowMatrix(DenseDoubleVector.ones(features.length),
          new SparseDoubleRowMatrix(features));
    } else {
      x = new DenseDoubleMatrix(DenseDoubleVector.ones(features.length),
          new DenseDoubleMatrix(features));
    }
    if (outcome[0].isSparse()) {
      y = new SparseDoubleRowMatrix(outcome);
    } else {
      y = new DenseDoubleMatrix(outcome);
    }
    // transpose y to get a faster lookup in the cost function
    y = y.transpose();
View Full Code Here


        DoubleVector[] outcomeSubArray = ArrayUtils.subArray(outcomeMatrix,
            start, end);
        outcomeMat = new DenseDoubleMatrix(outcomeSubArray);
      }
      DenseDoubleVector bias = DenseDoubleVector.ones(featureSubArray.length);
      DoubleMatrix featureMatrix = sparse ? new SparseDoubleRowMatrix(
          featureSubArray) : new DenseDoubleMatrix(featureSubArray);
      DoubleMatrix featuresWithBias = sparse ? new SparseDoubleRowMatrix(bias,
          featureMatrix) : new DenseDoubleMatrix(bias, featureMatrix);
      batches.add(new Tuple<>(featuresWithBias, outcomeMat));
    }
  }
View Full Code Here

   */
  public SparseMultiLabelRegression(int epochs, double alpha, int numFeatures,
      int numOutcomes) {
    this.epochs = epochs;
    this.alpha = alpha;
    this.weights = new SparseDoubleRowMatrix(numFeatures, numOutcomes);
  }
View Full Code Here

        DoubleVector feature = tuple.getFirst();
        DoubleVector outcome = tuple.getSecond();
        DoubleVector z1 = theta.multiplyVectorColumn(feature);
        DoubleVector activations = SIGMOID.apply(z1);
        double loss = LOSS.calculateError(
            new SparseDoubleRowMatrix(Arrays.asList(outcome)),
            new SparseDoubleRowMatrix(Arrays.asList(activations)));
        lossSum += loss;
        DoubleVector activationDifference = activations.subtract(outcome);
        // update theta by a smarter sparsity algorithm
        Iterator<DoubleVectorElement> featureIterator = feature
            .iterateNonZero();
View Full Code Here

    int numDistinctClasses = first.getSecond().getDimension();
    // respect the binary case
    numDistinctClasses = numDistinctClasses == 1 ? 2 : numDistinctClasses;
    // sparse row representations, so every class has the features as a hashset
    // of values. This gives good compression for many class problems.
    probabilityMatrix = new SparseDoubleRowMatrix(numDistinctClasses, first
        .getFirst().getDimension());

    int[] tokenPerClass = new int[numDistinctClasses];
    int[] numDocumentsPerClass = new int[numDistinctClasses];
View Full Code Here

   */
  private final DoubleMatrix transitionProbabilities;
  private final int numStates;

  private MarkovChain(int numStates) {
    // Delegate to the main constructor with an all-zero numStates x numStates
    // sparse transition matrix (sparse so that, presumably, only observed
    // transitions consume memory — confirm against the matrix implementation).
    this(numStates, new SparseDoubleRowMatrix(numStates, numStates));
  }
View Full Code Here

    return newInstance;
  }

  protected DoubleMatrix newInstance(DoubleMatrix mat) {
    if (mat.isSparse()) {
      return new SparseDoubleRowMatrix(mat.getRowCount(), mat.getColumnCount());
    } else {
      return new DenseDoubleMatrix(mat.getRowCount(), mat.getColumnCount());
    }
  }
View Full Code Here

   * @return a matrix containing the predicted hidden state on each row vector.
   */
  public DoubleMatrix decode(DoubleVector[] observationSequence,
      DoubleVector[] featuresPerHiddenState) {
    return ViterbiUtils.decode(emissionProbabilityMatrix,
        new SparseDoubleRowMatrix(observationSequence),
        new SparseDoubleRowMatrix(featuresPerHiddenState), numHiddenStates);
  }
View Full Code Here

   * @return a new sparse matrix from the stream.
   * @throws IOException in case of an IO error.
   */
  public static SparseDoubleRowMatrix readSparseMatrix(DataInput in)
      throws IOException {
    SparseDoubleRowMatrix mat = new SparseDoubleRowMatrix(in.readInt(),
        in.readInt());

    final int numRowIndices = in.readInt();
    for (int i = 0; i < numRowIndices; i++) {
      final int rowIndex = in.readInt();
      final int numColumns = in.readInt();
      DoubleVector row = new SparseDoubleVector(mat.getColumnCount());
      for (int j = 0; j < numColumns; j++) {
        row.set(in.readInt(), in.readDouble());
      }
      mat.setRowVector(rowIndex, row);
    }

    return mat;
  }
View Full Code Here

public class HammingLossFunctionTest {

  @Test
  public void testSingleHammingLoss() {
    SparseDoubleRowMatrix groundTruth = new SparseDoubleRowMatrix(
        new double[][] { { 1, 1, 1, 1 } });

    SparseDoubleRowMatrix hypo = new SparseDoubleRowMatrix(new double[][] { {
        0, 1, 1, 1 } });

    HammingLossFunction fnc = new HammingLossFunction(0.5d);

    double err = fnc.calculateError(groundTruth, hypo);
View Full Code Here

TOP

Related Classes of de.jungblut.math.sparse.SparseDoubleRowMatrix

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact: coftware#gmail.com.