Package de.jungblut.math

Examples of de.jungblut.math.DoubleVector
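
The snippets below all lean on the same small surface of the DoubleVector API: construction via DenseDoubleVector, element access with get/getLength, and the subtract/sum combination used for comparisons. A minimal, self-contained sketch of that surface follows; the import paths are assumptions inferred from the package naming, while the method calls themselves all appear verbatim in the examples.

import de.jungblut.math.DoubleVector;
import de.jungblut.math.dense.DenseDoubleVector;

public class DoubleVectorBasics {
  public static void main(String[] args) {
    // construct a dense vector from a plain double array
    DoubleVector a = new DenseDoubleVector(new double[] { 1, 2, 3 });
    DoubleVector b = new DenseDoubleVector(new double[] { 1, 2, 3 });

    // element access and length
    double first = a.get(0);    // 1.0
    int length = a.getLength(); // 3

    // a - b is the zero vector, so its entries sum to 0
    double diff = a.subtract(b).sum(); // 0.0
    System.out.println(first + " " + length + " " + diff);
  }
}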


      tree.add(v, null);

    int index = 0;
    Iterator<DoubleVector> iterator = tree.iterator();
    while (iterator.hasNext()) {
      DoubleVector next = iterator.next();
      assertEquals(result[index++], next);
    }
    assertEquals(array.length, index);
  }
View Full Code Here
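
The snippet above is cut off before its setup: an input array is fed into some tree structure, and the iteration order is checked against an expected result. The concrete tree class is not visible here, so the following self-contained sketch uses a plain java.util.TreeSet ordered by the first component purely to illustrate the add-then-iterate-in-order pattern the test exercises; the array contents are hypothetical.

import java.util.Comparator;
import java.util.Iterator;
import java.util.TreeSet;
import de.jungblut.math.DoubleVector;
import de.jungblut.math.dense.DenseDoubleVector;

public class TreeIterationSketch {
  public static void main(String[] args) {
    // stand-in for the unknown tree type: ordered by the first component
    TreeSet<DoubleVector> tree = new TreeSet<>(
        Comparator.comparingDouble((DoubleVector v) -> v.get(0)));
    DoubleVector[] array = {
        new DenseDoubleVector(new double[] { 3 }),
        new DenseDoubleVector(new double[] { 1 }),
        new DenseDoubleVector(new double[] { 2 }) };
    // expected in-order traversal of the inserts above
    DoubleVector[] result = { array[1], array[2], array[0] };

    for (DoubleVector v : array) {
      tree.add(v);
    }
    int index = 0;
    Iterator<DoubleVector> iterator = tree.iterator();
    while (iterator.hasNext()) {
      // reference comparison is enough: result reuses the same instances
      if (result[index++] != iterator.next()) {
        throw new AssertionError("unexpected iteration order");
      }
    }
  }
}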


  @Test
  public void testMaximize() {
    // maximize -x^2-y^2
    // derivative is -2*x and -2*y
    // the max should be at 0
    DoubleVector theta = new DenseDoubleVector(new double[] { -25, -25 });
    DoubleVector minimizeFunction = Fmincg.minimizeFunction(
        new NegatedCostFunction(new CostFunction() {
          @Override
          public CostGradientTuple evaluateCost(DoubleVector input) {
            double cost = -Math.pow(input.get(0), 2)
                - Math.pow(input.get(1), 2);
            DenseDoubleVector gradient = new DenseDoubleVector(new double[] {
                -2 * input.get(0), -2 * input.get(1) });

            return new CostGradientTuple(cost, gradient);
          }
        }), theta, 10, false);

    assertEquals(0d, minimizeFunction.get(0), 1e-5);
    assertEquals(0d, minimizeFunction.get(1), 1e-5);
  }
View Full Code Here
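
The NegatedCostFunction wrapper is what turns this maximization into a minimization Fmincg can handle: minimizing -f is the same as maximizing f, so both the cost and the gradient change sign. A sketch of that idea using only the types visible above; the getCost/getGradient accessor names and the scalar multiply are assumptions, not a copy of the library's implementation.

// illustration of the negation trick: minimizing the wrapped function
// maximizes the original, because cost and gradient are both negated
static CostFunction negate(final CostFunction original) {
  return new CostFunction() {
    @Override
    public CostGradientTuple evaluateCost(DoubleVector input) {
      CostGradientTuple tuple = original.evaluateCost(input);
      return new CostGradientTuple(-tuple.getCost(),
          tuple.getGradient().multiply(-1d));
    }
  };
}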

    assertEquals(10d, error, 1e-4);
  }

  @Test
  public void testSigmoidErrorVector() {
    DoubleVector y = new DenseDoubleVector(new double[] { 0d, 1d, 0d, 1d, 0d });
    DoubleVector hypothesis = new DenseDoubleVector(new double[] { 0d, 0d, 0d,
        1d, 0d });
    double error = new LogisticErrorFunction().calculateError(y, hypothesis);
    assertEquals(10d, error, 1e-4);
  }
View Full Code Here
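
The asserted error of 10 is consistent with a cross-entropy (logistic) loss whose logarithm is guarded against log(0): the single mismatch (y = 1, hypothesis = 0) contributes -log(0), and a guard that floors log(0) at -10 reproduces the asserted value exactly. A sketch under those assumptions; the actual guard inside LogisticErrorFunction is not shown in these snippets.

// hedged sketch of a logistic (cross-entropy) error with a guarded log;
// flooring log(0) at -10 is an assumption that matches the asserted 10d
static double logisticError(DoubleVector y, DoubleVector hypothesis) {
  double error = 0d;
  for (int i = 0; i < y.getLength(); i++) {
    double h = hypothesis.get(i);
    // -[y * log(h) + (1 - y) * log(1 - h)]
    error -= y.get(i) * guardedLog(h) + (1d - y.get(i)) * guardedLog(1d - h);
  }
  return error;
}

static double guardedLog(double x) {
  return x <= 0d ? -10d : Math.log(x);
}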

    DoubleVector[] train = sampleXOR.getFirst();
    DoubleVector[] outcome = sampleXOR.getSecond();

    for (int i = 0; i < train.length; i++) {
      DoubleVector predict = mlp.predict(train[i]);
      assertEquals(outcome[i].get(0), Math.rint(predict.get(0)), 1e-4);
    }
  }
View Full Code Here
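
The sampleXOR helper is elided in this snippet and the one that follows; any stand-in just needs the four XOR rows as parallel input/outcome arrays. A hypothetical version, returning plain arrays rather than whatever tuple type getFirst/getSecond belong to:

// hypothetical stand-in for the elided sampleXOR helper: the XOR truth
// table as parallel arrays, result[0] = inputs, result[1] = outcomes
static DoubleVector[][] sampleXOR() {
  DoubleVector[] train = {
      new DenseDoubleVector(new double[] { 0, 0 }),
      new DenseDoubleVector(new double[] { 0, 1 }),
      new DenseDoubleVector(new double[] { 1, 0 }),
      new DenseDoubleVector(new double[] { 1, 1 }) };
  DoubleVector[] outcome = {
      new DenseDoubleVector(new double[] { 0 }),
      new DenseDoubleVector(new double[] { 1 }),
      new DenseDoubleVector(new double[] { 1 }),
      new DenseDoubleVector(new double[] { 0 }) };
  return new DoubleVector[][] { train, outcome };
}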

    DoubleVector[] train = sampleXOR.getFirst();
    DoubleVector[] outcome = sampleXOR.getSecond();

    double absDifference = 0d;
    for (int i = 0; i < train.length; i++) {
      DoubleVector predict = mlp.predict(train[i]);
      absDifference += Math.abs(outcome[i].get(0) - predict.get(0));
    }
    return absDifference;
  }
View Full Code Here
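
Unlike the previous snippet, this helper does not assert each rounded prediction; it returns the summed absolute error so the caller can decide what counts as good enough. A hypothetical caller (the helper's real name is not visible here; calculateAbsoluteDifference is made up):

// hypothetical usage: accept the trained network when the total absolute
// error across the four XOR rows stays below a chosen tolerance
double absDifference = calculateAbsoluteDifference(mlp, sampleXOR);
assertTrue("total XOR error too high: " + absDifference,
    absDifference < 0.1d);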


  @Test
  public void testSimpleParable() {
    int startPoint = -5;
    // start at x=-5
    DoubleVector start = new DenseDoubleVector(new double[] { startPoint });

    // our function is f(x) = (4-x)^2+10
    // the derivative is f'(x) = 2x-8
    CostFunction inlineFunction = new CostFunction() {
      @Override
      public CostGradientTuple evaluateCost(DoubleVector input) {

        double cost = Math.pow(4 - input.get(0), 2) + 10;
        DenseDoubleVector gradient = new DenseDoubleVector(
            new double[] { 2 * input.get(0) - 8 });

        return new CostGradientTuple(cost, gradient);
      }
    };

    DoubleVector minimizeFunction = Fmincg.minimizeFunction(inlineFunction,
        start, 100, false);
    assertEquals(4.0d, minimizeFunction.get(0), 1e-5);
  }
View Full Code Here
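
Before handing a hand-derived gradient like f'(x) = 2x - 8 to Fmincg, a central finite difference is a cheap sanity check. A sketch against the inlineFunction above; the getCost accessor name on CostGradientTuple is an assumption.

// central finite-difference check of the analytic gradient at x = -5;
// (f(x + h) - f(x - h)) / (2h) should be close to 2 * (-5) - 8 = -18
double x = -5d, h = 1e-6;
double fPlus = inlineFunction.evaluateCost(
    new DenseDoubleVector(new double[] { x + h })).getCost();
double fMinus = inlineFunction.evaluateCost(
    new DenseDoubleVector(new double[] { x - h })).getCost();
double numericGradient = (fPlus - fMinus) / (2d * h);
assertEquals(2d * x - 8d, numericGradient, 1e-4);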

    DenseDoubleMatrix mat1 = new DenseDoubleMatrix(new double[][] {
        { 1, 2, 3 }, { 4, 5, 6 } });
    DenseDoubleMatrix mat2 = new DenseDoubleMatrix(new double[][] {
        { 7, 8, 9 }, { 10, 11, 12 } });

    DoubleVector foldMatrices = DenseMatrixFolder.foldMatrices(mat1, mat2);
    assertEquals(12, foldMatrices.getLength());
    assertEquals(0.0d, referenceFold.subtract(foldMatrices).sum(), 1e-5);

    DoubleMatrix[] unfoldMatrices = DenseMatrixFolder.unfoldMatrices(
        foldMatrices, new int[][] { { 2, 3 }, { 2, 3 } });
View Full Code Here
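
foldMatrices concatenates the entries of the two 2x3 matrices into a single vector of length 12 (2*3 + 2*3), and unfoldMatrices reverses that given the original {rows, columns} pairs. A round-trip check, assuming DoubleMatrix offers the same subtract/sum combination used on vectors above:

// round-trip sketch: unfolding with the original dimensions should
// reproduce mat1 and mat2 entry for entry (zero difference overall)
assertEquals(0.0d, unfoldMatrices[0].subtract(mat1).sum(), 1e-5);
assertEquals(0.0d, unfoldMatrices[1].subtract(mat2).sum(), 1e-5);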

public class GradientDescentTest {

  @Test
  public void testGradientDescent() {

    DoubleVector start = new DenseDoubleVector(new double[] { 2, -1 });

    CostFunction inlineFunction = getCostFunction();

    DoubleVector minimizeFunction = GradientDescent.minimizeFunction(
        inlineFunction, start, 0.5d, 1E-20, 1000, false);
    // the minimum is at the origin; 1E-5 is close enough to zero
    assertEquals(0d, minimizeFunction.get(0), 1E-5);
    assertEquals(0d, minimizeFunction.get(1), 1E-5);
  }
View Full Code Here
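
getCostFunction() is elided from this snippet; any smooth convex function with its minimum at the origin would satisfy the assertions. A hypothetical stand-in, f(x, y) = x^2 + y^2 with gradient (2x, 2y):

// hypothetical stand-in for the elided getCostFunction(): a convex bowl
// whose minimum at the origin matches the asserted result (0, 0)
static CostFunction getCostFunction() {
  return new CostFunction() {
    @Override
    public CostGradientTuple evaluateCost(DoubleVector input) {
      double x = input.get(0), y = input.get(1);
      double cost = x * x + y * y;
      DoubleVector gradient = new DenseDoubleVector(
          new double[] { 2 * x, 2 * y });
      return new CostGradientTuple(cost, gradient);
    }
  };
}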

    SparseKNearestNeighbours neighbours = new SparseKNearestNeighbours(2, 2,
        new CosineDistance());

    // we separate the two classes into two dimensions each:
    // 'left' vectors use dimensions 0-1, 'right' vectors use dimensions 2-3
    DoubleVector left = new SingleEntryDoubleVector(0d);
    DoubleVector right = new SingleEntryDoubleVector(1d);
    DoubleVector v1 = new SparseDoubleVector(4);
    v1.set(0, 1d);
    v1.set(1, 1d);

    DoubleVector v2 = new SparseDoubleVector(4);
    v2.set(2, 1d);
    v2.set(3, 2.5);

    DoubleVector v3 = new SparseDoubleVector(4);
    v3.set(0, 2d);
    v3.set(1, 2d);

    DoubleVector v4 = new SparseDoubleVector(4);
    v4.set(2, 0.5);
    v4.set(3, 1.5);

    DoubleVector[] trainingSet = new DoubleVector[] { v1, v2, v3, v4 };
    DoubleVector[] outcomeSet = new DoubleVector[] { left, right, left, right };

    neighbours.train(trainingSet, outcomeSet);

    DoubleVector predict = neighbours.predict(v4);
    assertEquals(right, predict);

    predict = neighbours.predict(v2);
    assertEquals(right, predict);

    predict = neighbours.predict(v1);
    assertEquals(left, predict);

    predict = neighbours.predict(v3);
    assertEquals(left, predict);

    // predict a point between both classes, leaning slightly to the right
    DoubleVector vx = new SparseDoubleVector(4);
    vx.set(1, 1d);
    vx.set(3, 2.5);

    predict = neighbours.predict(vx);
    assertEquals(right, predict);
  }
View Full Code Here
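
CosineDistance is what makes this split work: vectors pointing in the same direction have distance 0 regardless of magnitude, which is why v3 = (2, 2, 0, 0) is classified with v1 = (1, 1, 0, 0). The v1/v3 case computed by hand, taking cosine distance as 1 minus cosine similarity (the usual convention):

// cosine distance between v1 = (1,1,0,0) and v3 = (2,2,0,0):
// 1 - (v1 . v3) / (|v1| * |v3|); parallel vectors give exactly 0
double dot = 1 * 2 + 1 * 2;                         // 4
double norm1 = Math.sqrt(1 * 1 + 1 * 1);            // sqrt(2)
double norm3 = Math.sqrt(2 * 2 + 2 * 2);            // sqrt(8)
double cosineDistance = 1d - dot / (norm1 * norm3); // 1 - 4/4 = 0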
