Package de.jungblut.math.minimize

Examples of de.jungblut.math.minimize.CostGradientTuple


      // do not regularize the bias/intercept term
      reg.set(0, 0d);
      gradient = gradient.add(reg);
      // add the scaled L2 penalty to the cost
      j += lambda * theta.pow(2).sum() / m;
    }

    return new CostGradientTuple(j, gradient);
  }
View Full Code Here
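
This first excerpt is the tail end of a regularized cost function: the first entry of the regularization vector is zeroed so the bias is not penalized, the penalty is added to the gradient, and the scaled L2 term is added to the cost before both are returned as a CostGradientTuple. A minimal sketch of that overall shape follows; the computeCost/computeGradient helpers, the lambda and m fields, and the multiply(double) vector scaling are illustrative assumptions, only the CostGradientTuple usage mirrors the excerpt.

  // Sketch of the general shape of a cost function returning a CostGradientTuple
  // with L2 regularization. computeCost/computeGradient are hypothetical helpers
  // standing in for the model-specific part; lambda and m are assumed fields.
  public CostGradientTuple evaluateCost(DoubleVector theta) {
    double j = computeCost(theta);                    // unregularized cost
    DoubleVector gradient = computeGradient(theta);   // unregularized gradient

    if (lambda != 0d) {
      DoubleVector reg = theta.multiply(lambda / m);  // multiply(double) assumed
      reg.set(0, 0d);                                 // never penalize the bias term
      gradient = gradient.add(reg);
      j += lambda * theta.pow(2).sum() / m;           // add the L2 penalty to the cost
    }
    return new CostGradientTuple(j, gradient);
  }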


    // calculate our cost (error in the last layer)
    double j = (1.0d / m)
        * conf.error.calculateError(y, ax[conf.layerSizes.length - 1])
        + regularization;

    return new CostGradientTuple(j,
        DenseMatrixFolder.foldMatrices(thetaGradients));
  }
View Full Code Here
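
In the multilayer-perceptron excerpt the per-layer gradients are flattened into one vector with DenseMatrixFolder.foldMatrices before being wrapped in the CostGradientTuple, because the minimizers here operate on a single parameter vector. The loop below is an illustrative re-implementation of that folding idea with plain arrays and toy values; it is not the library's code, and the real folder presumably also records the matrix shapes so the vector can be unfolded again.

    // Illustrative "fold": concatenate every weight matrix into one flat vector
    // so a minimizer (and CostGradientTuple) sees a single DoubleVector.
    // Toy values; not DenseMatrixFolder's actual implementation.
    double[][][] layers = { { { 0.1, 0.2 }, { 0.3, 0.4 } }, // layer 1 weights (toy)
        { { 0.5, 0.6 } } };                                 // layer 2 weights (toy)
    int total = 0;
    for (double[][] weights : layers) {
      total += weights.length * weights[0].length;
    }
    double[] flat = new double[total];
    int offset = 0;
    for (double[][] weights : layers) {
      for (double[] row : weights) {
        for (double value : row) {
          flat[offset++] = value;
        }
      }
    }
    DoubleVector folded = new DenseDoubleVector(flat);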

      thetaGradient.setColumnVector(0, bias);
    }

    // transpose the gradient and negate it, because we transposed theta at the
    // top and our gradient descent subtracts the gradient instead of adding it.
    return new CostGradientTuple(j,
        DenseMatrixFolder.foldMatrices((DenseDoubleMatrix) thetaGradient
            .multiply(-1).transpose()));
  }
View Full Code Here
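
The comment in this excerpt points at the contract behind the tuple: the minimizers treat the returned gradient as the direction to subtract, so a cost function that internally works with the opposite sign (or a transposed theta) has to negate and transpose before returning. A single gradient-descent step against a CostGradientTuple looks roughly like the sketch below; the step size alpha and the multiply(double) vector scaling are assumptions, the accessors are the ones visible in these excerpts.

    // One hedged gradient-descent step: move against the returned gradient.
    // alpha is an assumed learning rate; multiply(double) is assumed to scale a vector.
    CostGradientTuple tuple = costFunction.evaluateCost(theta);
    double alpha = 0.01d;
    theta = theta.subtract(tuple.getGradient().multiply(alpha));
    System.out.println("cost after evaluation: " + tuple.getCost());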

    DoubleVector foldMatrices = DenseMatrixFolder.foldMatrices(pInput
        .getWeights());
    RBMCostFunction fnc = new RBMCostFunction(test, 0, 1, hiddenUnits,
        new SigmoidActivationFunction(), TrainingType.CPU, 0d,
        MultilayerPerceptron.SEED, false);

    CostGradientTuple evaluateCost = fnc.evaluateCost(foldMatrices);

    assertEquals(10.62, evaluateCost.getCost(), 1e-2);
    DoubleVector target = new DenseDoubleVector(new double[] { 0.0,
        0.027379415757720366, 0.029102968186221934, -0.38090575317687425,
        -0.27799120250510584, -0.05453365605307239, 0.028442797042677864,
        -0.007547440696105356, -0.020996345540311157, 0.23725599589259425,
        0.16279353745280023, 0.021913996227666748, 0.21119663986488538,
        0.14066157414419367, 0.018971946780403166, 0.027585532151946184,
        0.07955487735348872, 0.06242886798699649, 0.018894892958963183,
        0.052146356412991667, 0.04730987967580811, -0.08117434385333744,
        -0.006743308468200778, 0.03846403112496833 });

    assertEquals(0d, evaluateCost.getGradient().subtract(target).sum(), 1e-4);

  }
View Full Code Here

    DoubleVector foldMatrices = DenseMatrixFolder.foldMatrices(pInput
        .getWeights());
    RBMCostFunction fnc = new RBMCostFunction(test, 0, 1, hiddenUnits,
        new SigmoidActivationFunction(), TrainingType.CPU, 0.1d,
        MultilayerPerceptron.SEED, false);
    CostGradientTuple evaluateCost = fnc.evaluateCost(foldMatrices);
    assertEquals(10.62, evaluateCost.getCost(), 1e-2);
    DoubleVector target = new DenseDoubleVector(new double[] { 0.0,
        0.02692309216175836, 0.028617918716451567, -0.38090575317687425,
        -0.2733580157966874, -0.05362476178552118, 0.028442797042677864,
        -0.0074216500178369334, -0.020646406447972637, 0.23725599589259425,
        0.1600803118285869, 0.021548762957205637, 0.21119663986488538,
        0.13831721457512378, 0.018655747667396447, 0.027585532151946184,
        0.07822896273093058, 0.06138838685387988, 0.018894892958963183,
        0.05127725047277514, 0.04652138168121131, -0.08117434385333744,
        -0.006630919993730764, 0.037822963939552194 });

    assertEquals(0d, evaluateCost.getGradient().subtract(target).sum(), 1e-4);
  }
View Full Code Here
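
Both RBM tests above follow the same recipe: fold the current weights into a vector, evaluate the RBMCostFunction once, and compare the returned cost and gradient against expected values; the only difference is the weight-decay argument (0d versus 0.1d), which slightly shrinks most gradient entries. Since summing the signed differences lets positive and negative deviations cancel, an element-wise check is stricter; a hedged variant is sketched below, where get(int) and getLength() on DoubleVector are assumed accessors.

    // Stricter, element-wise comparison of the gradient against the target.
    // get(int)/getLength() are assumed DoubleVector accessors.
    DoubleVector diff = evaluateCost.getGradient().subtract(target);
    for (int i = 0; i < diff.getLength(); i++) {
      assertEquals(0d, diff.get(i), 1e-4);
    }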

        double cost = Math.pow(input.get(0), 2) + Math.pow(input.get(1), 2);
        DenseDoubleVector gradient = new DenseDoubleVector(new double[] {
            input.get(0) * 2, input.get(1) * 2 });

        return new CostGradientTuple(cost, gradient);
      }
    };
    DenseDoubleVector v = new DenseDoubleVector(new double[] { 0, 1 });
    CostGradientTuple cost = inlineFunction.evaluateCost(v);
    DoubleVector numericalGradient = MathUtils.numericalGradient(v,
        inlineFunction);
    assertSmallDiff(numericalGradient, cost.getGradient());

    v = new DenseDoubleVector(new double[] { -15, 100 });
    cost = inlineFunction.evaluateCost(v);

    numericalGradient = MathUtils.numericalGradient(v, inlineFunction);
    assertSmallDiff(numericalGradient, cost.getGradient());
  }
View Full Code Here
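
The last test defines a tiny quadratic cost f(x) = x0^2 + x1^2 inline, whose analytic gradient is (2*x0, 2*x1), and cross-checks it against MathUtils.numericalGradient. A central-difference approximation is the usual way such a numerical gradient is formed; the sketch below is a stand-alone illustration of that idea, not the library's implementation, and deepCopy(), get(int), set(int, double) and getLength() on DoubleVector are assumed accessors.

    // Illustrative central difference: g_i ~ (f(x + h*e_i) - f(x - h*e_i)) / (2h).
    // Sketch of the idea behind MathUtils.numericalGradient, not its code.
    double h = 1e-4;
    double[] approx = new double[v.getLength()];
    for (int i = 0; i < approx.length; i++) {
      DoubleVector plus = v.deepCopy();
      DoubleVector minus = v.deepCopy();
      plus.set(i, v.get(i) + h);
      minus.set(i, v.get(i) - h);
      double fPlus = inlineFunction.evaluateCost(plus).getCost();
      double fMinus = inlineFunction.evaluateCost(minus).getCost();
      approx[i] = (fPlus - fMinus) / (2d * h);
    }
    DoubleVector numerical = new DenseDoubleVector(approx);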

    DoubleVector foldGradient = DenseMatrixFolder.foldMatrix(gradient);

    // now add the prior and finalize the derivative
    cost += computeLogPrior(input, foldGradient);

    return new CostGradientTuple(cost, foldGradient);
  }
View Full Code Here
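
The final excerpt adds a log prior on top of the data cost, which is the MAP view of regularization: minimizing negative log-likelihood plus negative log prior, where a Gaussian prior on the weights contributes an L2-style term to both the cost and the gradient. The sketch below illustrates that idea; it is not the library's computeLogPrior, and the sigma constant as well as the getLength()/get(int)/set(int, double) accessors are assumptions.

  // Sketch of a Gaussian log-prior contribution (MAP-style regularization):
  // adds sigma/2 * theta_i^2 to the cost and sigma * theta_i to the gradient.
  // Illustrative only; not the library's computeLogPrior.
  double computeLogPriorSketch(DoubleVector theta, DoubleVector gradient) {
    double sigma = 1d; // assumed prior strength
    double prior = 0d;
    for (int i = 0; i < theta.getLength(); i++) {
      prior += sigma * theta.get(i) * theta.get(i) / 2d;
      gradient.set(i, gradient.get(i) + sigma * theta.get(i));
    }
    return prior;
  }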


