Package gov.sandia.cognition.statistics.distribution

Examples of gov.sandia.cognition.statistics.distribution.MultivariateGaussian
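All of the snippets below construct, evaluate, or sample from MultivariateGaussian. As a point of reference, here is a minimal, self-contained sketch of the basic API (mean-and-covariance constructor, log-density evaluation, random draws) using only calls that also appear in the examples; the class name and values are illustrative, and the gov.sandia.cognition core jars are assumed to be on the classpath.

    import java.util.Random;

    import gov.sandia.cognition.math.matrix.Matrix;
    import gov.sandia.cognition.math.matrix.MatrixFactory;
    import gov.sandia.cognition.math.matrix.Vector;
    import gov.sandia.cognition.math.matrix.VectorFactory;
    import gov.sandia.cognition.statistics.distribution.MultivariateGaussian;

    public class MultivariateGaussianExample {
      public static void main(String[] args) {
        // A 2-d Gaussian built with the same factory calls used in the snippets.
        final Vector mean = VectorFactory.getDefault().copyValues(0d, 0d);
        final Matrix cov = MatrixFactory.getDefault().copyArray(new double[][] {
            {1d, 0d},
            {0d, 1d}});
        final MultivariateGaussian gaussian = new MultivariateGaussian(mean, cov);

        // Log-density at a point, and a single random draw.
        final double logDensity = gaussian.getProbabilityFunction().logEvaluate(
            VectorFactory.getDefault().copyValues(0.5d, -0.5d));
        final Vector draw = gaussian.sample(new Random(12345L));
        System.out.println(logDensity + " " + draw);
      }
    }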


    final double betaPriorCovDof = 2 + betaCovPriorMean.getDimensionality();
    final ScaledInverseGammaCovDistribution priorBetaCov =
        new ScaledInverseGammaCovDistribution(betaCovPriorMean.getDimensionality(),
          betaCovPriorMean.scale(betaPriorCovDof - 1d).getElement(0),
          betaPriorCovDof);
    final MultivariateGaussian priorBeta =
        new MultivariateGaussian(VectorFactory.getDenseDefault().copyArray(
            new double[] {0d}), priorBetaCov.getMean());

    /*
     * Create and initialize the PL filter
     */
 
View Full Code Here


    @Override
    public double computeLogLikelihood(
      GaussianArHpWfParticle transState,
      ObservedValue<Vector,?> observation) {

      final MultivariateGaussian priorPredState = transState.getState();
      final KalmanFilter kf = transState.getFilter();
      /*
       * Construct the measurement prior predictive likelihood
       */
      final Vector mPriorPredMean = kf.getModel().getC().times(priorPredState.getMean());
      final Matrix mPriorPredCov = kf.getModel().getC().times(priorPredState.getCovariance())
          .times(kf.getModel().getC().transpose())
          .plus(kf.getMeasurementCovariance());
      final MultivariateGaussian mPriorPredDist = new MultivariateGaussian(
          mPriorPredMean, mPriorPredCov);

      final double logCt = mPriorPredDist.getProbabilityFunction().logEvaluate(
          observation.getObservedValue());

      return logCt;
    }
View Full Code Here
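For reference, the distribution constructed in computeLogLikelihood above is the standard Kalman-filter one-step-ahead (prior predictive) measurement distribution for an observation model y = C x + v with v ~ N(0, R):

    p(y_t | y_{1:t-1}) = N( C * m_{t|t-1},  C * P_{t|t-1} * C^T + R )

where m_{t|t-1} and P_{t|t-1} are the mean and covariance of priorPredState, C is kf.getModel().getC(), and R is kf.getMeasurementCovariance(); logCt is the log of this density evaluated at the observed value.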

         * We also need to set/reset the Kalman filters so that they
         * adhere to the intended model.
         */
        final double scaleSample = thisPriorScale.sample(this.rng);

        final MultivariateGaussian thisPriorOffset = initialPriorPsi.clone();

        final Vector systemSample = thisPriorOffset.sample(this.rng);
        final Vector offsetTerm = systemSample.subVector(0,
            systemSample.getDimensionality()/2 - 1);
        thisKf.getModel().setState(offsetTerm);
        thisKf.setCurrentInput(offsetTerm);

        final Matrix A = MatrixFactory.getDefault().createDiagonal(
            systemSample.subVector(
                systemSample.getDimensionality()/2,
                systemSample.getDimensionality() - 1));
        thisKf.getModel().setA(A);

        final Matrix offsetIdent = MatrixFactory.getDefault().createIdentity(
            systemSample.getDimensionality()/2, systemSample.getDimensionality()/2);
        thisKf.getModel().setB(offsetIdent);

        final Matrix measIdent = MatrixFactory.getDefault().createIdentity(
            thisKf.getModel().getOutputDimensionality(),
            thisKf.getModel().getOutputDimensionality());
        thisKf.setMeasurementCovariance(measIdent.scale(scaleSample));

        final Matrix modelIdent = MatrixFactory.getDefault().createIdentity(
            thisKf.getModel().getStateDimensionality(),
            thisKf.getModel().getStateDimensionality());
        thisKf.setModelCovariance(modelIdent.scale(scaleSample));

        final MultivariateGaussian priorState = thisKf.createInitialLearnedObject();
        final Vector priorStateSample = priorState.sample(this.rng);

        final GaussianArHpWfParticle particle =
            new GaussianArHpWfParticle(thisKf,
                ObservedValue.<Vector>create(0, null), priorState,
                priorStateSample,
View Full Code Here
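In the initialization above, the sampled system vector is split in half: the first half becomes the offset/input term of the state equation, and the second half fills the diagonal of the AR(1) transition matrix A. Both the measurement and model covariances are then set to the same sampled scale times the identity, reflecting this model's assumption that the two share a single scale parameter.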

              {0d, 0d},
              {0d, 0d}}),
          MatrixFactory.getDefault().copyArray(new double[][] {{0d}})   
        );
   
    final MultivariateGaussian trueInitialPrior = new MultivariateGaussian(
        VectorFactory.getDefault().copyValues(0.5d, 3d),
        MatrixFactory.getDefault().copyArray(new double[][] {
            {1d, 0d},
            {0d, 1d}}));
    final int N = 100;
    final List<ObservedValue<Vector, Matrix>> observations = Lists.newArrayList();
    final LogisticDistribution ev1Dist = new LogisticDistribution(0d, 1d);

    List<SimObservedValue<Vector, Matrix, Vector>> dlmSamples = DlmUtils.sampleDlm(
        rng, N, trueInitialPrior, initialFilter);
    for (SimObservedValue<Vector, Matrix, Vector> samplePair : dlmSamples) {
      final double ev1Upper = -Math.log(-Math.log(rng.nextDouble()));
      final double upperUtility = samplePair.getObservedValue().getElement(0) +
          ev1Upper;
      final double lowerUtility = -Math.log(-Math.log(rng.nextDouble()));
      final double obs = (upperUtility > lowerUtility) ? 1d : 0d;
      observations.add(
          SimObservedValue.<Vector, Matrix, LogitTrueState>create(
              VectorFactory.getDefault().copyValues(obs),
              samplePair.getObservedData(),
              new LogitTrueState(samplePair.getTrueState(), upperUtility, ev1Upper, lowerUtility)));
    }

    /*
     * Create and initialize the PL filter
     */
    final MultivariateGaussian initialPrior = new MultivariateGaussian(
        VectorFactory.getDefault().copyValues(0d, 0d),
        MatrixFactory.getDefault().copyArray(new double[][] {
            {10d, 0d},
            {0d, 10d}}));
    final Matrix F = MatrixFactory.getDefault().copyArray(new double[][] {
View Full Code Here
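The observation loop above uses the Gumbel-max (random-utility) construction: -Math.log(-Math.log(u)) with u uniform on (0,1) is a standard Gumbel draw, and the difference of two independent standard Gumbels has a Logistic(0, 1) distribution, so the binary outcome is marginally Bernoulli with success probability 1/(1 + exp(-x)) at latent predictor x. A minimal sketch of that identity, with helper names that are illustrative rather than from the original source:

    import java.util.Random;

    public final class GumbelMaxSketch {

      // -log(-log(U)), U ~ Uniform(0,1): a standard Gumbel (type-I extreme value) draw.
      static double sampleGumbel(Random rng) {
        return -Math.log(-Math.log(rng.nextDouble()));
      }

      // 1 when the "upper" utility x + Gumbel beats a competing Gumbel draw;
      // marginally this is Bernoulli(1 / (1 + exp(-x))).
      static double sampleLogitObservation(double x, Random rng) {
        final double upperUtility = x + sampleGumbel(rng);
        final double lowerUtility = sampleGumbel(rng);
        return (upperUtility > lowerUtility) ? 1d : 0d;
      }

      public static void main(String[] args) {
        final Random rng = new Random(1L);
        System.out.println(sampleLogitObservation(0.5d, rng));
      }
    }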

    @Override
    public GaussianArHpWfParticle update(
      GaussianArHpWfParticle predState) {

      final MultivariateGaussian posteriorState = predState.getState().clone();
      final KalmanFilter kf = predState.getFilter().clone();
      kf.update(posteriorState, predState.getObservation().getObservedValue());

      /*
       * The following are the parameter learning updates;
       * they can be done off-line, but we'll do them now.
       * TODO FIXME check that the input/offset thing is working!
       */
      final InverseGammaDistribution scaleSS = predState.getSigma2SS().clone();
      final MultivariateGaussian systemOffsetsSS = predState.getPsiSS().clone();

      final int xDim = posteriorState.getInputDimensionality();
      final Matrix Ij = MatrixFactory.getDefault().createIdentity(xDim, xDim);
      final Matrix H = MatrixFactory.getDefault().createMatrix(xDim, xDim * 2);
      H.setSubMatrix(0, 0, Ij);
      H.setSubMatrix(0, xDim, MatrixFactory.getDefault().createDiagonal(predState.getStateSample()));
      final Vector postStateSample = posteriorState.sample(this.rng);
      final MultivariateGaussian priorPhi = predState.getPsiSS();
      final Vector phiPriorSmpl = priorPhi.sample(this.rng);
      final Vector xHdiff = postStateSample.minus(H.times(phiPriorSmpl));

      final double newN = scaleSS.getShape() + 1d;
      final double d = scaleSS.getScale() + xHdiff.dotProduct(xHdiff);
     
      scaleSS.setScale(d);
      scaleSS.setShape(newN);
     
      // FIXME TODO: crappy sampler
      final double newScaleSmpl = scaleSS.sample(this.rng);
     
      /*
       * Update state and measurement covariances, which
       * have a strict dependency in this model (equality).
       */
      kf.setMeasurementCovariance(MatrixFactory.getDefault().createDiagonal(
          VectorFactory.getDefault().createVector(kf.getModel().getOutputDimensionality(),
              newScaleSmpl)));

      kf.setModelCovariance(MatrixFactory.getDefault().createDiagonal(
          VectorFactory.getDefault().createVector(kf.getModel().getStateDimensionality(),
              newScaleSmpl)));

      /*
       * Update offset and AR(1) prior(s).
       * Note that we divide out the previous scale param, since
       * we want to update A alone.
       */
      final Matrix priorAInv = priorPhi.getCovariance().scale(1d/predState.getSigma2Sample()).inverse();
      /*
       * TODO FIXME: we don't have a generalized outer product, so we're only
       * supporting the 1d case for now.
       */
      final Vector Hv = H.convertToVector();
      final Matrix postAInv = priorAInv.plus(Hv.outerProduct(Hv)).inverse();
      // TODO FIXME: ewww.  inverse.
      final Vector postPhiMean = postAInv.times(priorAInv.times(phiPriorSmpl).plus(
          H.transpose().times(postStateSample)));
      final MultivariateGaussian postPhi = systemOffsetsSS;
      postPhi.setMean(postPhiMean);
      postPhi.setCovariance(postAInv.scale(newScaleSmpl));
     
      final Vector postPhiSmpl = postPhi.sample(this.rng);
      final Matrix smplArTerms = MatrixFactory.getDefault().createDiagonal(
          postPhiSmpl.subVector(
              postPhiSmpl.getDimensionality()/2,
              postPhiSmpl.getDimensionality() - 1));
      kf.getModel().setA(smplArTerms);
View Full Code Here
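The offset/AR(1) step in update above is the usual conjugate normal-inverse-gamma update for a linear-Gaussian regression x = H * phi + e, e ~ N(0, sigma^2 * I), with prior phi ~ N(m_0, sigma^2 * A_0); here the sampled phiPriorSmpl plays the role of m_0, and H^T H is formed as an outer product because only the 1-d case is supported:

    A_n^{-1} = A_0^{-1} + H^T * H
    m_n      = A_n * ( A_0^{-1} * m_0 + H^T * x )
    phi | sigma^2, x  ~  N( m_n, sigma^2 * A_n )

In the code, priorAInv is A_0^{-1} (with the previous scale divided out), postAInv holds A_n (despite the name), postPhiMean is m_n, and the posterior covariance is A_n scaled by the freshly sampled sigma^2.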

    KalmanFilter kf = this.stateFilters.get(classId);
    final Vector mean = kf.getModel().getC().times(state.getMean());
    final Matrix cov = kf.getModel().getC().times(state.getCovariance())
        .times(kf.getModel().getC().transpose())
        .plus(kf.getMeasurementCovariance());
    final MultivariateGaussian likelihood = new MultivariateGaussian(
        mean, cov);
    return likelihood;
  }
View Full Code Here

      /*
       * Frühwirth-Schnatter's method for upper utility sampling, where
       * instead of sampling the predictors, we use the mean.
       */
      final MultivariateGaussian predictivePrior = particle.getLinearState().clone();
      KalmanFilter kf = particle.getRegressionFilter();
      final Matrix G = kf.getModel().getA();
      final Matrix F = data.getObservedData();
      predictivePrior.setMean(G.times(predictivePrior.getMean()));
      predictivePrior.setCovariance(
          G.times(predictivePrior.getCovariance()).times(G.transpose())
            .plus(kf.getModelCovariance()));
      final Vector betaSample =
          predictivePrior.sample(getRandom());
//          predictivePrior.getMean();
      final double predPriorObsMean =
            F.times(betaSample).getElement(0);

      final int particleCount;
      if (particleEntry.getValue() instanceof MutableDoubleCount) {
        particleCount = ((MutableDoubleCount)particleEntry.getValue()).count;
      } else {
        particleCount = 1;
      }
      for (int p = 0; p < particleCount; p++) {
        final double dSampledAugResponse = sampleAugResponse(predPriorObsMean, isOne);

        Vector sampledAugResponse = VectorFactory.getDefault().copyValues(dSampledAugResponse);

        for (int j = 0; j < 10; j++) {
          final LogitMixParticle predictiveParticle = particle.clone();
          predictiveParticle.setPreviousParticle(particle);
          predictiveParticle.setBetaSample(betaSample);
          predictiveParticle.setAugResponseSample(sampledAugResponse);
          predictiveParticle.setLinearState(predictivePrior);

          final UnivariateGaussian componentDist =
              this.evDistribution.getDistributions().get(j);

          predictiveParticle.setEVcomponent(componentDist);
         
          /*
           * Update the observed data for the regression component.
           */
          predictiveParticle.getRegressionFilter().getModel().setC(F);

          // TODO would be great to have a 1x1 matrix class here...
          final Matrix compVar = MatrixFactory.getDefault().copyArray(
              new double[][] {{componentDist.getVariance()}});
          predictiveParticle.getRegressionFilter().setMeasurementCovariance(compVar);
         
          final double compPredPriorObsMean =
               F.times(betaSample).getElement(0)
               + componentDist.getMean();
          final double compPredPriorObsCov =
               F.times(predictivePrior.getCovariance()).times(F.transpose()).getElement(0, 0)
               + componentDist.getVariance();
          predictiveParticle.setPriorPredMean(compPredPriorObsMean);
          predictiveParticle.setPriorPredCov(compPredPriorObsCov);

          final double logLikelihood =
View Full Code Here
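The loop over j = 0..9 above iterates the components of a fixed 10-component Gaussian mixture approximation to the extreme-value/logistic error term (the auxiliary-mixture approach associated with Frühwirth-Schnatter); each component yields a univariate Gaussian prior predictive for the augmented response, with the mean and variance computed just before it. As a hedged sketch only (the truncated assignment above is not shown in full, and the actual call in the source may differ), such a per-component log-likelihood could be evaluated as:

    // Illustrative only: evaluate the component's prior predictive log-density
    // at the sampled augmented response.
    final double logLikelihood = new UnivariateGaussian.PDF(
        compPredPriorObsMean, compPredPriorObsCov)
        .logEvaluate(dSampledAugResponse);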

    final Vector diffAugResponse =
        sampledAugResponse.minus(VectorFactory.getDefault().copyArray(
        new double[] {
            evComponent.getMean().doubleValue()
            }));
    final MultivariateGaussian posteriorState = updatedParticle.getLinearState().clone();
    filter.update(posteriorState,
        diffAugResponse);
    updatedParticle.setLinearState(posteriorState);
   
    return updatedParticle;
View Full Code Here

     */
    final DataDistribution<LassoRegressionDistribution> updatedDist =
        new DefaultDataDistribution<LassoRegressionDistribution>();
    for (final LassoRegressionDistribution particle : resampledParticles) {

      final MultivariateGaussian postBeta = particle.getPriorBeta().clone();

      final ScaledInverseGammaCovDistribution postObsCov = particle.getPriorObsCov();

      final Matrix augCovLassoSample = this.augLassoDist.sample(random);
      final Matrix obsCovSample = postObsCov.sample(random);
View Full Code Here

      final DataDistribution<LogitMixParticle> initialParticles =
          CountedDataDistribution.create(true);
      for (int i = 0; i < numParticles; i++) {
       
        final MultivariateGaussian initialPriorState = initialPrior.clone();
        final KalmanFilter kf = this.initialFilter.clone();
        final int componentId = this.rng.nextInt(10);
        final UnivariateGaussian evDist = this.evDistribution.
            getDistributions().get(componentId);
       
View Full Code Here
