Package org.apache.commons.math3.linear

Examples of org.apache.commons.math3.linear.RealMatrix


     * columns representing variable values.
     *
     * @param covariance Covariance instance
     */
    public PearsonsCorrelation(Covariance covariance) {
        RealMatrix covarianceMatrix = covariance.getCovarianceMatrix();
        if (covarianceMatrix == null) {
            throw new NullArgumentException(LocalizedFormats.COVARIANCE_MATRIX);
        }
        nObs = covariance.getN();
        correlationMatrix = covarianceToCorrelation(covarianceMatrix);
    }
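
A minimal usage sketch for this constructor, with made-up data and a hypothetical class name: it builds a Covariance from a data matrix whose columns are the variables and reads back the resulting correlation matrix.

import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.RealMatrix;
import org.apache.commons.math3.stat.correlation.Covariance;
import org.apache.commons.math3.stat.correlation.PearsonsCorrelation;

public class CorrelationFromCovariance {
    public static void main(String[] args) {
        // rows are observations, columns are variables (illustrative values)
        RealMatrix data = new Array2DRowRealMatrix(new double[][] {
            { 1.0, 2.1 }, { 2.0, 1.9 }, { 3.0, 4.2 }, { 4.0, 3.8 }, { 5.0, 6.1 }
        });
        Covariance covariance = new Covariance(data);
        PearsonsCorrelation correlation = new PearsonsCorrelation(covariance);
        // 2x2 symmetric matrix with ones on the diagonal
        RealMatrix corr = correlation.getCorrelationMatrix();
        System.out.println(corr);
    }
}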


     * @param matrix matrix with columns representing variables to correlate
     * @return correlation matrix
     */
    public RealMatrix computeCorrelationMatrix(RealMatrix matrix) {
        int nVars = matrix.getColumnDimension();
        RealMatrix outMatrix = new BlockRealMatrix(nVars, nVars);
        for (int i = 0; i < nVars; i++) {
            for (int j = 0; j < i; j++) {
                double corr = correlation(matrix.getColumn(i), matrix.getColumn(j));
                outMatrix.setEntry(i, j, corr);
                outMatrix.setEntry(j, i, corr);
            }
            outMatrix.setEntry(i, i, 1d);
        }
        return outMatrix;
    }
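
computeCorrelationMatrix can also be applied directly to a data matrix whose columns are the variables to correlate. A short sketch, assuming the same imports as the previous example and illustrative values:

        RealMatrix data = new Array2DRowRealMatrix(new double[][] {
            { 1.0, 10.0, 5.0 },
            { 2.0,  9.5, 4.0 },
            { 3.0, 11.2, 6.5 },
            { 4.0,  8.9, 5.5 }
        });
        // 3x3 symmetric correlation matrix with unit diagonal
        RealMatrix corr = new PearsonsCorrelation().computeCorrelationMatrix(data);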

     * @param covarianceMatrix the covariance matrix
     * @return correlation matrix
     */
    public RealMatrix covarianceToCorrelation(RealMatrix covarianceMatrix) {
        int nVars = covarianceMatrix.getColumnDimension();
        RealMatrix outMatrix = new BlockRealMatrix(nVars, nVars);
        for (int i = 0; i < nVars; i++) {
            double sigma = FastMath.sqrt(covarianceMatrix.getEntry(i, i));
            outMatrix.setEntry(i, i, 1d);
            for (int j = 0; j < i; j++) {
                double entry = covarianceMatrix.getEntry(i, j) /
                       (sigma * FastMath.sqrt(covarianceMatrix.getEntry(j, j)));
                outMatrix.setEntry(i, j, entry);
                outMatrix.setEntry(j, i, entry);
            }
        }
        return outMatrix;
    }
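
covarianceToCorrelation rescales each covariance entry by the product of the two standard deviations, r_ij = cov_ij / (sigma_i * sigma_j). A small hand-checkable sketch with illustrative numbers (same imports as above):

        // covariance matrix with variances 4 and 9 and covariance 3
        RealMatrix cov = new Array2DRowRealMatrix(new double[][] {
            { 4.0, 3.0 },
            { 3.0, 9.0 }
        });
        RealMatrix corr = new PearsonsCorrelation().covarianceToCorrelation(cov);
        // off-diagonal entries are 3 / (2 * 3) = 0.5, diagonal entries are 1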

        measurementMatrixT = measurementMatrix.transpose();

        // check that the process and measurement noise matrices are not null
        // they will be directly accessed from the model as they may change
        // over time
        RealMatrix processNoise = processModel.getProcessNoise();
        MathUtils.checkNotNull(processNoise);
        RealMatrix measNoise = measurementModel.getMeasurementNoise();
        MathUtils.checkNotNull(measNoise);

        // set the initial state estimate to a zero vector if it is not
        // available from the process model
        if (processModel.getInitialStateEstimate() == null) {
            stateEstimation =
                new ArrayRealVector(transitionMatrix.getColumnDimension());
        } else {
            stateEstimation = processModel.getInitialStateEstimate();
        }

        if (transitionMatrix.getColumnDimension() != stateEstimation.getDimension()) {
            throw new DimensionMismatchException(transitionMatrix.getColumnDimension(),
                                                 stateEstimation.getDimension());
        }

        // initialize the error covariance to the process noise if it is not
        // available from the process model
        if (processModel.getInitialErrorCovariance() == null) {
            errorCovariance = processNoise.copy();
        } else {
            errorCovariance = processModel.getInitialErrorCovariance();
        }

        // sanity checks, the control matrix B may be null

        // A must be a square matrix
        if (!transitionMatrix.isSquare()) {
            throw new NonSquareMatrixException(
                    transitionMatrix.getRowDimension(),
                    transitionMatrix.getColumnDimension());
        }

        // row dimension of B must be equal to A
        if (controlMatrix != null &&
            controlMatrix.getRowDimension() > 0 &&
            controlMatrix.getColumnDimension() > 0 &&
            (controlMatrix.getRowDimension() != transitionMatrix.getRowDimension() ||
             controlMatrix.getColumnDimension() != 1)) {
            throw new MatrixDimensionMismatchException(controlMatrix.getRowDimension(),
                                                       controlMatrix.getColumnDimension(),
                                                       transitionMatrix.getRowDimension(), 1);
        }

        // Q must have the same dimensions as A
        MatrixUtils.checkAdditionCompatible(transitionMatrix, processNoise);

        // column dimension of H must be equal to row dimension of A
        if (measurementMatrix.getColumnDimension() != transitionMatrix.getRowDimension()) {
            throw new MatrixDimensionMismatchException(measurementMatrix.getRowDimension(),
                                                       measurementMatrix.getColumnDimension(),
                                                       measurementMatrix.getRowDimension(),
                                                       transitionMatrix.getRowDimension());
        }

        // row dimension of R must be equal to row dimension of H
        if (measNoise.getRowDimension() != measurementMatrix.getRowDimension() ||
            measNoise.getColumnDimension() != 1) {
            throw new MatrixDimensionMismatchException(measNoise.getRowDimension(),
                                                       measNoise.getColumnDimension(),
                                                       measurementMatrix.getRowDimension(), 1);
        }
    }
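
These dimension checks run when a KalmanFilter is built from a process model and a measurement model. A minimal sketch for a one-dimensional constant-state model; the noise values and the class name are illustrative, not taken from the snippet:

import org.apache.commons.math3.filter.DefaultMeasurementModel;
import org.apache.commons.math3.filter.DefaultProcessModel;
import org.apache.commons.math3.filter.KalmanFilter;
import org.apache.commons.math3.filter.MeasurementModel;
import org.apache.commons.math3.filter.ProcessModel;
import org.apache.commons.math3.linear.Array2DRowRealMatrix;
import org.apache.commons.math3.linear.ArrayRealVector;
import org.apache.commons.math3.linear.RealMatrix;

public class ConstantStateFilter {
    public static void main(String[] args) {
        RealMatrix A = new Array2DRowRealMatrix(new double[] { 1d });   // state transition
        RealMatrix B = null;                                            // no control input
        RealMatrix H = new Array2DRowRealMatrix(new double[] { 1d });   // measurement matrix
        RealMatrix Q = new Array2DRowRealMatrix(new double[] { 1e-5 }); // process noise
        RealMatrix R = new Array2DRowRealMatrix(new double[] { 1e-2 }); // measurement noise

        ProcessModel pm = new DefaultProcessModel(A, B, Q,
                new ArrayRealVector(new double[] { 0 }), null);
        MeasurementModel mm = new DefaultMeasurementModel(H, R);
        KalmanFilter filter = new KalmanFilter(pm, mm); // passes the checks above
        System.out.println(filter.getStateDimension());
    }
}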

            throw new DimensionMismatchException(z.getDimension(),
                                                 measurementMatrix.getRowDimension());
        }

        // S = H * P(k)- * H' + R
        RealMatrix s = measurementMatrix.multiply(errorCovariance)
            .multiply(measurementMatrixT)
            .add(measurementModel.getMeasurementNoise());

        // invert S
        // S is symmetric and, for a positive definite measurement noise R,
        // positive definite, so the Cholesky decomposition can be used
        DecompositionSolver solver = new CholeskyDecomposition(s).getSolver();
        RealMatrix invertedS = solver.getInverse();

        // Inn = z(k) - H * xHat(k)-
        RealVector innovation = z.subtract(measurementMatrix.operate(stateEstimation));

        // calculate gain matrix
        // K(k) = P(k)- * H' * (H * P(k)- * H' + R)^-1
        // K(k) = P(k)- * H' * S^-1
        RealMatrix kalmanGain = errorCovariance.multiply(measurementMatrixT).multiply(invertedS);

        // update estimate with measurement z(k)
        // xHat(k) = xHat(k)- + K * Inn
        stateEstimation = stateEstimation.add(kalmanGain.operate(innovation));

        // update covariance of prediction error
        // P(k) = (I - K * H) * P(k)-
        RealMatrix identity = MatrixUtils.createRealIdentityMatrix(kalmanGain.getRowDimension());
        errorCovariance = identity.subtract(kalmanGain.multiply(measurementMatrix)).multiply(errorCovariance);
    }
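
Once constructed, the filter is driven by alternating predict and correct calls. A hedged sketch of that loop, continuing the constant-state example above with made-up measurements:

        double[] measurements = { 0.9, 1.1, 1.05, 0.95 }; // illustrative
        for (double z : measurements) {
            filter.predict();                   // time update: xHat(k)-, P(k)-
            filter.correct(new double[] { z }); // measurement update with z(k)
        }
        double[] estimate = filter.getStateEstimation();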

        // -------------------- Generation Loop --------------------------------

        generationLoop:
            for (iterations = 1; iterations <= maxIterations; iterations++) {
                // Generate and evaluate lambda offspring
                RealMatrix arz = randn1(dimension, lambda);
                RealMatrix arx = zeros(dimension, lambda);
                double[] fitness = new double[lambda];
                // generate random offspring
                for (int k = 0; k < lambda; k++) {
                    RealMatrix arxk = null;
                    for (int i = 0; i < checkFeasableCount+1; i++) {
                        if (diagonalOnly <= 0) {
                            arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k))
                                    .scalarMultiply(sigma)); // m + sig * Normal(0,C)
                        } else {
                            arxk = xmean.add(times(diagD,arz.getColumnMatrix(k))
                                    .scalarMultiply(sigma));
                        }
                        if (i >= checkFeasableCount || fitfun.isFeasible(arxk.getColumn(0))) {
                            break;
                        }
                        // regenerate random arguments for this column
                        arz.setColumn(k, randn(dimension));
                    }
                    copyColumn(arxk, 0, arx, k);
                    try {
                        fitness[k] = fitfun.value(arx.getColumn(k)); // compute fitness
                    } catch (TooManyEvaluationsException e) {
                        break generationLoop;
                    }
                }
                // Sort by fitness and compute weighted mean into xmean
                int[] arindex = sortedIndices(fitness);
                // Calculate new xmean, this is selection and recombination
                RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
                RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
                xmean = bestArx.multiply(weights);
                RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
                RealMatrix zmean = bestArz.multiply(weights);
                boolean hsig = updateEvolutionPaths(zmean, xold);
                if (diagonalOnly <= 0) {
                    updateCovariance(hsig, bestArx, arz, arindex, xold);
                } else {
                    updateCovarianceDiagonalOnly(hsig, bestArz, xold);
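
The loop above leans on a few RealMatrix column operations: getColumnMatrix, getColumn and setColumn, combined with scalarMultiply and add. A standalone sketch of that pattern, independent of the optimizer internals (assumes MatrixUtils and Array2DRowRealMatrix from org.apache.commons.math3.linear are imported; values are illustrative):

        RealMatrix mean = MatrixUtils.createColumnRealMatrix(new double[] { 1.0, 2.0 });
        RealMatrix draws = new Array2DRowRealMatrix(new double[][] {
            {  0.3, -1.2, 0.8 },
            { -0.5,  0.1, 1.4 }
        });
        double sigma = 0.5;
        RealMatrix samples = MatrixUtils.createRealMatrix(2, 3);
        for (int k = 0; k < draws.getColumnDimension(); k++) {
            // sample(k) = mean + sigma * draw(k), mirroring m + sig * Normal(0,C)
            RealMatrix col = mean.add(draws.getColumnMatrix(k).scalarMultiply(sigma));
            samples.setColumn(k, col.getColumn(0));
        }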

        double[][] sigmaArray = new double[guess.length][1];
        for (int i = 0; i < guess.length; i++) {
            final double range =  (boundaries == null) ? 1.0 : boundaries[1][i] - boundaries[0][i];
            sigmaArray[i][0]   = ((inputSigma == null) ? 0.3 : inputSigma[i]) / range;
        }
        RealMatrix insigma = new Array2DRowRealMatrix(sigmaArray, false);
        sigma = max(insigma); // overall standard deviation

        // initialize termination criteria
        stopTolUpX = 1e3 * max(insigma);
        stopTolX = 1e-11 * max(insigma);
        stopTolFun = 1e-12;
        stopTolHistFun = 1e-13;

        // initialize selection strategy parameters
        mu = lambda / 2; // number of parents/points for recombination
        logMu2 = Math.log(mu + 0.5);
        weights = log(sequence(1, mu, 1)).scalarMultiply(-1.).scalarAdd(logMu2);
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            double w = weights.getEntry(i, 0);
            sumw += w;
            sumwq += w * w;
        }
        weights = weights.scalarMultiply(1. / sumw);
        mueff = sumw * sumw / sumwq; // variance-effectiveness of sum w_i x_i

        // initialize dynamic strategy parameters and constants
        cc = (4. + mueff / dimension) /
                (dimension + 4. + 2. * mueff / dimension);
        cs = (mueff + 2.) / (dimension + mueff + 3.);
        damps = (1. + 2. * Math.max(0, Math.sqrt((mueff - 1.) /
                (dimension + 1.)) - 1.)) *
                Math.max(0.3, 1. - dimension /
                        (1e-6 + Math.min(maxIterations, getMaxEvaluations() /
                                lambda))) + cs; // minor increment
        ccov1 = 2. / ((dimension + 1.3) * (dimension + 1.3) + mueff);
        ccovmu = Math.min(1 - ccov1, 2. * (mueff - 2. + 1. / mueff) /
                ((dimension + 2.) * (dimension + 2.) + mueff));
        ccov1Sep = Math.min(1, ccov1 * (dimension + 1.5) / 3.);
        ccovmuSep = Math.min(1 - ccov1, ccovmu * (dimension + 1.5) / 3.);
        chiN = Math.sqrt(dimension) *
                (1. - 1. / (4. * dimension) + 1 / (21. * dimension * dimension));
        // initialize CMA internal values - updated each generation
        xmean = MatrixUtils.createColumnRealMatrix(guess); // objective
                                                           // variables
        diagD = insigma.scalarMultiply(1. / sigma);
        diagC = square(diagD);
        pc = zeros(dimension, 1); // evolution paths for C and sigma
        ps = zeros(dimension, 1); // B defines the coordinate system
        normps = ps.getFrobeniusNorm();

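
The recombination weights above are the log-spaced values log(mu + 0.5) - log(i), normalized to sum to one, and mueff = (sum w)^2 / sum(w^2) is the variance-effective selection mass (a quantity that does not change under the normalization). A plain-array sketch of the same computation, with an illustrative value of mu:

        int mu = 5; // illustrative number of parents
        double logMu2 = Math.log(mu + 0.5);
        double[] w = new double[mu];
        double sumw = 0;
        double sumwq = 0;
        for (int i = 0; i < mu; i++) {
            w[i] = logMu2 - Math.log(i + 1); // decreasing, positive weights
            sumw += w[i];
            sumwq += w[i] * w[i];
        }
        for (int i = 0; i < mu; i++) {
            w[i] /= sumw; // normalize so the weights sum to 1
        }
        double mueff = sumw * sumw / sumwq; // variance-effective selection mass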

     */
    private void updateCovariance(boolean hsig, final RealMatrix bestArx,
            final RealMatrix arz, final int[] arindex, final RealMatrix xold) {
        double negccov = 0;
        if (ccov1 + ccovmu > 0) {
            RealMatrix arpos = bestArx.subtract(repmat(xold, 1, mu))
                    .scalarMultiply(1. / sigma); // mu difference vectors
            RealMatrix roneu = pc.multiply(pc.transpose())
                    .scalarMultiply(ccov1); // rank one update
            // minor correction if hsig==false
            double oldFac = hsig ? 0 : ccov1 * cc * (2. - cc);
            oldFac += 1. - ccov1 - ccovmu;
            if (isActiveCMA) {
                // Adapt covariance matrix C - active CMA
                negccov = (1. - ccovmu) * 0.25 * mueff /
                        (Math.pow(dimension + 2., 1.5) + 2. * mueff);
                // keep at least 0.66 in all directions; small popsizes are most critical
                double negminresidualvariance = 0.66;
                // where to make up for the variance loss
                double negalphaold = 0.5;
                // prepare vectors, compute negative updating matrix Cneg
                int[] arReverseIndex = reverse(arindex);
                RealMatrix arzneg
                    = selectColumns(arz, MathArrays.copyOf(arReverseIndex, mu));
                RealMatrix arnorms = sqrt(sumRows(square(arzneg)));
                int[] idxnorms = sortedIndices(arnorms.getRow(0));
                RealMatrix arnormsSorted = selectColumns(arnorms, idxnorms);
                int[] idxReverse = reverse(idxnorms);
                RealMatrix arnormsReverse = selectColumns(arnorms, idxReverse);
                arnorms = divide(arnormsReverse, arnormsSorted);
                int[] idxInv = inverse(idxnorms);
                RealMatrix arnormsInv = selectColumns(arnorms, idxInv);
                // check and set learning rate negccov
                double negcovMax = (1. - negminresidualvariance) /
                        square(arnormsInv).multiply(weights).getEntry(0, 0);
                if (negccov > negcovMax) {
                    negccov = negcovMax;
                }
                arzneg = times(arzneg, repmat(arnormsInv, dimension, 1));
                RealMatrix artmp = BD.multiply(arzneg);
                RealMatrix Cneg = artmp.multiply(diag(weights)).multiply(
                        artmp.transpose());
                oldFac += negalphaold * negccov;
                C = C.scalarMultiply(oldFac)
                        // regard old matrix
                        .add(roneu)
                        // plus rank one update
                        .add(arpos.scalarMultiply(
                                // plus rank mu update
                                ccovmu + (1. - negalphaold) * negccov)
                                .multiply(
                                        times(repmat(weights, 1, dimension),
                                                arpos.transpose())))
                        .subtract(Cneg.scalarMultiply(negccov));
            } else {
                // Adapt covariance matrix C - nonactive
                C = C.scalarMultiply(oldFac) // regard old matrix
                        .add(roneu)
                        // plus rank one update
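
Ignoring the active-CMA branch, the adaptation blends the old matrix with a rank-one term pc * pc' and a rank-mu term built from the weighted difference vectors. A standalone sketch of just the rank-one part with RealMatrix (the learning rate and vectors are illustrative, not the optimizer's values; assumes MatrixUtils is imported):

        double c1 = 0.1; // illustrative rank-one learning rate
        RealMatrix C = MatrixUtils.createRealIdentityMatrix(3);
        RealMatrix pc = MatrixUtils.createColumnRealMatrix(new double[] { 0.2, -0.1, 0.4 });
        // C <- (1 - c1) * C + c1 * pc * pc'
        C = C.scalarMultiply(1.0 - c1)
             .add(pc.multiply(pc.transpose()).scalarMultiply(c1));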

        // solve the rectangular system in the least squares sense
        // to get the best estimate of the Nordsieck vector [s2 ... sk]
        QRDecomposition decomposition =
            new QRDecomposition(new Array2DRowRealMatrix(a, false));
        RealMatrix x = decomposition.getSolver().solve(new Array2DRowRealMatrix(b, false));
        return new Array2DRowRealMatrix(x.getData(), false);
    }
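
The QRDecomposition solver returns the least-squares solution when the system is overdetermined, which is what the Nordsieck update above relies on. A small sketch with a 3x2 system (made-up values; assumes QRDecomposition and Array2DRowRealMatrix are imported):

        RealMatrix a = new Array2DRowRealMatrix(new double[][] {
            { 1.0, 1.0 },
            { 1.0, 2.0 },
            { 1.0, 3.0 }
        });
        RealMatrix b = new Array2DRowRealMatrix(new double[][] { { 1.0 }, { 2.0 }, { 2.0 } });
        // least-squares solution of a * x = b; x is 2x1
        RealMatrix x = new QRDecomposition(a).getSolver().solve(b);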

    public void testTransitionMeasurementMatrixMismatch() {

        // A and H matrices do not match in dimensions

        // A = [ 1 ]
        RealMatrix A = new Array2DRowRealMatrix(new double[] { 1d });
        // no control input
        RealMatrix B = null;
        // H = [ 1; 1 ] (the double[] constructor builds a 2x1 column matrix,
        // deliberately incompatible with A)
        RealMatrix H = new Array2DRowRealMatrix(new double[] { 1d, 1d });
        // Q = [ 0 ]
        RealMatrix Q = new Array2DRowRealMatrix(new double[] { 0 });
        // R = [ 0 ]
        RealMatrix R = new Array2DRowRealMatrix(new double[] { 0 });

        ProcessModel pm
            = new DefaultProcessModel(A, B, Q,
                                      new ArrayRealVector(new double[] { 0 }), null);
        MeasurementModel mm = new DefaultMeasurementModel(H, R);
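
The test builds an A/H pair with incompatible dimensions and expects the KalmanFilter constructor to reject them. A sketch of how the check might be completed with JUnit 4 (the expected exception type follows from the dimension checks shown earlier; Assert is org.junit.Assert):

        // constructing the filter should fail the dimension checks
        try {
            new KalmanFilter(pm, mm);
            Assert.fail("expected MatrixDimensionMismatchException");
        } catch (MatrixDimensionMismatchException e) {
            // expected
        }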
