     * Without ever completely computing (AX-Y).  This is done by computing
     * (AX-Y) on a row-by-row basis and then multiplying each row of (AX-Y)
     * by the relevant parts of A' to update the final matrix.
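     *
     * This works because G = A'(AX-Y) decomposes row-wise: if A_r and Y_r
     * denote row r of A and Y, then G = sum_r (A_r)' * (A_r X - Y_r), so
     * each row of the residual contributes a single outer product to G and
     * only one row of (AX-Y) needs to be held in memory at a time.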
     */
    private Matrix computeGofX(SparseMatrix Y) {
        Matrix G = new ArrayMatrix(numDimensions, X.columns());
        // Iterate through each row of Y, and thus of AX-Y.  For each row,
        // compute the difference between the two matrices.  Then multiply
        // the values in the corresponding row of A against this difference
        // vector.  Instead of computing the inner product of A' and (AX-Y)
        // directly, we update each k,c value in G by iterating through the
        // columns of A.
        for (int r = 0; r < Y.rows(); ++r) {
            // Get the row vector of Y.
            SparseDoubleVector v = Y.getRowVector(r);
            double[] vDiff = new double[v.length()];
            // Compute the difference between the row vector of AX and the
            // row vector of Y.  Entry c of the row vector of AX is the
            // straightforward dot product between row r of A and column c
            // of X.
            for (int c = 0; c < X.columns(); ++c) {
                double sum = 0;
                for (int k = 0; k < A.columns(); ++k)
                    sum += A.get(r, k) * X.get(k, c);
                vDiff[c] = sum - v.get(c);
            }
            // Now take row r of A and, for each column k, multiply its
            // entry against each column c of vDiff to accumulate the
            // contribution to the gradient G_{k,c}.  Each row r thus adds
            // one rank-1 outer product update to G.
            for (int k = 0; k < A.columns(); ++k)
                for (int c = 0; c < X.columns(); ++c)
                    G.set(k, c, G.get(k, c) + A.get(r, k) * vDiff[c]);
        }
        return G;
    }
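
    /*
     * For reference, a minimal dense sketch of the same computation that
     * materializes the full residual R = AX-Y before forming G = A'R.
     * This method is hypothetical (not part of the original class) and is
     * shown only to illustrate the allocation that computeGofX's
     * row-by-row loop avoids; it reuses the same A, X, and numDimensions
     * fields and should produce the same result.
     */
    private Matrix computeGofXDense(SparseMatrix Y) {
        // Materialize the full residual R = AX-Y, one entry at a time.
        Matrix R = new ArrayMatrix(Y.rows(), X.columns());
        for (int r = 0; r < Y.rows(); ++r) {
            SparseDoubleVector v = Y.getRowVector(r);
            for (int c = 0; c < X.columns(); ++c) {
                double sum = 0;
                for (int k = 0; k < A.columns(); ++k)
                    sum += A.get(r, k) * X.get(k, c);
                R.set(r, c, sum - v.get(c));
            }
        }
        // Form G = A' * R explicitly: G_{k,c} = sum_r A_{r,k} * R_{r,c}.
        Matrix G = new ArrayMatrix(numDimensions, X.columns());
        for (int k = 0; k < A.columns(); ++k)
            for (int c = 0; c < X.columns(); ++c) {
                double sum = 0;
                for (int r = 0; r < Y.rows(); ++r)
                    sum += A.get(r, k) * R.get(r, c);
                G.set(k, c, sum);
            }
        return G;
    }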