 * solver.
 */
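// Projection buffer (corpus rows x desiredRank) plus the shared training state handed to every update call.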
Matrix corpusProjections = new DenseMatrix(corpus.numRows(), desiredRank);
TrainingState state = new TrainingState(eigens, corpusProjections);
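// Outer loop: extract one eigenvector (and its eigenvalue) per iteration, up to desiredRank of them.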
for (int i = 0; i < desiredRank; i++) {
  Vector currentEigen = new DenseVector(cols);
  Vector previousEigen = null;
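  // Keep sweeping over the corpus until the convergence check passes for this eigenvector.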
  while (hasNotConverged(currentEigen, corpus, state)) {
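    // Seed each pass with a randomly chosen corpus row.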
    int randomStartingIndex = getRandomStartingIndex(corpus, eigens);
    Vector initialTrainingVector = corpus.getRow(randomStartingIndex);
    state.setTrainingIndex(randomStartingIndex);
    updater.update(currentEigen, initialTrainingVector, state);
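    // Then train on every remaining corpus row in this pass.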
    for (int corpusRow = 0; corpusRow < corpus.numRows(); corpusRow++) {
      state.setTrainingIndex(corpusRow);
      if (corpusRow != randomStartingIndex) {
        updater.update(currentEigen, corpus.getRow(corpusRow), state);
      }
    }
    state.setFirstPass(false);
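    // Debug-only: log the (normalized) dot product between this pass's eigenvector
    // and the snapshot taken on the first debug pass.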
    if (debug) {
      if (previousEigen == null) {
        previousEigen = currentEigen.clone();
      } else {
        double dot = currentEigen.dot(previousEigen);
        if (dot > 0.0) {
          dot /= currentEigen.norm(2) * previousEigen.norm(2);
        }
        log.info("Current pass * previous pass = {}", dot);
      }
    }
  }
  // Converged: the last recorded training status holds the eigenvalue estimate for this eigenvector.
  double eigenValue = state.getStatusProgress().get(state.getStatusProgress().size() - 1).getEigenValue();
  // Normalizing in place is more efficient than calling currentEigen = currentEigen.normalize(),
  // because normalize() clones the vector, and that clone isn't needed here.
  currentEigen.assign(new TimesFunction(), 1 / currentEigen.norm(2));
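  // Record the result: the normalized eigenvector becomes row i of eigens, and the updated
  // eigenvalue list is stored back into the training state.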
  eigens.assignRow(i, currentEigen);
  eigenValues.add(eigenValue);
  state.setCurrentEigenValues(eigenValues);
  log.info("Found eigenvector {}, eigenvalue: {}", i, eigenValue);