* Main training procedure
*/
@Override
public void train() {
DataModel datamodel = getDataModel();
initBias();
initPQ();
int nu = 0; // the number of items user u rated
for (int iter = 0; iter < iteration; iter++) {
double rmse = 0.0;
int n = 0;
IntArrayList users = (IntArrayList) datamodel.getUserIDs();
for (int u = 0; u < users.size(); u++) {
// sumQE: one accumulator slot per latent factor, zeroed for this user below.
// NOTE(review): presumably it accumulates per-factor q_i * e_ui sums for the
// implicit item-factor (y) update later in this loop — its actual use is
// outside this view; confirm against the rest of the method.
PreferenceArray sumQE = new GenericUserPreferenceArray(
parameter_k);
// get userid
int userid = users.get(u);
nu = datamodel.getNumOfItemsRateByUser(userid);
Vector tmpitems = datamodel.getVectorOfItems(userid);
// per factor k: temp_Pu(k) = Pu(k) + (sum over items i rated by u of y_i(k)) / sqrt(nu)
PreferenceArray tempUserFactor = puTemp.get(userid);
PreferenceArray UserFactor = pusers.get(userid);
for (int k = 0; k < parameter_k; k++) {
Iterator<Element> itor = tmpitems.iterateNonZero();
double sum = 0.0;
while (itor.hasNext()) {
Element e = itor.next();
int itemid = e.index();
sum = sum + y.get(itemid).getValue(k);
}
float temp = (float) (UserFactor.getValue(k) + sum
/ Math.sqrt(nu));
tempUserFactor.setValue(k, temp);
sumQE.setValue(k, 0.0f);
}
// iterate to deal with items
Iterator<Element> itor = tmpitems.iterateNonZero();
while (itor.hasNext()) {
Element e = itor.next();
int itemid = e.index();
// actual rating and estimated rating
double rui = datamodel.getPreferenceValue(userid, itemid);
double pui = predictPreference(userid, itemid);
double eui = rui - pui;
rmse += eui * eui;
n++;