* This allows all the data to be processed once, instead of n times (once for each object).
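* The joined rows are indexed by the primary key of their parent (source) object.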
*/
protected void processDataResults(AbstractSession session) {
this.dataResultsByPrimaryKey = new HashMap();
int size = this.dataResults.size();
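// Track the previous parent key so consecutive rows for the same parent
// can be appended to the same child row list.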
CacheKey lastCacheKey = null;
List childRows = null;
ObjectBuilder builder = getDescriptor().getObjectBuilder();
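// Index at which the parent object's fields start within each joined row.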
int parentIndex = getParentResultIndex();
for (int dataResultsIndex = 0; dataResultsIndex < size; dataResultsIndex++) {
AbstractRecord row = this.dataResults.get(dataResultsIndex);
AbstractRecord parentRow = row;
// Must adjust for the parent index to ensure the correct primary key is extracted.
if (parentIndex > 0) {
Vector trimmedFields = new NonSynchronizedSubVector(row.getFields(), parentIndex, row.size());
Vector trimmedValues = new NonSynchronizedSubVector(row.getValues(), parentIndex, row.size());
parentRow = new DatabaseRecord(trimmedFields, trimmedValues);
}
// Extract the primary key of the source object, to filter only the joined rows for that object.
Vector sourceKey = builder.extractPrimaryKeyFromRow(parentRow, session);
// The key may be null for a row from an outer join, so ignore null keys.
if (sourceKey != null) {
CacheKey sourceCacheKey = new CacheKey(sourceKey);
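// If this row belongs to the same parent as the previous row, append it to the current child list.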
if ((lastCacheKey != null) && lastCacheKey.equals(sourceCacheKey)) {
childRows.add(row);
if (shouldFilterDuplicates()) {
// Also null out the duplicate row so that object building does not process it again.
this.dataResults.set(dataResultsIndex, null);