// Running estimate of per-partition memory footprint, expressed as a fraction of
// hints.max_memory_per_partition (sum of per-table size_ratio contributions below).
double total_memory_used = 0.0;
// Memory-based replication is only considered when both a size limit and a
// per-partition memory budget were supplied in the hints.
boolean calculate_memory = (hints.force_replication_size_limit != null && hints.max_memory_per_partition != 0);
// Decide a partitioning method (replicate vs. hash) for every data table in the catalog.
for (Table catalog_tbl : CatalogUtil.getDataTables(info.catalogContext.database)) {
String table_key = CatalogKey.createKey(catalog_tbl);
TableEntry pentry = null;
Column col = null;
Collection<Column> pkey_columns = CatalogUtil.getPrimaryKeyColumns(catalog_tbl);
// Table statistics are required to estimate memory usage; a missing entry is a setup error.
TableStatistics ts = info.stats.getTableStatistics(catalog_tbl);
assert (ts != null) : "Null TableStatistics for " + catalog_tbl;
// Fraction of the per-partition memory budget this table's total tuple data would consume
// (0 when memory accounting is disabled).
double size_ratio = (calculate_memory ? (ts.tuple_size_total / (double) hints.max_memory_per_partition) : 0);
// Replication
// Replicate the table when (a) the hints force it, (b) it is read-only and small enough
// to fit under the replication size limit, or (c) it has no primary key (and therefore
// cannot be hash-partitioned on one).
if (hints.force_replication.contains(table_key) || (calculate_memory && ts.readonly && size_ratio <= hints.force_replication_size_limit) || pkey_columns.isEmpty()) {
// A replicated table is copied to every partition, so it contributes its FULL
// size ratio to each partition's budget (no division by partition count).
total_memory_used += size_ratio;
if (debug.val)
LOG.debug("Choosing " + catalog_tbl.getName() + " for replication");
col = ReplicatedColumn.get(catalog_tbl);
pentry = new TableEntry(PartitionMethodType.REPLICATION, col, null, null);
// Hash Primary Key
} else {
// A hash-partitioned table is spread across partitions, so its footprint is
// amortized over the number of partitions.
total_memory_used += (size_ratio / (double) info.getNumPartitions());
// Single-column primary key (or multi-column partitioning disabled): hash on the
// first pkey column. Otherwise hash on the composite of all pkey columns.
if (hints.enable_multi_partitioning == false || pkey_columns.size() == 1) {
col = CollectionUtil.first(pkey_columns);
pentry = new TableEntry(PartitionMethodType.HASH, col, null, null);
} else {
col = MultiColumn.get(pkey_columns.toArray(new Column[0]));
pentry = new TableEntry(PartitionMethodType.HASH, col, null, null);
}
assert (pentry.attribute != null) : catalog_tbl;
}
if (debug.val)
LOG.debug(String.format("[%02d] %s", pplan.getTableCount(), col.fullName()));
// Record the chosen partitioning entry for this table in the plan under construction.
pplan.getTableEntries().put(catalog_tbl, pentry);
} // FOR
// NOTE(review): size_ratio is computed as a 0..1 fraction of the memory budget, yet this
// bound is 100 — that permits 100x over-budget. Confirm whether the intended unit is a
// percentage (then size_ratio should be scaled by 100) or the bound should be 1.0.
assert (total_memory_used <= 100) : "Too much memory per partition: " + total_memory_used;
if (hints.enable_procparameter_search) {