public void run() throws Exception {
for (int i = 0; i < nodeOrderQueue.size(); i++) {
Node node = nodeOrderQueue.get(i);
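// Partition this node's parents into discrete and continuous lists.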
List<Node> discreteParentList = new ArrayList<Node>();
List<Node> continuousParentList = new ArrayList<Node>();
for (Node parentNode : clonedPN.getNode(node.getName()).getParents()) {
if (parentNode.getType() == Node.PROBABILISTIC_NODE_TYPE) {
discreteParentList.add(parentNode);
} else if (parentNode.getType() == Node.CONTINUOUS_NODE_TYPE) {
continuousParentList.add(parentNode);
}
}
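// Sorting by name keeps the parent ordering deterministic; the
// state-configuration indexing further below presumably relies on this order.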
SortUtil.sortNodeListByName(discreteParentList);
SortUtil.sortNodeListByName(continuousParentList);
// At most, the number of networks compiled to obtain posteriors equals the
// number of discrete parents of this continuous node. However, two parents
// may belong to the same subnetwork, in which case the same compiled network
// serves both.
// Each subnetwork gathers every non-continuous node connected to a parent of
// the current continuous node; compiling it yields that parent's posterior
// distribution.
// This is a hybrid approach: we use Junction Tree where possible (discrete
// nodes) and Weighted Gaussian Sum for the rest (continuous nodes).
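// For example (illustrative node names only): if continuous node C has
// discrete parents D1 and D2 belonging to the same connected discrete
// subnetwork, that subnetwork is compiled once and provides the posteriors
// of both D1 and D2.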
// Initialize all discrete nodes as not visited.
Map<String, Boolean> nodeVisitedBeforeMap = new HashMap<String, Boolean>();
for (Node discreteNode : pn.getNodes()) {
if (discreteNode.getType() == Node.PROBABILISTIC_NODE_TYPE) {
nodeVisitedBeforeMap.put(discreteNode.getName(), false);
}
}
List<Node> nodeInNetworkList;
boolean nodeVisitedBefore;
for (int j = 0; j < discreteParentList.size(); j++) {
nodeVisitedBefore = nodeVisitedBeforeMap.get(discreteParentList.get(j).getName());
if (!nodeVisitedBefore) {
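// Build the connected subnetwork that contains this parent, then prune
// every node outside it from the clone before compiling.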
nodeInNetworkList = new ArrayList<Node>();
addAdjacentNodes(clonedPN.getNode(discreteParentList.get(j).getName()), nodeInNetworkList);
List<Node> nodeToRemoveList = new ArrayList<Node>();
for (Node nodeToRemove : clonedPN.getNodes()) {
if (!nodeInNetworkList.contains(nodeToRemove)) {
nodeToRemoveList.add(nodeToRemove);
}
}
for (Node nodeToRemove : nodeToRemoveList) {
clonedPN.removeNode(nodeToRemove);
}
// Compile the discrete subnetwork (Junction Tree).
clonedPN.compile();
// Copy each node's calculated marginal back into the initial network (pn).
// Every node remaining in clonedPN is known to be discrete.
for (Node nodeToGetMarginal : clonedPN.getNodes()) {
TreeVariable variableToGetMarginal = (TreeVariable)nodeToGetMarginal;
TreeVariable variable = (TreeVariable)pn.getNode(nodeToGetMarginal.getName());
float[] values = new float[variable.getStatesSize()];
for (int stateIndex = 0; stateIndex < variable.getStatesSize(); stateIndex++) {
values[stateIndex] = variableToGetMarginal.getMarginalAt(stateIndex);
}
variable.initMarginalList();
variable.addLikeliHood(values);
// Mark this node as visited so its subnetwork is not compiled again.
nodeVisitedBeforeMap.put(nodeToGetMarginal.getName(), true);
}
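// Restore a fresh full clone, since the current one was pruned down to a
// single subnetwork.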
clonedPN = clonePN(this.pn);
}
}
// Now we have the posteriors of all parents of the current continuous node.
// Calculate the Weighted Gaussian Sum (from "Symbolic Probabilistic Inference
// with both Discrete and Continuous Variables", appendix C).
// First, calculate the mean SumOf(Prob[Parents(node)] * PartialMean) over
// every possible normal distribution function (one per combination of the
// parents' states).
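// For example, with two configurations having probabilities {0.3, 0.7} and
// partial means {1.0, 2.0}, the weighted mean is 0.3*1.0 + 0.7*2.0 = 1.7.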
CNNormalDistribution cDistribution = ((ContinuousNode)node).getCnNormalDistribution();
double[] partialMeanList = new double[cDistribution.functionSize()];
double[] partialVarianceList = new double[cDistribution.functionSize()];
double[] probabilityList = new double[cDistribution.functionSize()];
double weightedMean = 0.0;
for (int ndfIndex = 0; ndfIndex < cDistribution.functionSize(); ndfIndex++) {
// Each normal distribution function's mean is SumOf(PartialMean) over every
// normal distribution in the function (one for each continuous parent plus
// one for the noise normal distribution). As each continuous parent's
// distribution is multiplied by a constant, its
// PartialMean = constant * MeanWithoutConstant.
// First we add the mean of the noise normal distribution.
partialMeanList[ndfIndex] = cDistribution.getMean(ndfIndex);
// Each normal distribution function's variance is SumOf(PartialVariance) over
// every normal distribution in the function (one for each continuous parent
// plus one for the noise normal distribution). As each continuous parent's
// distribution is multiplied by a constant, its
// PartialVariance = constant^2 * VarianceWithoutConstant.
// For the variance, we first add the variance of the noise normal distribution.
partialVarianceList[ndfIndex] = cDistribution.getVariance(ndfIndex);
// Then, for each continuous parent we add constant * MeanWithoutConstant for the PartialMean
// and constant^2 * VarianceWithoutConstant for the PartialVariance.
double meanWithoutConstant;
double varianceWithoutConstant;
for (int parentIndex = 0; parentIndex < cDistribution.getContinuousParentList().size(); parentIndex++) {
TreeVariable variable = (TreeVariable)cDistribution.getContinuousParentList().get(parentIndex);
// By this point the continuous parent's mean and variance have already been computed.
meanWithoutConstant = variable.getMarginalAt(ContinuousNode.MEAN_MARGINAL_INDEX);
varianceWithoutConstant = variable.getMarginalAt(ContinuousNode.VARIANCE_MARGINAL_INDEX);
partialMeanList[ndfIndex] += cDistribution.getConstantAt(parentIndex, ndfIndex) * meanWithoutConstant;
partialVarianceList[ndfIndex] += Math.pow(cDistribution.getConstantAt(parentIndex, ndfIndex), 2) * varianceWithoutConstant;
}
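// For example (illustrative numbers only): a noise distribution N(1.0, 0.5)
// and a single continuous parent with marginal N(2.0, 1.0) scaled by
// constant 3 give PartialMean = 1.0 + 3*2.0 = 7.0 and
// PartialVariance = 0.5 + 3^2*1.0 = 9.5.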
// Now get the configuration of the parents' states in order to compute its probability.
int[] parentsStatesConfiguration = cDistribution.getMultidimensionalCoord(ndfIndex);
probabilityList[ndfIndex] = 1.0;
for (int parentIndex = 0; parentIndex < parentsStatesConfiguration.length; parentIndex++) {
probabilityList[ndfIndex] *= ((TreeVariable)pn.getNode(discreteParentList.get(parentIndex).getName())).getMarginalAt(parentsStatesConfiguration[parentIndex]);
}
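// Note: taking the product of the parents' individual marginals treats their
// posteriors as independent, which is an approximation when the parents are
// correlated.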
// Finally, accumulate the weighted Gaussian sum SumOf(Prob[Parents(node)] * PartialMean).
weightedMean += probabilityList[ndfIndex] * partialMeanList[ndfIndex];
// We can only calculate the weightedVariance after we have the final result
// for the weightedMean.
}
// Now that we have the final weightedMean, we can calculate the weightedVariance.
// WeightedVariance = SumOf(Prob[Parents(node)] * (PartialVariance + PartialMean^2 - WeightedMean^2))
double weightedVariance = 0.0;
for (int ndfIndex = 0; ndfIndex < cDistribution.functionSize(); ndfIndex++) {
weightedVariance += probabilityList[ndfIndex] * (partialVarianceList[ndfIndex] + Math.pow(partialMeanList[ndfIndex], 2) - Math.pow(weightedMean, 2));
}
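// Sanity check on the formula above: since SumOf(Prob) = 1, subtracting
// WeightedMean^2 inside the sum is equivalent to the usual mixture identity
// Var = SumOf(Prob * (PartialVariance + PartialMean^2)) - WeightedMean^2.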
// Add the mean and variance as its marginal in the TreeVariable.
float[] values = new float[node.getStatesSize()];
values[ContinuousNode.MEAN_MARGINAL_INDEX] = (float)weightedMean;
values[ContinuousNode.VARIANCE_MARGINAL_INDEX] = (float)weightedVariance;
TreeVariable continuousVariable = (TreeVariable)node;
continuousVariable.initMarginalList();
continuousVariable.addLikeliHood(values);
}
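
// A minimal, self-contained sketch of the Weighted Gaussian Sum step above,
// extracted for clarity. This helper is hypothetical (it is not part of the
// original class); it assumes the three arrays are index-aligned per parent
// state configuration and that the probabilities sum to 1, as in the loops
// above. E.g., weightedGaussianSum(new double[]{0.3, 0.7},
// new double[]{1.0, 2.0}, new double[]{0.5, 0.5}) yields {1.7, 0.71}.
private static double[] weightedGaussianSum(double[] probabilityList, double[] partialMeanList, double[] partialVarianceList) {
    // WeightedMean = SumOf(Prob * PartialMean).
    double weightedMean = 0.0;
    for (int i = 0; i < probabilityList.length; i++) {
        weightedMean += probabilityList[i] * partialMeanList[i];
    }
    // WeightedVariance = SumOf(Prob * (PartialVariance + PartialMean^2 - WeightedMean^2)).
    double weightedVariance = 0.0;
    for (int i = 0; i < probabilityList.length; i++) {
        weightedVariance += probabilityList[i] * (partialVarianceList[i] + Math.pow(partialMeanList[i], 2) - Math.pow(weightedMean, 2));
    }
    return new double[] { weightedMean, weightedVariance };
}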