package InfoCollection.IndependentNormalVarianceUnknown;
import InfoCollection.NormalTruth;
import InfoCollection.SamplingRule;
import InfoCollection.TestExamples;
import InfoCollection.util.MathPF;
public class LL1Simple extends BayesSamplingRule {
public LL1Simple() {
super();
}
public LL1Simple(Belief b) {
super(b);
}
/*
 * Chooses the alternative with the largest log KG factor, breaking ties
 * randomly. If the belief is not integrable, defers to
 * belief.ChooseRandomNotIntegrable() instead.
 */
public int GetMeasurementDecision() throws Exception {
if (belief.IsIntegrable()) {
double[] logkg = KG.LogKGfactor(belief);
// System.out.printf("logKG=%s\n", Arrays.toString(logkg));
return MathPF.argmax(logkg, rnd);
} else
return belief.ChooseRandomNotIntegrable(rnd);
}
/*
 * Returns the log KG factor of each alternative. Throws an exception if
 * the belief is not integrable.
 */
public double[] GetLogQ() throws Exception {
return KG.LogKGfactor(belief);
}
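/*
 * Hypothetical helper (not part of the original class): one way a caller
 * might turn the log KG factors returned by GetLogQ() into normalized
 * weights for inspection, using a numerically stable shift by the maximum
 * before exponentiating. A sketch only; the sampling rule itself needs
 * only the argmax, which is unaffected by this normalization.
 */
public static double[] NormalizeLogWeights(double[] logq) {
    double max = Double.NEGATIVE_INFINITY;
    for (double v : logq)
        max = Math.max(max, v);
    double sum = 0;
    double[] w = new double[logq.length];
    for (int i = 0; i < logq.length; i++) {
        // Subtracting the max keeps exp() from overflowing, or from
        // underflowing to zero for every entry at once.
        w[i] = Math.exp(logq[i] - max);
        sum += w[i];
    }
    for (int i = 0; i < w.length; i++)
        w[i] /= sum;
    return w;
}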
@Override
public String toString() {
return "LL1Simple";
}
public static void main(String[] args) throws Exception {
RegressionTest();
}
public static void RegressionTest() {
test1();
// test2(); // skipped: too slow to be useful for regression
test3();
}
/*
 * Runs example 1 and checks only that no exception is thrown.
 */
public static boolean test1() {
try {
TestExamples.Example1(new LL1Simple(), false);
} catch (Exception e) {
e.printStackTrace();
System.out.println("test1: FAILED");
return false;
}
System.out.println("test1: OK");
return true;
}
/*
 * Runs example 1 with a KG stopping rule to check for errors, and reports
 * the results for graphing. This test can take a long time, so it is not
 * part of the regression suite.
 */
public static boolean test2() {
try {
TestExamples.TestStopping(new LL1Simple(), KGStoppingRule.Seq());
} catch (Exception e) {
e.printStackTrace();
System.out.println("test2: FAILED");
return false;
}
System.out.println("test2: OK");
return true;
}
/*
 * Compare this implementation of LL1 to the variance-known KG
 * implementation by giving LL1 a prior under which the sampling precisions
 * are essentially known. Under such a prior the two algorithms should make
 * identical decisions, except for tie-breaking, which LL1 does randomly.
 * Ties occur often because the best and second-best alternatives
 * frequently have the same number of measurements applied to them, and
 * hence the same KG factor.
 *
 * PF: To get better automatic error checking, one could add a version of
 * GetMeasurementDecision that returns the set of alternatives from which
 * the measurement decision is supposed to be chosen randomly. Then
 * CheckEquivalence could compare these sets and check that they have a
 * non-empty intersection. (A sketch of such a method appears after test3
 * below.)
 */
public static boolean test3() {
NormalTruth truth = NormalTruth.SlippageExample();
Belief b = new Belief(truth.SamplingPrecision());
SamplingRule p1, p2;
p1 = new LL1Simple(b);
// passing 1 to the variance-known version turns on error checking.
p2 = new InfoCollection.IndependentNormalVarianceKnown.KG(1);
int numDisagreements = 0;
try {
numDisagreements = TestExamples.CheckEquivalence(p1, p2, truth,
false);
} catch (Exception e) {
e.printStackTrace();
System.out.println("test3: FAILED");
return false;
}
// A small number of disagreements is expected from random tie-breaking;
// many more than that indicates a real discrepancy.
if (numDisagreements < 25) {
System.out.printf("test3: OK (numDisagreements=%d)\n",
numDisagreements);
return true;
} else {
System.out.printf("test3: FAILED (numDisagreements=%d)\n",
numDisagreements);
return false;
}
}
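/*
 * Sketch of the idea in the PF note above test3: a hypothetical variant of
 * GetMeasurementDecision that returns the whole set of alternatives whose
 * log KG factor is within a tolerance of the maximum, so that an
 * equivalence check could compare sets for non-empty intersection rather
 * than single decisions. Not wired into CheckEquivalence; like GetLogQ(),
 * it assumes the belief is integrable, and the tolerance is an assumption.
 */
public java.util.Set<Integer> GetMeasurementDecisionSet(double tol)
        throws Exception {
    double[] logkg = KG.LogKGfactor(belief);
    double best = Double.NEGATIVE_INFINITY;
    for (double v : logkg)
        best = Math.max(best, v);
    // Collect every alternative tied (within tol) with the best one.
    java.util.Set<Integer> ties = new java.util.HashSet<Integer>();
    for (int i = 0; i < logkg.length; i++)
        if (logkg[i] >= best - tol)
            ties.add(i);
    return ties;
}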
}