LearnerEvaluationConfiguration learnerEval = new LearnerEvaluationConfiguration(config);
learnerEval.setLabelConverter(converter);
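// Build a test set for language-based comparison; its size is scaled by the number of states and the alphabet size of the reference graph.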
int states = referenceGraph.getAcceptStateNumber();
final Collection<List<Label>> testSet = PaperUAS.computeEvaluationSet(referenceGraph, states*3, PairQualityLearner.makeEven(states*referenceGraph.pathroutines.computeAlphabet().size()));
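// Compare the learnt graph to the reference graph both structurally (Diff measure) and in terms of language (BCR over the test set).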
DifferenceToReferenceDiff differenceStructural = DifferenceToReferenceDiff.estimationOfDifferenceDiffMeasure(referenceGraph, learntGraph, config, 1);
DifferenceToReferenceLanguageBCR differenceBCRlearnt = DifferenceToReferenceLanguageBCR.estimationOfDifference(referenceGraph, learntGraph, testSet);
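// Build a Markov model of the configured chunk length directly from the training data, via a PTA.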
final MarkovModel m = new MarkovModel(chunkLen, true, true, false);
LearnerGraph pta = new LearnerGraph(config);
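// Populate the PTA with the positive (accepted) and negative (rejected) training sequences.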
for(List<Label> seq:sPlus)
    pta.paths.augmentPTA(seq, true, false, null);
for(List<Label> seq:sMinus)
    pta.paths.augmentPTA(seq, false, false, null);
pta.clearColours();
new MarkovClassifier(m, pta).updateMarkov(false); // construct the Markov model from the PTA
// For Markov, no learning is needed at all: the Markov matrix already contains enough information to classify paths, and hence to compare against the reference graph.
ConfusionMatrix mat = DiffExperiments.classifyAgainstMarkov(testSet, referenceGraph, m);
DifferenceToReferenceLanguageBCR differenceBCRMarkov = new DifferenceToReferenceLanguageBCR(mat);
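// Return the three scores to Erlang as a tuple of doubles: structural difference, BCR of the learnt graph and BCR of the Markov predictions.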
return new OtpErlangTuple(new OtpErlangObject[]{
    new OtpErlangDouble(differenceStructural.getValue()),
    new OtpErlangDouble(differenceBCRlearnt.getValue()),
    new OtpErlangDouble(differenceBCRMarkov.getValue())
});
}