for(List<Label> seq:sMinus)
pta.paths.augmentPTA(seq,false,false,null);
final MarkovModel m = new MarkovModel(3,true,true,false);
new MarkovClassifier(m, pta).updateMarkov(false);// construct the Markov model from the PTA.
final ConsistencyChecker checker = new MarkovClassifier.DifferentPredictionsInconsistencyNoBlacklistingIncludeMissingPrefixes();
pta.clearColours();
EDSM_MarkovLearner learner = new EDSM_MarkovLearner(learnerInitConfiguration,pta,0) {
@Override
public Stack<PairScore> ChooseStatePairs(LearnerGraph graph)
{
reportLearningProgress(graph,message,ref,counter);
// resume learning.
return super.ChooseStatePairs(graph);
}
};
learner.setMarkov(m);
learner.setChecker(checker);
learner.setUseNewScoreNearRoot(false);
learner.setUseClassifyPairs(false);
learner.setDisableInconsistenciesInMergers(false);
if (learnerInitConfiguration.graph != null)
{
learnerInitConfiguration.graph.clearColours();
learnerInitConfiguration.graph.getInit().setColour(JUConstants.RED);
LearnerGraph.copyGraphs(learnerInitConfiguration.graph,learner.getTentativeAutomaton());
}
LearnerGraph graphLearnt = learner.learnMachine(new LinkedList<List<Label>>(),new LinkedList<List<Label>>());
outcome = new OtpErlangTuple(new OtpErlangObject[]{ref,msgOk, constructFSM(graphLearnt)});
}
catch(AskedToTerminateException e)
{
outcome = new OtpErlangTuple(new OtpErlangObject[]{ref,msgTerminate});
}
catch(Throwable ex)
{
ex.printStackTrace();
outcome = new OtpErlangTuple(new OtpErlangObject[]{ref,msgFailure,new OtpErlangList(ex.getMessage() == null? ex.toString() : ex.getMessage())});// guard against a null exception message, which would otherwise throw inside this catch block.
}
mbox.send(erlangPartner,outcome);
}
else
// Args: Ref, learn, pid
// pid is optional; where provided, progress messages are reported in the form {Ref,'status',step}.
// In the course of learning, the learner remains receptive to messages directed at its normal PID; a {Ref,terminate} command will kill it and the response will be {Ref,terminate}.
// Response: Ref,ok,fsm
// On error: Ref,failure,text_of_the_error (as string)
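// An illustrative exchange, assuming the tuple formats above (the concrete values are hypothetical, not produced by this code):
//   request:   {Ref, learn, Pid}
//   progress:  {Ref, 'status', Step}   -- sent to Pid while learning, when Pid was supplied
//   success:   {Ref, ok, Fsm}
//   failure:   {Ref, failure, "text of the error"}
//   terminate: {Ref, terminate}        -- the response to a {Ref,terminate} command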
if (command.equals(msgLearnEDSMMARKOVcentre) && message.arity() >= 2)
{
OtpErlangObject outcome = null;
try
{
final AtomicLong counter = new AtomicLong();
learnerInitConfiguration.config.setLearnerScoreMode(ScoreMode.ONLYOVERRIDE);
LearnerGraph ptaInitial=new LearnerGraph(learnerInitConfiguration.config);
for(List<Label> seq:sPlus)
ptaInitial.paths.augmentPTA(seq,true,false,null);
for(List<Label> seq:sMinus)
ptaInitial.paths.augmentPTA(seq,false,false,null);
final MarkovModel m = new MarkovModel(3,true,true,false);
final MarkovClassifier ptaClassifier = new MarkovClassifier(m, ptaInitial);
ptaClassifier.updateMarkov(false);
LearnerGraph ptaToUseForInference = ptaInitial;
final ConsistencyChecker checker = new MarkovClassifier.DifferentPredictionsInconsistencyNoBlacklistingIncludeMissingPrefixes();
{
Collection<Set<CmpVertex>> verticesToMergeBasedOnInitialPTA=null;
final List<List<Label>> pathsToMerge=ptaClassifier.identifyPathsToMerge(checker);
// These vertices are merged first and then learning starts from the root as normal.
// Learning from the root is chosen to keep the memory cost down; learning from the middle could give better results but is more expensive.
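// A minimal sketch of the merge-first step described above (buildVerticesToMergeForPaths and the
// subsequent merge are assumptions about the MarkovClassifier API, not confirmed by this file):
//   verticesToMergeBasedOnInitialPTA = ptaClassifier.buildVerticesToMergeForPaths(pathsToMerge);
//   those vertex sets are then merged into ptaToUseForInference before the EDSM-Markov learner starts.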