Package: statechum.analysis.learning.observers.ProgressDecorator

Examples of statechum.analysis.learning.observers.ProgressDecorator.LearnerEvaluationConfiguration


    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.init(plusStrings, minusStrings);
      actualC = l.getTentativeAutomaton().pathroutines.getGraph();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eC = e;
    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      PTASequenceEngine engine = buildPTA(plusStrings, minusStrings);
      checkPTAConsistency(engine, plusStrings, true);if (engine.numberOfLeafNodes()>0) checkPTAConsistency(engine, minusStrings, false);
      l.init(engine,0,0);
      actualD = l.getTentativeAutomaton().pathroutines.getGraph();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eD = e;
    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.init(buildPTA(plusStrings, buildSet(new String[][] {},config,converter)),0,0);
      for(List<Label> seq:minusStrings)
      {
        Set<List<Label>> negativeSeq = new HashSet<List<Label>>();negativeSeq.add(seq);
        l.getTentativeAutomaton().paths.augmentPTA(buildPTA(buildSet(new String[][] {},config,converter),negativeSeq));
      }
      actualE = l.getTentativeAutomaton().pathroutines.getGraph();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eE = e;
    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.getTentativeAutomaton().initPTA();
      l.getTentativeAutomaton().paths.augmentPTA(minusStrings, false,true);
      l.getTentativeAutomaton().paths.augmentPTA(plusStrings, true,true);
      actualF = l.getTentativeAutomaton().pathroutines.getGraph();
View Full Code Here


  /** Make sure that we can augment a graph with a single state which is a reject-state. */
  @Test
  public void testPTAconstruction_singleRejectState()
  {
    Configuration config = mainConfiguration.copy();
    RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
    config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
    // set the initial state to be reject
    l.getTentativeAutomaton().initPTA();l.getTentativeAutomaton().getVertex(new LinkedList<Label>()).setAccept(false);
    // and check how augmentPTA works with such a PTA
    for(boolean maxAutomaton:new boolean[]{true,false})
View Full Code Here

  /** For a maximal automaton, a reject-path overrides an accept-one. This is a test with a single-state graph. */
  @Test
  public void testPTAconstruction_singleRejectState_max()
  {
    Configuration config = mainConfiguration.copy();
    RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
    config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
    l.getTentativeAutomaton().initPTA();
    for(List<Label> sequence:buildSet(new String[][] { new String[]{} },config,converter))
      l.getTentativeAutomaton().paths.augmentPTA(sequence, false,true,null);
    for(List<Label> sequence:buildSet(new String[][] { },config,converter))
View Full Code Here

    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.init(plusStrings, minusStrings);
      actualC = l.getTentativeAutomaton();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eC = e;
    }
   
    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      PTASequenceEngine engine = buildPTA(plusStrings, minusStrings);
      checkPTAConsistency(engine, plusStrings, true);if (engine.numberOfLeafNodes()>0) checkPTAConsistency(engine, minusStrings, false);
      l.init(engine,0,0);
      actualD = l.getTentativeAutomaton();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eD = e;
    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.init(buildPTA(plusStrings, buildSet(new String[][] {},config,converter)),0,0);
      for(List<Label> seq:minusStrings)
      {  Set<List<Label>> negativeSeq = new HashSet<List<Label>>();negativeSeq.add(seq);
        l.getTentativeAutomaton().paths.augmentPTA(buildPTA(buildSet(new String[][] {},config,converter),negativeSeq));
      }
      actualE = l.getTentativeAutomaton();
    }
    catch(IllegalArgumentException e)
    {
      // ignore this - it might be expected.
      eE = e;
    }

    try
    {
      Configuration config = mainConfiguration.copy();
      RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.getTentativeAutomaton().initPTA();
      l.getTentativeAutomaton().paths.augmentPTA(minusStrings, false,true);
      l.getTentativeAutomaton().paths.augmentPTA(plusStrings, true,true);
      actualF = l.getTentativeAutomaton();
View Full Code Here

    for(boolean max:new boolean[]{true,false})
    {
      final boolean maxAutomaton = max;
      Configuration config = mainConfiguration.copy();
      final RPNIUniversalLearner l = new RPNIUniversalLearner(null,new LearnerEvaluationConfiguration(null,null,config,null,null));
      config.setLearnerIdMode(Configuration.IDMode.POSITIVE_NEGATIVE);
      l.getTentativeAutomaton().initPTA();
      l.getTentativeAutomaton().paths.augmentPTA(minusStrings, false,maxAutomaton);
      l.getTentativeAutomaton().paths.augmentPTA(plusStrings, true,maxAutomaton);
     
View Full Code Here

    try {
      Random rand = new Random(seed);
      module = targetmodule;
      setupFile(file, useOutputMatching, expand);
      learner = new ErlangOracleLearner(null,
          new LearnerEvaluationConfiguration(config));

      // Make sure our copy of the module is the same object as the
      // learner's so that alphabet mods work...
      module = learner.getModule();
View Full Code Here

      int depth, boolean useOutputMatching, EXPANSIONOFANY expand) {
    try {
      module = targetmodule;
      setupFile(file, useOutputMatching, expand);
      learner = new ErlangOracleLearner(null,
          new LearnerEvaluationConfiguration(config));

      // Make sure our copy of the module is the same object as the
      // learner's so that alphabet mods work...
      module = learner.getModule();
View Full Code Here

    ErlangRuntime.getDefaultRuntime().killErlang();
  }
 
  public void testLockerLearning(Configuration configToUse)
  {
    LearnerEvaluationConfiguration learnerConfig = new LearnerEvaluationConfiguration(configToUse);ErlangModule.setupErlangConfiguration(learnerConfig.config,new File(ErlangExamples,"locker/locker.erl"));
    learnerConfig.config.setErlangAlphabetAnyElements(EXPANSIONOFANY.ANY_WIBBLE);
    //learnerConfig.config.setScoreForAutomergeUponRestart(1);
    ErlangOracleLearner learner = new ErlangOracleLearner(null,learnerConfig);
    learner.GenerateInitialTraces();
    LearnerGraph locker = learner.learnMachine();
View Full Code Here

*/
 
  @Test
  public void testLockerLearning_withRestartCounter()
  {
    LearnerEvaluationConfiguration learnerConfig = new LearnerEvaluationConfiguration(config);ErlangModule.setupErlangConfiguration(learnerConfig.config,new File(ErlangExamples,"locker/locker.erl"));
    learnerConfig.config.setErlangAlphabetAnyElements(EXPANSIONOFANY.ANY_WIBBLE);
    learnerConfig.config.setTransitionMatrixImplType(STATETREE.STATETREE_SLOWTREE);
    //learnerConfig.config.setScoreForAutomergeUponRestart(1);
    ErlangOracleLearner learner = new ErlangOracleLearner(null,learnerConfig);
    Learner learnerAndObserver = new LearningConvergenceObserver(learner);
View Full Code Here

  }
 
  @Test
  public void testLockerLearningWithoutOutputMatching()
  {
    LearnerEvaluationConfiguration learnerConfig = new LearnerEvaluationConfiguration(config);ErlangModule.setupErlangConfiguration(learnerConfig.config,new File(ErlangExamples,"locker/locker.erl"));
    learnerConfig.config.setErlangAlphabetAnyElements(EXPANSIONOFANY.ANY_WIBBLE);
    learnerConfig.config.setUseErlangOutputs(false);
    ErlangOracleLearner learner = new ErlangOracleLearner(null,learnerConfig);
   
    learner.GenerateInitialTraces();
View Full Code Here

TOP

Related Classes of statechum.analysis.learning.observers.ProgressDecorator.LearnerEvaluationConfiguration

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.