Package: cc.mallet.optimize

Usage examples of the cc.mallet.optimize.LimitedMemoryBFGS optimizer.


  }
 
  public Optimizer getOptimizer (InstanceList trainingSet) {
    getOptimizableCRF(trainingSet); // this will set this.mcrf if necessary
    if (opt == null || ocrf != opt.getOptimizable())
      opt = new LimitedMemoryBFGS(ocrf)// Alternative: opt = new ConjugateGradient (0.001);
    return opt;
  }
View Full Code Here


    // Posterior-regularization (PR) style alternating optimization:
    // each outer iteration first fits the constraint model q, then refits
    // the CRF p against q's cached expectations.
    // NOTE(review): fragment — the enclosing method and the loop's closing
    // brace are outside this view.
    for (; iter < max; iter++) {
      long startTime = System.currentTimeMillis();
     
      // train q
      ConstraintsOptimizableByPR opt = new ConstraintsOptimizableByPR(crf, train, model, numThreads);
      bfgs = new LimitedMemoryBFGS(opt);
      try {
        bfgs.optimize(maxIterPerStep);
      } catch (Exception e) {
        // Best-effort: L-BFGS may throw (e.g. line-search failure); keep the
        // parameters found so far rather than aborting the outer loop.
        e.printStackTrace();
      }
      opt.shutdown(); // release the optimizable's worker threads
     
      /*
      for (int j = 0; j < constraints.size(); j++) {
        constraints.get(j).print();
      }
      */
     
      qValue = opt.getCompleteValueContribution();
      assert(qValue > 0);
     
      // use to train p
      // Reuses q's cached dot products so p's objective is consistent with q.
      CRFOptimizableByKL optP = new CRFOptimizableByKL(crf, train, model, opt.getCachedDots(), numThreads, 1);
      optP.setGaussianPriorVariance(pGpv);
      LimitedMemoryBFGS bfgsP = new LimitedMemoryBFGS(optP);
     
      try {
        bfgsP.optimize(maxIterPerStep);
      } catch (Exception e) {
        // Same best-effort policy as the q step above.
        e.printStackTrace();
      }
      optP.shutdown();
     
View Full Code Here

    // Infinite prior variance disables the Gaussian regularizer on the GE term
    // (the likelihood term presumably carries its own prior — TODO confirm).
    ge.setGaussianPriorVariance(Double.POSITIVE_INFINITY);

    // Combine the label-likelihood and GE objectives into one gradient/value
    // function so a single L-BFGS run optimizes their sum.
    CRFOptimizableByGradientValues opt =
      new CRFOptimizableByGradientValues(crf,new Optimizable.ByGradientValue[] { optLikelihood, ge });
   
    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(opt);

    try {
      converged = optimizer.optimize(numIterations);
    }
    catch (Exception e) {
      // Best-effort: a failed line search should not abort training.
      e.printStackTrace();
    }
   
    // Clear L-BFGS history and optimize again from the current parameters;
    // this restart sometimes escapes a poor curvature estimate.
    optimizer.reset();
    try {
      converged = optimizer.optimize(numIterations);
    }
    catch (Exception e) {
      e.printStackTrace();
    }
   
View Full Code Here

    return optimizable;
  }

  public Optimizer getOptimizer(Optimizable.ByGradientValue optimizable) {
    if (optimizer == null) {
      optimizer = new LimitedMemoryBFGS(optimizable);
    }
    return optimizer;
  }
View Full Code Here

    // Two-phase semi-supervised CRF training:
    //  Phase 1 (first call only): warm-start with supervised label likelihood.
    //  Phase 2: retrain with likelihood + entropy regularization on unlabeled data.
    // NOTE(review): fragment — the enclosing method and the final reset loop's
    // body are outside this view.
    if (iteration == 0) {
      // train with log-likelihood only first
      CRFOptimizableByLabelLikelihood likelihood =
        new CRFOptimizableByLabelLikelihood(crf, labeled);
      likelihood.setGaussianPriorVariance(gaussianPriorVariance);
      this.bfgs = new LimitedMemoryBFGS(likelihood);
      logger.info ("CRF about to train with "+numIterations+" iterations");
      // One L-BFGS step per loop iteration so evaluators run after every step.
      for (int i = 0; i < numIterations; i++) {
        try {
          converged = bfgs.optimize(1);
          iteration++;
          logger.info ("CRF finished one iteration of maximizer, i="+i);
          runEvaluators();
        } catch (IllegalArgumentException e) {
          // Typically a too-small step size; treat as convergence.
          e.printStackTrace();
          logger.info ("Catching exception; saying converged.");
          converged = true;
        } catch (Exception e) {
          // NOTE(review): identical handling to the catch above, which it
          // subsumes — the narrower catch is redundant.
          e.printStackTrace();
          logger.info("Catching exception; saying converged.");
          converged = true;
        }
        if (converged) {
          logger.info ("CRF training has converged, i="+i);
          break;
        }
      }
      iteration = 0; // reset so phase 2 counts its own iterations
    }
   
    // train with log-likelihood + entropy regularization
    CRFOptimizableByLabelLikelihood likelihood = new CRFOptimizableByLabelLikelihood(crf, labeled);
    likelihood.setGaussianPriorVariance(gaussianPriorVariance);
    CRFOptimizableByEntropyRegularization regularization = new CRFOptimizableByEntropyRegularization(crf, unlabeled);
    regularization.setScalingFactor(this.entRegScalingFactor);
   
    // Sum of the two objectives, optimized jointly.
    CRFOptimizableByGradientValues regLikelihood = new CRFOptimizableByGradientValues(crf,
        new Optimizable.ByGradientValue[] { likelihood, regularization} );
    this.bfgs = new LimitedMemoryBFGS(regLikelihood);
    converged = false;
    logger.info ("CRF about to train with "+numIterations+" iterations");
    // sometimes resetting the optimizer helps to find
    // a better parameter setting
    for (int reset = 0; reset < DEFAULT_NUM_RESETS + 1; reset++) {
View Full Code Here

   
    // train supervised
    // Supervised warm start for the p model on the labeled subset.
    // NOTE(review): fragment — the enclosing method is outside this view.
    MaxEntOptimizableByLabelDistribution opt = new  MaxEntOptimizableByLabelDistribution(dataLabeled,p);
    opt.setGaussianPriorVariance(pGPV);

    // Optimize, clear L-BFGS history, and optimize again — the restart
    // sometimes improves the objective after a premature termination.
    LimitedMemoryBFGS bfgs = new LimitedMemoryBFGS(opt);
    try { bfgs.optimize(); } catch (Exception e) { e.printStackTrace(); }
    bfgs.reset();
    try { bfgs.optimize(); } catch (Exception e) { e.printStackTrace(); }
   
    // Accumulate the PR constraint contributions to the objective value.
    double value = 0;
    for (MaxEntPRConstraint constraint : q.getConstraintFeatures()) {
      // plus sign because this returns negative values
      value += constraint.getCompleteValueContribution();
View Full Code Here

      }
    }
   
    // Train the PR auxiliary classifier q; optimize-reset-optimize, matching
    // the restart pattern used elsewhere in this file.
    // NOTE(review): fragment — the enclosing method's start is outside this view.
    PRAuxClassifierOptimizable optimizable = new PRAuxClassifierOptimizable(data,base,q);
   
    LimitedMemoryBFGS bfgs = new LimitedMemoryBFGS(optimizable);
    try { bfgs.optimize(); } catch (Exception e) { e.printStackTrace(); }
    bfgs.reset();
    try { bfgs.optimize(); } catch (Exception e) { e.printStackTrace(); }
   
    return base;
  }
View Full Code Here

        // Fit DMR (Dirichlet-multinomial regression) parameters with L-BFGS.
        // Separate priors: tight for regular features, loose for the intercept.
        // NOTE(review): fragment — the enclosing method is outside this view.
        DMROptimizable optimizable = new DMROptimizable(parameterInstances, dmrParameters);
        optimizable.setRegularGaussianPriorVariance(0.5);
        optimizable.setInterceptGaussianPriorVariance(100.0);

    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(optimizable);

    // Optimize once
    try {
      optimizer.optimize();
    } catch (IllegalArgumentException e) {
      // step size too small
    }

    // Restart with a fresh initialization to improve likelihood
    // NOTE(review): the comment above promises a fresh start, but there is no
    // optimizer.reset() before this second optimize() call — sibling snippets
    // call reset() between the two optimize() calls. Confirm whether a
    // reset() is missing here.
    try {
      optimizer.optimize();
    } catch (IllegalArgumentException e) {
      // step size too small
    }
        dmrParameters = optimizable.getClassifier();
View Full Code Here

    // Two-phase semi-supervised CRF training (duplicate of the snippet above):
    //  Phase 1 (first call only): warm-start with supervised label likelihood.
    //  Phase 2: retrain with likelihood + entropy regularization on unlabeled data.
    // NOTE(review): fragment — the enclosing method and the final reset loop's
    // body are outside this view.
    if (iteration == 0) {
      // train with log-likelihood only first
      CRFOptimizableByLabelLikelihood likelihood =
        new CRFOptimizableByLabelLikelihood(crf, labeled);
      likelihood.setGaussianPriorVariance(gaussianPriorVariance);
      this.bfgs = new LimitedMemoryBFGS(likelihood);
      logger.info ("CRF about to train with "+numIterations+" iterations");
      // One L-BFGS step per loop iteration so evaluators run after every step.
      for (int i = 0; i < numIterations; i++) {
        try {
          converged = bfgs.optimize(1);
          iteration++;
          logger.info ("CRF finished one iteration of maximizer, i="+i);
          runEvaluators();
        } catch (IllegalArgumentException e) {
          // Typically a too-small step size; treat as convergence.
          e.printStackTrace();
          logger.info ("Catching exception; saying converged.");
          converged = true;
        } catch (Exception e) {
          // NOTE(review): identical handling to the catch above, which it
          // subsumes — the narrower catch is redundant.
          e.printStackTrace();
          logger.info("Catching exception; saying converged.");
          converged = true;
        }
        if (converged) {
          logger.info ("CRF training has converged, i="+i);
          break;
        }
      }
      iteration = 0; // reset so phase 2 counts its own iterations
    }
   
    // train with log-likelihood + entropy regularization
    CRFOptimizableByLabelLikelihood likelihood = new CRFOptimizableByLabelLikelihood(crf, labeled);
    likelihood.setGaussianPriorVariance(gaussianPriorVariance);
    CRFOptimizableByEntropyRegularization regularization = new CRFOptimizableByEntropyRegularization(crf, unlabeled);
    regularization.setScalingFactor(this.entRegScalingFactor);
   
    // Sum of the two objectives, optimized jointly.
    CRFOptimizableByGradientValues regLikelihood = new CRFOptimizableByGradientValues(crf,
        new Optimizable.ByGradientValue[] { likelihood, regularization} );
    this.bfgs = new LimitedMemoryBFGS(regLikelihood);
    converged = false;
    logger.info ("CRF about to train with "+numIterations+" iterations");
    // sometimes resetting the optimizer helps to find
    // a better parameter setting
    for (int reset = 0; reset < DEFAULT_NUM_RESETS + 1; reset++) {
View Full Code Here

   
    // Train the CRF against generalized-expectation (GE) criteria over
    // unlabeled data, with periodic L-BFGS resets.
    // NOTE(review): fragment — the enclosing method's start is outside this view.
    CRFOptimizableByGECriteria ge =
      new CRFOptimizableByGECriteria(criteria, crf, unlabeledSet, numThreads);
    ge.setGaussianPriorVariance(gaussianPriorVariance);
   
    LimitedMemoryBFGS bfgs = new LimitedMemoryBFGS(ge);
   
    converged = false;
    logger.info ("CRF about to train with "+numIterations+" iterations");
    // sometimes resetting the optimizer helps to find
    // a better parameter setting
    int iter = 0;
    // `iter` is deliberately shared across reset cycles so numIterations is a
    // total budget, not a per-cycle budget.
    for (int reset = 0; reset < DEFAULT_NUM_RESETS + 1; reset++) {
      for (; iter < numIterations; iter++) {
        try {
          // One L-BFGS step at a time so evaluators run after every step.
          converged = bfgs.optimize (1);
          iteration++;
          logger.info ("CRF finished one iteration of maximizer, i="+iter);
          runEvaluators();
        } catch (IllegalArgumentException e) {
          // Typically a too-small step size; treat as convergence.
          e.printStackTrace();
          logger.info ("Catching exception; saying converged.");
          converged = true;
        } catch (Exception e) {
          // NOTE(review): identical handling to the catch above, which it
          // subsumes — the narrower catch is redundant.
          e.printStackTrace();
          logger.info("Catching exception; saying converged.");
          converged = true;
        }
        if (converged) {
          logger.info ("CRF training has converged, i="+iter);
          break;
        }
      }
      // Clear L-BFGS curvature history before the next cycle.
      bfgs.reset();
    }
   
    ge.shutdown(); // release the optimizable's worker threads
   
    return converged;
View Full Code Here

TOP

Related Classes of cc.mallet.optimize.LimitedMemoryBFGS

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact: coftware#gmail.com.