Examples of org.apache.lucene.benchmark.quality.QualityBenchmark


Examples of org.apache.lucene.benchmark.quality.QualityBenchmark

    judge.validateData(qqs, logger);
   
    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")), true);

    QualityQueryParser qqParser = new SimpleQQParser("title","body");
    QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
   
    SubmissionReport submitLog = DEBUG ? new SubmissionReport(logger, "TestRun") : null;
    qrun.setMaxResults(maxResults);
    QualityStats stats[] = qrun.execute(judge, submitLog, logger);
   
    // --------- verify by the way judgments were altered for this test:
    // for some queries, depending on m = qnum % 8
    // m==0: avg_precision and recall are hurt, by marking fake docs as relevant
    // m==1: precision_at_n and avg_precision are hurt, by unmarking relevant docs
View Full Code Here

Examples of org.apache.lucene.benchmark.quality.QualityBenchmark

    judge.validateData(qqs, logger);
   
    IndexSearcher searcher = new IndexSearcher(FSDirectory.getDirectory(new File(workDir,"index")));

    QualityQueryParser qqParser = new SimpleQQParser("title","body");
    QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
   
    SubmissionReport submitLog = DEBUG ? new SubmissionReport(logger, "TestRun") : null;
    qrun.setMaxResults(maxResults);
    QualityStats stats[] = qrun.execute(judge, submitLog, logger);
   
    // --------- verify by the way judgments were altered for this test:
    // for some queries, depending on m = qnum % 8
    // m==0: avg_precision and recall are hurt, by marking fake docs as relevant
    // m==1: precision_at_n and avg_precision are hurt, by unmarking relevant docs
View Full Code Here

Examples of org.apache.lucene.benchmark.quality.QualityBenchmark

    judge.validateData(qqs, logger);
   
    IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(workDir,"index")));

    QualityQueryParser qqParser = new SimpleQQParser("title","body");
    QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
   
    SubmissionReport submitLog = DEBUG ? new SubmissionReport(logger, "TestRun") : null;
    qrun.setMaxResults(maxResults);
    QualityStats stats[] = qrun.execute(judge, submitLog, logger);
   
    // --------- verify by the way judgments were altered for this test:
    // for some queries, depending on m = qnum % 8
    // m==0: avg_precision and recall are hurt, by marking fake docs as relevant
    // m==1: precision_at_n and avg_precision are hurt, by unmarking relevant docs
View Full Code Here

Examples of org.apache.lucene.benchmark.quality.QualityBenchmark

   
    // set the parsing of quality queries into Lucene queries.
    QualityQueryParser qqParser = new SimpleQQParser(fieldSet.toArray(new String[0]), "body");

    // run the benchmark
    QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
    qrun.setMaxResults(maxResults);
    QualityStats stats[] = qrun.execute(judge, submitLog, logger);

    // print an average of the results
    QualityStats avg = QualityStats.average(stats);
    avg.log("SUMMARY", 2, logger, "  ");
   
View Full Code Here

Examples of org.apache.lucene.benchmark.quality.QualityBenchmark

   
    // set the parsing of quality queries into Lucene queries.
    QualityQueryParser qqParser = new SimpleQQParser(fieldSet.toArray(new String[0]), "body");

    // run the benchmark
    QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
    qrun.setMaxResults(maxResults);
    QualityStats stats[] = qrun.execute(judge, submitLog, logger);

    // print an average of the results
    QualityStats avg = QualityStats.average(stats);
    avg.log("SUMMARY", 2, logger, "  ");
   
View Full Code Here

Examples of uk.ac.ucl.panda.applications.evaluation.trec.QualityBenchmark

      // set the parsing of quality queries into Lucene queries.
      QualityQueryParser qqParser = new SimpleQQParser("title", "body");

      // run the benchmark
      QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, index, docNameField);
      qrun.setMaxResults(maxResults);
      SubmissionReport submitLog = null;
     
     
     
      QualityStats stats[] = qrun.execute(judge, submitLog, logger, scorelogger);

      // print an average of the results
      QualityStats avg = QualityStats.average(stats);
      avg.log("SUMMARY", 2, logger, "  ");
 
View Full Code Here

Examples of uk.ac.ucl.panda.applications.evaluation.trec.QualityBenchmark

      // set the parsing of quality queries into Lucene queries.
      QualityQueryParser qqParser = new SimpleQQParser("title", "body");

      // run the benchmark
      QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, index, docNameField);
      qrun.setMaxResults(maxResults);
      SubmissionReport submitLog = null;
     
           
            //batch
             logger.print("MAP"+'\t'+"MRR"+'\t'+"Recall"+'\t'+"1-call"+'\t'+"2-call"+'\t'+"3-call"+'\t'+"4-call"+'\t'+"5-call"+'\t'+"6-call"+'\t'+"7-call"+'\t'+"8-call"+'\t'+"9-call"+'\t'+"10-call"+'\t'+"NDCG@1"+'\t'+"NDCG@5"+'\t'+"NDCG@10"+'\t'+"NDCG@15"+'\t'+"NDCG@20"+"NDCG@35"+'\t'+"NDCG@50"+'\t'+"NDCG@70"+'\t'+"NDCG@100"+'\t'+"NDCG@200"+'\t'+"NDCG@250"+'\t'+"NDCG@400"+'\t'+"NDCG@500"+'\t'+"NDCG@600"+'\t'+"NDCG@700"+'\t');
            for(int i =1; i<=70 ;i++){
                logger.print('\t'+"Precision@"+i);
            }
           
            logger.println();
      /**
       * Var adjust
       */

        
      for(double a1=0.0d; a1<=0.0d; a1+=10.0d){
     
      QualityStats stats[] = qrun.execute(judge, submitLog, null, scorelogger, a1, 0);

      // print an average of the results
      QualityStats avg = QualityStats.average(stats);
      avg.batch_log(Double.toString(a1) , 2, logger, "  ");
 
View Full Code Here

Examples of uk.ac.ucl.panda.applications.evaluation.trec.QualityBenchmark

      // set the parsing of quality queries into Lucene queries.
      QualityQueryParser qqParser = new SimpleQQParser("title", "body");

      // run the benchmark
      QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, index, docNameField);
      qrun.setMaxResults(maxResults);
      SubmissionReport submitLog = null;


            //batch
             logger.print("MAP"+'\t'+"MRR"+'\t'+"Recall"+'\t'+"1-call"+'\t'+"2-call"+'\t'+"3-call"+'\t'+"4-call"+'\t'+"5-call"+'\t'+"6-call"+'\t'+"7-call"+'\t'+"8-call"+'\t'+"9-call"+'\t'+"10-call"+'\t'+"NDCG@1"+'\t'+"NDCG@5"+'\t'+"NDCG@10"+'\t'+"NDCG@15"+'\t'+"NDCG@20"+"NDCG@35"+'\t'+"NDCG@50"+'\t'+"NDCG@70"+'\t'+"NDCG@100"+'\t'+"NDCG@200"+'\t'+"NDCG@250"+'\t'+"NDCG@400"+'\t'+"NDCG@500"+'\t'+"NDCG@600"+'\t'+"NDCG@700"+'\t');
            for(int i =1; i<=70 ;i++){
                logger.print('\t'+"Precision@"+i);
            }

            logger.println();
      /**
       * Var adjust
       */


      for(double a1=0.000001d; a1<=1.0d; a1+=1.0d){

            for(double a2=10.0d; a2<=15.0d; a2+=10.0d){
      QualityStats stats[] = qrun.execute_var(judge, submitLog, null, scorelogger, a1, a2);
      // print an average of the results
      QualityStats avg = QualityStats.average(stats);
      avg.batch_log(Double.toString(a1) , 2, logger, "  ");
        }

View Full Code Here

Examples of uk.ac.ucl.panda.applications.evaluation.trec.QualityBenchmark

      // set the parsing of quality queries into Lucene queries.
      QualityQueryParser qqParser = new SimpleQQParser("title", "body");

      // run the benchmark
      QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, index, docNameField);
      qrun.setMaxResults(maxResults);
      SubmissionReport submitLog = null;


            //batch
             logger.print("MAP"+'\t'+"MRR"+'\t'+"Recall"+'\t'+"1-call"+'\t'+"2-call"+'\t'+"3-call"+'\t'+"4-call"+'\t'+"5-call"+'\t'+"6-call"+'\t'+"7-call"+'\t'+"8-call"+'\t'+"9-call"+'\t'+"10-call"+'\t'+"NDCG@1"+'\t'+"NDCG@5"+'\t'+"NDCG@10"+'\t'+"NDCG@15"+'\t'+"NDCG@20"+"NDCG@35"+'\t'+"NDCG@50"+'\t'+"NDCG@70"+'\t'+"NDCG@100"+'\t'+"NDCG@200"+'\t'+"NDCG@250"+'\t'+"NDCG@400"+'\t'+"NDCG@500"+'\t'+"NDCG@600"+'\t'+"NDCG@700"+'\t');
            for(int i =1; i<=70 ;i++){
                logger.print('\t'+"Precision@"+i);
            }

            logger.println();
      /**
       * Var adjust
       */


      for(double a1=0.0d; a1<=0.0d; a1+=1.0d){

      QualityStats stats[] = qrun.execute_plot(judge, submitLog, null, scorelogger, a1, 0, relScoreLogger);

      // print an average of the results
      QualityStats avg = QualityStats.average(stats);
      avg.batch_log(Double.toString(a1) , 2, logger, "  ");

View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code are property of their respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.