Package org.apache.solr.common.params

Examples of org.apache.solr.common.params.SolrParams
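
The excerpts below come from Solr request handlers, response writers, a directory factory, a field type, query parsers, and an update client. They show the usual ways a SolrParams is obtained (req.getParams(), SolrParams.toSolrParams(NamedList), new MapSolrParams(map)) and read (get, getInt, getBool, getFloat, getParams, required()). As a quick orientation, here is a minimal, self-contained sketch of those accessors; the class name, the parameter values, and the "example.required" parameter are invented for illustration, while the API calls themselves are the ones exercised in the excerpts.

import java.util.HashMap;
import java.util.Map;

import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.SolrParams;

public class SolrParamsSketch {
  public static void main(String[] args) {
    // A plain string map wrapped as SolrParams, as the PointType excerpt does
    // with its schema args.
    Map<String, String> raw = new HashMap<String, String>();
    raw.put("q", "solr");
    raw.put("rows", "20");
    SolrParams params = new MapSolrParams(raw);

    String q = params.get("q");                     // null when the param is absent
    int rows = params.getInt("rows", 10);           // typed access with a default
    boolean facet = params.getBool("facet", false); // absent -> default value

    // required() returns a view whose getters throw a BAD_REQUEST SolrException
    // instead of returning null, as the MoreLikeThis helper excerpt relies on.
    try {
      params.required().get("example.required");    // hypothetical parameter name
    } catch (SolrException expected) {
      System.out.println("missing required param: " + expected.getMessage());
    }

    System.out.println(q + " rows=" + rows + " facet=" + facet);
  }
}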


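// Excerpt from the MoreLikeThis request handler: the query, content stream, return fields, paging, and MLT options are all read from the request's SolrParams.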
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {
    SolrParams params = req.getParams();
    SolrIndexSearcher searcher = req.getSearcher();
   
   
    MoreLikeThisHelper mlt = new MoreLikeThisHelper( params, searcher );
    List<Query> filters = SolrPluginUtils.parseFilterQueries(req);
   
    // Hold on to the interesting terms if relevant
    TermStyle termStyle = TermStyle.get( params.get( MoreLikeThisParams.INTERESTING_TERMS ) );
    List<InterestingTerm> interesting = (termStyle == TermStyle.NONE )
      ? null : new ArrayList<InterestingTerm>( mlt.mlt.getMaxQueryTerms() );
   
    DocListAndSet mltDocs = null;
    String q = params.get( CommonParams.Q );
   
    // Parse Required Params
    // This will either have a single Reader or a valid query
    Reader reader = null;
    try {
      if (q == null || q.trim().length() < 1) {
        Iterable<ContentStream> streams = req.getContentStreams();
        if (streams != null) {
          Iterator<ContentStream> iter = streams.iterator();
          if (iter.hasNext()) {
            reader = iter.next().getReader();
          }
          if (iter.hasNext()) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                "MoreLikeThis does not support multiple ContentStreams");
          }
        }
      }

      // What fields do we need to return
      String fl = params.get(CommonParams.FL);
      int flags = 0;
      if (fl != null) {
        flags |= SolrPluginUtils.setReturnFields(fl, rsp);
      }

      int start = params.getInt(CommonParams.START, 0);
      int rows = params.getInt(CommonParams.ROWS, 10);

      // Find documents MoreLikeThis - either with a reader or a query
      // --------------------------------------------------------------------------------
      if (reader != null) {
        mltDocs = mlt.getMoreLikeThis(reader, start, rows, filters,
            interesting, flags);
      } else if (q != null) {
        // Matching options
        boolean includeMatch = params.getBool(MoreLikeThisParams.MATCH_INCLUDE,
            true);
        int matchOffset = params.getInt(MoreLikeThisParams.MATCH_OFFSET, 0);
        // Find the base match
        Query query = QueryParsing.parseQuery(q, params.get(CommonParams.DF),
            params, req.getSchema());
        DocList match = searcher.getDocList(query, null, null, matchOffset, 1,
            flags); // only get the first one...
        if (includeMatch) {
          rsp.add("match", match);
        }

        // This is an iterator, but we only handle the first match
        DocIterator iterator = match.iterator();
        if (iterator.hasNext()) {
          // run MoreLikeThis for the first matching document
          int id = iterator.nextDoc();
          mltDocs = mlt.getMoreLikeThis(id, start, rows, filters, interesting,
              flags);
        }
      } else {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "MoreLikeThis requires either a query (?q=) or text to find similar documents.");
      }

    } finally {
      if (reader != null) {
        reader.close();
      }
    }
   
    if( mltDocs == null ) {
      mltDocs = new DocListAndSet(); // avoid NPE
    }
    rsp.add( "response", mltDocs.docList );
   
 
    if( interesting != null ) {
      if( termStyle == TermStyle.DETAILS ) {
        NamedList<Float> it = new NamedList<Float>();
        for( InterestingTerm t : interesting ) {
          it.add( t.term.toString(), t.boost );
        }
        rsp.add( "interestingTerms", it );
      }
      else {
        List<String> it = new ArrayList<String>( interesting.size() );
        for( InterestingTerm t : interesting ) {
          it.add( t.term.text());
        }
        rsp.add( "interestingTerms", it );
      }
    }
   
    // maybe facet the results
    if (params.getBool(FacetParams.FACET,false)) {
      if( mltDocs.docSet == null ) {
        rsp.add( "facet_counts", null );
      }
      else {
        SimpleFacets f = new SimpleFacets(req, mltDocs.docSet, params );
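
// Excerpt from the MoreLikeThis helper setup: params.required() enforces that at least one similarity field is given, and FacetParams.FACET decides whether a DocSet is needed.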
      this.searcher = searcher;
      this.reader = searcher.getReader();
      this.uniqueKeyField = searcher.getSchema().getUniqueKeyField();
      this.needDocSet = params.getBool(FacetParams.FACET,false);
     
      SolrParams required = params.required();
      String[] fields = splitList.split( required.get(MoreLikeThisParams.SIMILARITY_FIELDS) );
      if( fields.length < 1 ) {
        throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
            "MoreLikeThis requires at least one similarity field: "+MoreLikeThisParams.SIMILARITY_FIELDS );
      }
     
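
// Excerpt from ContentStreamHandlerBase: the update processor chain is resolved from a request parameter.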
public abstract class ContentStreamHandlerBase extends RequestHandlerBase {
  public static Logger log = LoggerFactory.getLogger(XmlUpdateRequestHandler.class);

  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    SolrParams params = req.getParams();
    UpdateRequestProcessorChain processorChain =
            req.getCore().getUpdateProcessingChain(SolrPluginUtils.resolveUpdateChainParam(params, log));

    UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp);
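
// Excerpt from an MMapDirectory factory: init args arrive as a NamedList, are wrapped as SolrParams, and are read with typed defaults (maxChunkSize, unmap).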
    return mapDirectory;
  }

  @Override
  public void init(NamedList args) {
    SolrParams params = SolrParams.toSolrParams( args );
    maxChunk = params.getInt("maxChunkSize", MMapDirectory.DEFAULT_MAX_BUFF);
    if (maxChunk <= 0){
      throw new IllegalArgumentException("maxChunk must be greater than 0");
    }
    unmapHack = params.getBool("unmap", true);
  }
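
// Excerpt from the PointType field type: the schema args are wrapped in a MapSolrParams to read and validate the dimension.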
public class PointType extends CoordinateFieldType implements SpatialQueryable {

  @Override
  protected void init(IndexSchema schema, Map<String, String> args) {
    SolrParams p = new MapSolrParams(args);
    dimension = p.getInt(DIMENSION, DEFAULT_DIMENSION);
    if (dimension < 1) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "The dimension must be > 0: " + dimension);
    }
    args.remove(DIMENSION);
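
// Excerpt from the CSV response writer: separator, newline, encapsulator, escape, per-field overrides, header, and null-value options all come from SolrParams.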
  public CSVWriter(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) {
    super(writer, req, rsp);
  }

  public void writeResponse() throws IOException {
    SolrParams params = req.getParams();

    strategy = new CSVStrategy(',', '"', CSVStrategy.COMMENTS_DISABLED, CSVStrategy.ESCAPE_DISABLED, false, false, false, true);
    CSVStrategy strat = strategy;

    String sep = params.get(CSV_SEPARATOR);
    if (sep!=null) {
      if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid separator:'"+sep+"'");
      strat.setDelimiter(sep.charAt(0));
    }

    String nl = params.get(CSV_NEWLINE);
    if (nl!=null) {
      if (nl.length()==0) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid newline:'"+nl+"'");
      strat.setPrinterNewline(nl);
    }

    String encapsulator = params.get(CSV_ENCAPSULATOR);
    String escape = params.get(CSV_ESCAPE);
    if (encapsulator!=null) {
      if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid encapsulator:'"+encapsulator+"'");
      strat.setEncapsulator(encapsulator.charAt(0));
    }

    if (escape!=null) {
      if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid escape:'"+escape+"'");
      strat.setEscape(escape.charAt(0));
      if (encapsulator == null) {
        strat.setEncapsulator( CSVStrategy.ENCAPSULATOR_DISABLED);
      }
    }

    if (strat.getEscape() == '\\') {
      // If the escape is the standard backslash, then also enable
      // unicode escapes (it's harmless since 'u' would not otherwise
      // be escaped).
      strat.setUnicodeEscapeInterpretation(true);
    }

    printer = new CSVPrinter(writer, strategy);
   

    CSVStrategy mvStrategy = new CSVStrategy(strategy.getDelimiter(), CSVStrategy.ENCAPSULATOR_DISABLED, CSVStrategy.COMMENTS_DISABLED, '\\', false, false, false, false);
    strat = mvStrategy;

    sep = params.get(MV_SEPARATOR);
    if (sep!=null) {
      if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv separator:'"+sep+"'");
      strat.setDelimiter(sep.charAt(0));
    }

    encapsulator = params.get(MV_ENCAPSULATOR);
    escape = params.get(MV_ESCAPE);

    if (encapsulator!=null) {
      if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv encapsulator:'"+encapsulator+"'");
      strat.setEncapsulator(encapsulator.charAt(0));
      if (escape == null) {
        strat.setEscape(CSVStrategy.ESCAPE_DISABLED);
      }
    }

    escape = params.get(MV_ESCAPE);
    if (escape!=null) {
      if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv escape:'"+escape+"'");
      strat.setEscape(escape.charAt(0));
      // encapsulator will already be disabled if it wasn't specified
    }

    returnScore = returnFields != null && returnFields.contains("score");
    boolean needListOfFields = returnFields==null || returnFields.size()==0 || (returnFields.size()==1 && returnScore) || returnFields.contains("*");
    Collection<String> fields = returnFields;

    Object responseObj = rsp.getValues().get("response");
    if (needListOfFields) {
      if (responseObj instanceof SolrDocumentList) {
        // get the list of fields from the SolrDocumentList
        fields = new LinkedHashSet<String>();
        for (SolrDocument sdoc: (SolrDocumentList)responseObj) {
          fields.addAll(sdoc.getFieldNames());
        }
      } else {
        // get the list of fields from the index
        fields = req.getSearcher().getFieldNames();
      }
      if (returnScore) {
        fields.add("score");
      } else {
        fields.remove("score");
      }
    }

    CSVSharedBufPrinter csvPrinterMV = new CSVSharedBufPrinter(mvWriter, mvStrategy);

    for (String field : fields) {
      if (field.equals("score")) {
        CSVField csvField = new CSVField();
        csvField.name = "score";
        csvFields.put("score", csvField);
        continue;
      }

      SchemaField sf = schema.getFieldOrNull(field);
      if (sf == null) {
        FieldType ft = new StrField();
        sf = new SchemaField(field, ft);
      }

      // if we got the list of fields from the index, only list stored fields
      if (returnFields==null && sf != null && !sf.stored()) {
        continue;
      }

      // check for per-field overrides
      sep = params.get("f." + field + '.' + CSV_SEPARATOR);
      encapsulator = params.get("f." + field + '.' + CSV_ENCAPSULATOR);
      escape = params.get("f." + field + '.' + CSV_ESCAPE);

      CSVSharedBufPrinter csvPrinter = csvPrinterMV;
      if (sep != null || encapsulator != null || escape != null) {
        // create a new strategy + printer if there were any per-field overrides
        strat = (CSVStrategy)mvStrategy.clone();
        if (sep!=null) {
          if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv separator:'"+sep+"'");
          strat.setDelimiter(sep.charAt(0));
        }
        if (encapsulator!=null) {
          if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv encapsulator:'"+encapsulator+"'");
          strat.setEncapsulator(encapsulator.charAt(0));
          if (escape == null) {
            strat.setEscape(CSVStrategy.ESCAPE_DISABLED);
          }
        }
        if (escape!=null) {
          if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv escape:'"+escape+"'");
          strat.setEscape(escape.charAt(0));
          if (encapsulator == null) {
            strat.setEncapsulator(CSVStrategy.ENCAPSULATOR_DISABLED);
          }
        }       
        csvPrinter = new CSVSharedBufPrinter(mvWriter, strat);
      }


      CSVField csvField = new CSVField();
      csvField.name = field;
      csvField.sf = sf;
      csvField.mvPrinter = csvPrinter;
      csvFields.put(field, csvField);
    }

    NullValue = params.get(CSV_NULL, "");

    if (params.getBool(CSV_HEADER, true)) {
      for (CSVField csvField : csvFields.values()) {
        printer.print(csvField.name);
      }
      printer.println();
    }
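
// Excerpt from QParser.getParser: the request's params act as global params, while local params ({!...} prefixes) are parsed out of the query string itself.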
  public static QParser getParser(String qstr, String defaultType, SolrQueryRequest req) throws ParseException {
    // SolrParams localParams = QueryParsing.getLocalParams(qstr, req.getParams());

    String stringIncludingLocalParams = qstr;
    SolrParams localParams = null;
    SolrParams globalParams = req.getParams();
    boolean valFollowedParams = true;
    int localParamsEnd = -1;

    if (qstr != null && qstr.startsWith(QueryParsing.LOCALPARAM_START)) {
      Map<String, String> localMap = new HashMap<String, String>();
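
// Excerpt from a DisMax-style query parser: local params override request params via DefaultSolrParams, and qf, pf, pf2, pf3, tie, ps, qs, mm, stopwords, bq, bf, and boost are read from the merged SolrParams.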
  private QParser altQParser;


  @Override
  public Query parse() throws ParseException {
    SolrParams localParams = getLocalParams();
    SolrParams params = getParams();
   
    SolrParams solrParams = localParams == null ? params : new DefaultSolrParams(localParams, params);

    queryFields = U.parseFieldBoosts(solrParams.getParams(DMP.QF));
    if (0 == queryFields.size()) {
      queryFields.put(req.getSchema().getDefaultSearchFieldName(), 1.0f);
    }
   
    // Boosted phrase of the full query string
    Map<String,Float> phraseFields =
      U.parseFieldBoosts(solrParams.getParams(DMP.PF));
    // Boosted Bi-Term Shingles from the query string
    Map<String,Float> phraseFields2 =
      U.parseFieldBoosts(solrParams.getParams("pf2"));
    // Boosted Tri-Term Shingles from the query string
    Map<String,Float> phraseFields3 =
      U.parseFieldBoosts(solrParams.getParams("pf3"));

    float tiebreaker = solrParams.getFloat(DMP.TIE, 0.0f);

    int pslop = solrParams.getInt(DMP.PS, 0);
    int qslop = solrParams.getInt(DMP.QS, 0);

    // remove stopwords from mandatory "matching" component?
    boolean stopwords = solrParams.getBool("stopwords", true);

    /* the main query we will execute.  we disable the coord because
     * this query is an artificial construct
     */
    BooleanQuery query = new BooleanQuery(true);

    /* * * Main User Query * * */
    parsedUserQuery = null;
    String userQuery = getString();
    altUserQuery = null;
    if( userQuery == null || userQuery.length() < 1 ) {
      // If no query is specified, we may have an alternate
      String altQ = solrParams.get( DMP.ALTQ );
      if (altQ != null) {
        altQParser = subQuery(altQ, null);
        altUserQuery = altQParser.getQuery();
        query.add( altUserQuery , BooleanClause.Occur.MUST );
      } else {
        throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "missing query string" );
      }
    }
    else {    
      // There is a valid query string
      // userQuery = partialEscape(U.stripUnbalancedQuotes(userQuery)).toString();

      boolean lowercaseOperators = solrParams.getBool("lowercaseOperators", true);
      String mainUserQuery = userQuery;

      ExtendedSolrQueryParser up =
        new ExtendedSolrQueryParser(this, IMPOSSIBLE_FIELD_NAME);
      up.addAlias(IMPOSSIBLE_FIELD_NAME,
                tiebreaker, queryFields);
      up.setPhraseSlop(qslop);     // slop for explicit user phrase queries
      up.setAllowLeadingWildcard(true);

      // defer escaping and only do it if lucene parsing fails; we need the
      // split clauses for the sloppy phrase queries anyway.
      List<Clause> clauses = null;
      boolean specialSyntax = false;
      int numPluses = 0;
      int numMinuses = 0;
      int numOptional = 0;
      int numAND = 0;
      int numOR = 0;
      int numNOT = 0;
      boolean sawLowerAnd=false;
      boolean sawLowerOr=false;

      clauses = splitIntoClauses(userQuery, false);
      for (Clause clause : clauses) {
        if (!clause.isPhrase && clause.hasSpecialSyntax) {
          specialSyntax = true;
        }
        if (clause.must == '+') numPluses++;
        if (clause.must == '-') numMinuses++;
        if (clause.isBareWord()) {
          String s = clause.val;
          if ("AND".equals(s)) {
            numAND++;
          } else if ("OR".equals(s)) {
            numOR++;
          } else if ("NOT".equals(s)) {
            numNOT++;
          } else if (lowercaseOperators) {
            if ("and".equals(s)) {
              numAND++;
              sawLowerAnd=true;
            } else if ("or".equals(s)) {
              numOR++;
              sawLowerOr=true;
            }
          }
        }
      }
      numOptional = clauses.size() - (numPluses + numMinuses);

      // convert lower or mixed case operators to uppercase if we saw them.
      // only do this for the lucene query part and not for phrase query boosting
      // since some fields might not be case insensitive.
      // We don't use a regex for this because it might change an AND or OR in
      // a phrase query in a case-sensitive field.
      if (sawLowerAnd || sawLowerOr) {
        StringBuilder sb = new StringBuilder();
        for (int i=0; i<clauses.size(); i++) {
          Clause clause = clauses.get(i);
          String s = clause.raw;
          // "and" and "or" won't be operators at the start or end
          if (i>0 && i+1<clauses.size()) {
            if ("AND".equalsIgnoreCase(s)) {
              s="AND";
            } else if ("OR".equalsIgnoreCase(s)) {
              s="OR";
            }
          }
          sb.append(s);
          sb.append(' ');
        }

        mainUserQuery = sb.toString();
      }

      // For correct lucene queries, turn off mm processing if there
      // were explicit operators (except for AND).
      boolean doMinMatched = (numOR + numNOT + numPluses + numMinuses) == 0;

      try {
        up.setRemoveStopFilter(!stopwords);
        up.exceptions = true;
        parsedUserQuery = up.parse(mainUserQuery);

        if (stopwords && isEmpty(parsedUserQuery)) {
          // the query was all stop words; re-parse without the stop filter so they are kept
          up.setRemoveStopFilter(true);
          parsedUserQuery = up.parse(mainUserQuery);         
        }
      } catch (Exception e) {
        // ignore failure and reparse later after escaping reserved chars
        up.exceptions = false;
      }

      if (parsedUserQuery != null && doMinMatched) {
        String minShouldMatch = solrParams.get(DMP.MM, "100%");
        if (parsedUserQuery instanceof BooleanQuery) {
          U.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
        }
      }


      if (parsedUserQuery == null) {
        StringBuilder sb = new StringBuilder();
        for (Clause clause : clauses) {

          boolean doQuote = clause.isPhrase;

          String s=clause.val;
          if (!clause.isPhrase && ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s))) {
            doQuote=true;
          }

          if (clause.must != 0) {
            sb.append(clause.must);
          }
          if (clause.field != null) {
            sb.append(clause.field);
            sb.append(':');
          }
          if (doQuote) {
            sb.append('"');
          }
          sb.append(clause.val);
          if (doQuote) {
            sb.append('"');
          }
          sb.append(' ');
        }
        String escapedUserQuery = sb.toString();
        parsedUserQuery = up.parse(escapedUserQuery);

        // Only do minimum-match logic
        String minShouldMatch = solrParams.get(DMP.MM, "100%");

        if (parsedUserQuery instanceof BooleanQuery) {
          BooleanQuery t = new BooleanQuery();
          U.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
          U.setMinShouldMatch(t, minShouldMatch);
          parsedUserQuery = t;
        }
      }

      query.add(parsedUserQuery, BooleanClause.Occur.MUST);

      // sloppy phrase queries for proximity
      if (phraseFields.size() > 0 ||
          phraseFields2.size() > 0 ||
          phraseFields3.size() > 0) {
       
        // find non-field clauses
        List<Clause> normalClauses = new ArrayList<Clause>(clauses.size());
        for (Clause clause : clauses) {
          if (clause.field != null || clause.isPhrase) continue;
          // check for keywords "AND,OR,TO"
          if (clause.isBareWord()) {
            String s = clause.val.toString();
            // avoid putting explicit operators in the phrase query
            if ("OR".equals(s) || "AND".equals(s) || "NOT".equals(s) || "TO".equals(s)) continue;
          }
          normalClauses.add(clause);
        }

        // full phrase...
        addShingledPhraseQueries(query, normalClauses, phraseFields, 0,
                                 tiebreaker, pslop);
        // shingles...
        addShingledPhraseQueries(query, normalClauses, phraseFields2, 2,
                                 tiebreaker, pslop);
        addShingledPhraseQueries(query, normalClauses, phraseFields3, 3,
                                 tiebreaker, pslop);
       
      }
    }



    /* * * Boosting Query * * */
    boostParams = solrParams.getParams(DMP.BQ);
    //List<Query> boostQueries = U.parseQueryStrings(req, boostParams);
    boostQueries=null;
    if (boostParams!=null && boostParams.length>0) {
      boostQueries = new ArrayList<Query>();
      for (String qs : boostParams) {
        if (qs.trim().length()==0) continue;
        Query q = subQuery(qs, null).getQuery();
        boostQueries.add(q);
      }
    }
    if (null != boostQueries) {
      for(Query f : boostQueries) {
        query.add(f, BooleanClause.Occur.SHOULD);
      }
    }

    /* * * Boosting Functions * * */

    String[] boostFuncs = solrParams.getParams(DMP.BF);
    if (null != boostFuncs && 0 != boostFuncs.length) {
      for (String boostFunc : boostFuncs) {
        if(null == boostFunc || "".equals(boostFunc)) continue;
        Map<String,Float> ff = SolrPluginUtils.parseFieldBoosts(boostFunc);
        for (String f : ff.keySet()) {
          Query fq = subQuery(f, FunctionQParserPlugin.NAME).getQuery();
          Float b = ff.get(f);
          if (null != b) {
            fq.setBoost(b);
          }
          query.add(fq, BooleanClause.Occur.SHOULD);
        }
      }
    }


    //
    // create a boosted query (scores multiplied by boosts)
    //
    Query topQuery = query;
    multBoosts = solrParams.getParams("boost");
    if (multBoosts!=null && multBoosts.length>0) {

      List<ValueSource> boosts = new ArrayList<ValueSource>();
      for (String boostStr : multBoosts) {
        if (boostStr==null || boostStr.length()==0) continue;
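
// Excerpt from a streaming update client: UpdateParams.WAIT_SEARCHER decides whether to block until queued updates finish before sending the request.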
    if( req.getDocuments()==null || req.getDocuments().isEmpty() ) {
      blockUntilFinished();
      return super.request( request );
    }

    SolrParams params = req.getParams();
    if( params != null ) {
      // check if it is waiting for the searcher
      if( params.getBool( UpdateParams.WAIT_SEARCHER, false ) ) {
        log.info( "blocking for commit/optimize" );
        blockUntilFinished(); // empty the queue
        return super.request( request );
      }
    }
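
// Excerpt from the streaming update client's background runner: UpdateParams.OPTIMIZE, COMMIT, WAIT_SEARCHER, and WAIT_FLUSH on each queued request control the <commit/> or <optimize/> XML that is written.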
                  while( req != null ) {
                    log.debug( "sending: {}" , req );
                    req.writeXML( writer );
                   
                    // check for commit or optimize
                    SolrParams params = req.getParams();
                    if( params != null ) {
                      String fmt = null;
                      if( params.getBool( UpdateParams.OPTIMIZE, false ) ) {
                        fmt = "<optimize waitSearcher=\"%s\" waitFlush=\"%s\" />";
                      }
                      else if( params.getBool( UpdateParams.COMMIT, false ) ) {
                        fmt = "<commit waitSearcher=\"%s\" waitFlush=\"%s\" />";
                      }
                      if( fmt != null ) {
                        log.info( fmt );
                        writer.write( String.format( fmt,
                            params.getBool( UpdateParams.WAIT_SEARCHER, false )+"",
                            params.getBool( UpdateParams.WAIT_FLUSH, false )+"") );
                      }
                    }
                   
                    writer.flush();
                    req = queue.poll( 250, TimeUnit.MILLISECONDS );