Package org.apache.solr.schema

Examples of org.apache.solr.schema.IndexSchema


    dataDir = SolrResourceLoader.normalizeDir(dataDir);

    log.info(logid+"Opening new SolrCore at " + resourceLoader.getInstanceDir() + ", dataDir="+dataDir);

    if (schema==null) {
      schema = new IndexSchema(config, IndexSchema.DEFAULT_SCHEMA_FILE, null);
    }

    //Initialize JMX
    if (config.jmxConfig.enabled) {
      infoRegistry = new JmxMonitoredMap<String, SolrInfoMBean>(name, config.jmxConfig);
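When no IndexSchema is passed in, the core falls back to IndexSchema.DEFAULT_SCHEMA_FILE (schema.xml). A minimal sketch of building the schema explicitly against a loaded SolrConfig — the file names are placeholders and error handling is omitted:

    SolrConfig config = new SolrConfig("solrconfig.xml");              // hypothetical config name
    IndexSchema schema = new IndexSchema(config, "schema.xml", null);  // null stream: resolve by name
    System.out.println("loaded schema: " + schema.getSchemaName());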


  /**
   * @param schemaFile schema filename
   */
  public TestHarness(String dataDirectory,
                     SolrConfig solrConfig,
                     String schemaFile) {
    this(dataDirectory, solrConfig, new IndexSchema(solrConfig, schemaFile, null));
  }
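A hypothetical use of this constructor from a unit test; the data directory and file names are placeholders:

    SolrConfig solrConfig = new SolrConfig("solrconfig.xml");  // assumed config file
    TestHarness h = new TestHarness("/tmp/solr-data",          // placeholder data directory
                                    solrConfig,
                                    "schema.xml");
    // exercise the core through the harness, then release it
    h.close();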

  public static final int DEFAULT_COUNT = 10;
 
  @Override
  public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception
  {   
    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    IndexReader reader = searcher.getReader();
    SolrParams params = req.getParams();
    int numTerms = params.getInt( NUMTERMS, DEFAULT_COUNT );
       
    // Always show the core lucene info
    rsp.add("index", getIndexInfo(reader, numTerms>0 ) );

    Integer docId = params.getInt( DOC_ID );
    if( docId == null && params.get( ID ) != null ) {
      // Look for something with a given solr ID
      SchemaField uniqueKey = schema.getUniqueKeyField();
      String v = uniqueKey.getType().toInternal( params.get(ID) );
      Term t = new Term( uniqueKey.getName(), v );
      docId = searcher.getFirstMatch( t );
      if( docId < 0 ) {
        throw new SolrException( SolrException.ErrorCode.NOT_FOUND, "Can't find document: "+params.get( ID ) );
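The unique-key lookup pattern from above in isolation — a sketch assuming a SolrQueryRequest named req and a schema that declares a uniqueKey field:

    IndexSchema schema = req.getSchema();
    SolrIndexSearcher searcher = req.getSearcher();
    SchemaField uniqueKey = schema.getUniqueKeyField();          // assumed non-null here
    String internal = uniqueKey.getType().toInternal("doc-42");  // hypothetical external id
    int docId = searcher.getFirstMatch(new Term(uniqueKey.getName(), internal));
    if (docId < 0) {
      // no document indexed under that id
    }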

  private static SimpleOrderedMap<Object> getIndexedFieldsInfo(
    final SolrIndexSearcher searcher, final Set<String> fields, final int numTerms )
    throws Exception {

    IndexReader reader = searcher.getReader();
    IndexSchema schema = searcher.getSchema();
   
    // Walk the term enum and keep a priority queue for each map in our set
    Map<String,TopTermQueue> ttinfo = null;
    if( numTerms > 0 ) {
      ttinfo = getTopTerms(reader, fields, numTerms, null );
    }
    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
    Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
    for (String fieldName : fieldNames) {
      if( fields != null && !fields.contains( fieldName ) ) {
        continue; // if a field list was specified, only include those fields
      }
     
      SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();
     
      SchemaField sfield = schema.getFieldOrNull( fieldName );
      FieldType ftype = (sfield==null)?null:sfield.getType();

      f.add( "type", (ftype==null)?null:ftype.getTypeName() );
      f.add( "schema", getFieldFlags( sfield ) );
      if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
        f.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
      }

      // If numTerms==0, the call is just asking for a quick field list
      if( ttinfo != null && sfield != null && sfield.indexed() ) {
        Query q = new ConstantScoreRangeQuery(fieldName,null,null,false,false);
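The same introspection calls — getFieldOrNull(), isDynamicField(), getDynamicPattern() — in a compact sketch; the field name is a placeholder:

    SchemaField sfield = schema.getFieldOrNull("price_f");  // hypothetical field name
    if (sfield != null) {
      FieldType ftype = sfield.getType();
      System.out.println("type=" + ftype.getTypeName()
          + " indexed=" + sfield.indexed() + " stored=" + sfield.stored());
      if (schema.isDynamicField(sfield.getName())) {
        System.out.println("dynamic base: " + schema.getDynamicPattern(sfield.getName()));
      }
    }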

  /**
   * {@inheritDoc}
   */
  protected NamedList doAnalysis(SolrQueryRequest req) throws Exception {
    FieldAnalysisRequest analysisRequest = resolveAnalysisRequest(req);
    IndexSchema indexSchema = req.getCore().getSchema();
    return handleAnalysisRequest(analysisRequest, indexSchema);
  }
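Outside a request handler, the same schema is reachable through the core; a sketch assuming a SolrQueryRequest named req:

    IndexSchema indexSchema = req.getCore().getSchema();
    Analyzer indexAnalyzer = indexSchema.getAnalyzer();       // index-time analysis chains
    Analyzer queryAnalyzer = indexSchema.getQueryAnalyzer();  // query-time analysis chains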

    SolrParams params = req.getParams();
    if (!isHighlightingEnabled(params))
      return null;

    SolrIndexSearcher searcher = req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    NamedList fragments = new SimpleOrderedMap();
    String[] fieldNames = getHighlightFields(query, req, defaultFields);
    Set<String> fset = new HashSet<String>();

    {
      // pre-fetch documents using the Searcher's doc cache
      for (String f : fieldNames) { fset.add(f); }
      // fetch the unique key if one exists.
      SchemaField keyField = schema.getUniqueKeyField();
      if (null != keyField)
        fset.add(keyField.getName());
    }


    // Highlight each document
    DocIterator iterator = docs.iterator();
    for (int i = 0; i < docs.size(); i++) {
       int docId = iterator.nextDoc();
       Document doc = searcher.doc(docId, fset);
       NamedList docSummaries = new SimpleOrderedMap();
       for (String fieldName : fieldNames) {
          fieldName = fieldName.trim();
          String[] docTexts = doc.getValues(fieldName);
          if (docTexts == null) continue;
         
          TokenStream tstream = null;
          int numFragments = getMaxSnippets(fieldName, params);
          boolean mergeContiguousFragments = isMergeContiguousFragments(fieldName, params);

          String[] summaries = null;
          List<TextFragment> frags = new ArrayList<TextFragment>();
          TermOffsetsTokenStream tots = null;
          for (int j = 0; j < docTexts.length; j++) {
            // create TokenStream
            try {
              // attempt term vectors
              if( tots == null ) {
                TokenStream tvStream = TokenSources.getTokenStream(searcher.getReader(), docId, fieldName);
                if (tvStream != null) {
                  tots = new TermOffsetsTokenStream(tvStream);
                  tstream = tots.getMultiValuedTokenStream( docTexts[j].length() );
                } else {
                  // fall back to analyzer
                  tstream = createAnalyzerTStream(schema, fieldName, docTexts[j]);
                }
              }
            }
            catch (IllegalArgumentException e) {
              // fall back to analyzer
              tstream = createAnalyzerTStream(schema, fieldName, docTexts[j]);
            }
                        
            Highlighter highlighter;
            if (Boolean.valueOf(req.getParams().get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true"))) {
              // TODO: this is not always necessary - eventually we would like to avoid this wrap
              //       when it is not needed.
              tstream = new CachingTokenFilter(tstream);
             
              // get highlighter
              highlighter = getPhraseHighlighter(query, fieldName, req, (CachingTokenFilter) tstream);
              
              // after highlighter initialization, reset tstream since construction of highlighter already used it
              tstream.reset();
            }
            else {
              // use "the old way"
              highlighter = getHighlighter(query, fieldName, req);
            }
           
            int maxCharsToAnalyze = params.getFieldInt(fieldName,
                HighlightParams.MAX_CHARS,
                Highlighter.DEFAULT_MAX_CHARS_TO_ANALYZE);
            if (maxCharsToAnalyze < 0) {
              highlighter.setMaxDocCharsToAnalyze(docTexts[j].length());
            } else {
              highlighter.setMaxDocCharsToAnalyze(maxCharsToAnalyze);
            }

            try {
              TextFragment[] bestTextFragments = highlighter.getBestTextFragments(tstream, docTexts[j], mergeContiguousFragments, numFragments);
              for (int k = 0; k < bestTextFragments.length; k++) {
                if ((bestTextFragments[k] != null) && (bestTextFragments[k].getScore() > 0)) {
                  frags.add(bestTextFragments[k]);
                }
              }
            } catch (InvalidTokenOffsetsException e) {
              throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
            }
          }
          // sort such that the fragments with the highest score come first
          Collections.sort(frags, new Comparator<TextFragment>() {
            public int compare(TextFragment arg0, TextFragment arg1) {
              return Math.round(arg1.getScore() - arg0.getScore());
            }
          });
         
           // convert fragments back into text
           // TODO: we can include score and position information in output as snippet attributes
          if (frags.size() > 0) {
            ArrayList<String> fragTexts = new ArrayList<String>();
            for (TextFragment fragment: frags) {
              if ((fragment != null) && (fragment.getScore() > 0)) {
                fragTexts.add(fragment.toString());
              }
              if (fragTexts.size() >= numFragments) break;
            }
            summaries = fragTexts.toArray(new String[0]);
            if (summaries.length > 0)
              docSummaries.add(fieldName, summaries);
          }
           // no summaries were made, copy text from the alternate field
           if (summaries == null || summaries.length == 0) {
              String alternateField = req.getParams().getFieldParam(fieldName, HighlightParams.ALTERNATE_FIELD);
              if (alternateField != null && alternateField.length() > 0) {
                String[] altTexts = doc.getValues(alternateField);
                if (altTexts != null && altTexts.length > 0){
                  int alternateFieldLen = req.getParams().getFieldInt(fieldName, HighlightParams.ALTERNATE_FIELD_LENGTH,0);
                  if( alternateFieldLen <= 0 ){
                    docSummaries.add(fieldName, altTexts);
                  }
                  else{
                    List<String> altList = new ArrayList<String>();
                    int len = 0;
                    for( String altText: altTexts ){
                      altList.add( len + altText.length() > alternateFieldLen ?
                                   new String(altText.substring( 0, alternateFieldLen - len )) : altText );
                      len += altText.length();
                      if( len >= alternateFieldLen ) break;
                    }
                    docSummaries.add(fieldName, altList);
                  }
                }
              }
           }
        }
        String printId = schema.printableUniqueKey(doc);
        fragments.add(printId == null ? null : printId, docSummaries);
     }
     return fragments;
  }
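One detail worth noting: the fragment comparator above rounds a float difference, so score gaps smaller than 0.5 compare as equal. A more conventional strict descending sort, offered here as an alternative sketch, would be:

    Collections.sort(frags, new Comparator<TextFragment>() {
      public int compare(TextFragment a, TextFragment b) {
        return Float.compare(b.getScore(), a.getScore());  // descending by score
      }
    });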


    /* :TODO: potential optimization...
     * cache the Terms with the highest docFreq and try them first
     * don't enum if we get our max from them
     */

    IndexSchema schema = searcher.getSchema();
    IndexReader r = searcher.getReader();
    FieldType ft = schema.getFieldType(field);

    Set<CountPair<String,Integer>> counts
      = new HashSet<CountPair<String,Integer>>();

    if (0 <= limit) {
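The FieldType fetched from the schema is what maps between external values and indexed terms; a short sketch with placeholder names:

    FieldType ft = schema.getFieldType("price");      // hypothetical field
    String indexed = ft.toInternal("10.5");           // external form -> indexed term text
    String readable = ft.indexedToReadable(indexed);  // indexed term text -> display form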

      SolrParams params = req.getParams();
     
      int flags = 0;
     
      SolrIndexSearcher s = req.getSearcher();
      IndexSchema schema = req.getSchema();
           
      Map<String,Float> queryFields = U.parseFieldBoosts(params.get(DMP.QF));
      Map<String,Float> phraseFields = U.parseFieldBoosts(params.get(DMP.PF));

      float tiebreaker = params.getFloat(DMP.TIE, 0.0f);
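The parsed qf/pf maps can be validated against the schema before query construction; a sketch (the boost string is a placeholder, and U is SolrPluginUtils as in the snippet above):

    Map<String,Float> qf = U.parseFieldBoosts("title^2.0 body");  // hypothetical qf value
    for (String fname : qf.keySet()) {
      if (schema.getFieldOrNull(fname) == null) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                                "undefined field in qf: " + fname);
      }
    }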

  public TestHarness(String dataDirectory,
                     String confFile,
                     String schemaFile) {
    try {
      SolrConfig.initConfig(confFile);
      core = new SolrCore(dataDirectory, new IndexSchema(schemaFile));
      builder = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
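This older two-string variant builds its IndexSchema from a bare filename after statically initializing SolrConfig. A hypothetical usage, with placeholder paths and a minimal update document:

    TestHarness h = new TestHarness("/tmp/solr-data",  // placeholder data directory
                                    "solrconfig.xml",
                                    "schema.xml");
    h.update("<add><doc><field name=\"id\">1</field></doc></add>");  // index one document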


