Examples of Terms


Examples of com.esri.ontology.service.catalog.Terms

  /**
   * Searches the ontology model for terms matching the query.
   * @param queryCriteria query criteria
   * @param selection selection criteria
   * @return collection of terms
   */
  public Terms search(QueryCriteria queryCriteria, Selection selection) {

    Terms terms = new Terms();

    OntModelUtil ontModelUtil = new OntModelUtil(ontCtx.getModel());

    OntClass search = ontCtx.getModel().getOntClass(
      ontModelUtil.getCode(queryCriteria.getTerm(), locale));

    if (search == null) {
      log.fine("Query: " + queryCriteria + " returned no results.");
      return terms;
    }

    // process categories
    if (selection.isCategories()) {
      processCategories(ontModelUtil, queryCriteria, search, terms);
    }

    // process neighbors
    if (selection.isNeighbors()) {
      processNeighbors(ontModelUtil, queryCriteria, search, terms);
    }

    terms.removeByThreshold(queryCriteria.getThreshold());
    terms.sortByCount();

    return terms;
  }
View Full Code Here

Examples of com.senseidb.search.client.req.Terms

        return (Term) new Term(value).setField(field);
    }

    public static Selection terms(String field, List<String> values,
            List<String> excludes, Operator op) {
        return new Terms(values, excludes, op).setField(field);
    }
View Full Code Here

Examples of org.apache.lucene.index.Terms

    }


  private void addTerms(IndexReader reader, FieldVals f) throws IOException {
    if (f.queryString == null) return;
    final Terms terms = MultiFields.getTerms(reader, f.fieldName);
    if (terms == null) {
      return;
    }
    TokenStream ts = analyzer.tokenStream(f.fieldName, f.queryString);
    try {
View Full Code Here
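
Most of the Lucene snippets on this page follow the same basic pattern: obtain a Terms instance for one field, then walk it with a TermsEnum. Below is a minimal, self-contained sketch of that pattern against the Lucene 4.x API; it is illustrative only, and the Directory handle and the "body" field name are assumptions rather than code taken from the project above.

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;

public class ListFieldTermsSketch {
  // Prints every term of the "body" field together with its document frequency.
  public static void listTerms(Directory dir) throws IOException {
    IndexReader reader = DirectoryReader.open(dir);
    try {
      Terms terms = MultiFields.getTerms(reader, "body"); // null when the field is absent
      if (terms == null) {
        return;
      }
      TermsEnum termsEnum = terms.iterator(null);          // no previous enum to reuse
      BytesRef term;
      while ((term = termsEnum.next()) != null) {
        System.out.println(term.utf8ToString() + " docFreq=" + termsEnum.docFreq());
      }
    } finally {
      reader.close();
    }
  }
}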

Examples of org.apache.lucene.index.Terms

    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    createRandomIndex(atLeast(50), w, random().nextLong());
    DirectoryReader reader = w.getReader();
    AtomicReader wrapper = SlowCompositeReaderWrapper.wrap(reader);
    String field = "body";
    Terms terms = wrapper.terms(field);
    // inverted comparator: the most frequent entry sits at the head, so this
    // queue keeps the lowest-frequency terms when capped at five
    PriorityQueue<TermAndFreq> lowFreqQueue = new PriorityQueue<CommonTermsQueryTest.TermAndFreq>(5) {
      @Override
      protected boolean lessThan(TermAndFreq a, TermAndFreq b) {
        return a.freq > b.freq;
      }
    };
    // natural comparator: the least frequent entry sits at the head, so this
    // queue keeps the highest-frequency terms when capped at five
    PriorityQueue<TermAndFreq> highFreqQueue = new PriorityQueue<CommonTermsQueryTest.TermAndFreq>(5) {
      @Override
      protected boolean lessThan(TermAndFreq a, TermAndFreq b) {
        return a.freq < b.freq;
      }
    };
    try {
      TermsEnum iterator = terms.iterator(null);
      while (iterator.next() != null) {
        if (highFreqQueue.size() < 5) {
          highFreqQueue.add(new TermAndFreq(
              BytesRef.deepCopyOf(iterator.term()), iterator.docFreq()));
          lowFreqQueue.add(new TermAndFreq(
View Full Code Here

Examples of org.apache.lucene.index.Terms

      return iterator;
    }

    @Override
    public Terms terms(String field) throws IOException {
      Terms terms = in.terms(field);
      return terms == null ? null : new AssertingAtomicReader.AssertingTerms(terms);
    }
View Full Code Here
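
The override above delegates terms(field) to a wrapped instance before decorating the result. A hedged sketch of the same delegation pattern using Lucene 4.x's FilterAtomicReader.FilterFields follows; the logging wrapper is an illustrative assumption, not the asserting wrapper shown above.

import java.io.IOException;

import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FilterAtomicReader.FilterFields;
import org.apache.lucene.index.Terms;

// Logs every terms(field) lookup before delegating to the wrapped Fields.
public class LoggingFields extends FilterFields {
  public LoggingFields(Fields in) {
    super(in);
  }

  @Override
  public Terms terms(String field) throws IOException {
    Terms terms = in.terms(field);   // may legitimately be null for an unknown field
    System.out.println("terms(" + field + ") -> " + (terms == null ? "absent" : "present"));
    return terms;
  }
}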

Examples of org.apache.lucene.index.Terms

    for (String fieldName : fields) {
      if (fieldName.equals(DocMaker.ID_FIELD) || fieldName.equals(DocMaker.DATE_MSEC_FIELD) || fieldName.equals(DocMaker.TIME_SEC_FIELD)) {
        continue;
      }
      Terms terms = fields.terms(fieldName);
      if (terms == null) {
        continue;
      }
      TermsEnum termsEnum = terms.iterator(null);
      DocsEnum docs = null;
      while(termsEnum.next() != null) {
        docs = _TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
        while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          totalTokenCount2 += docs.freq();
View Full Code Here

Examples of org.apache.lucene.index.Terms

  // Returns a DocsEnum, but randomly sometimes uses a
  // DocsAndFreqsEnum, DocsAndPositionsEnum.  Returns null
  // if field/term doesn't exist:
  public static DocsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
    final Terms terms = MultiFields.getTerms(r, field);
    if (terms == null) {
      return null;
    }
    final TermsEnum termsEnum = terms.iterator(null);
    if (!termsEnum.seekExact(term)) {
      return null;
    }
    return docs(random, termsEnum, liveDocs, reuse, flags);
  }
View Full Code Here
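
The helper above only seeks the term and hands postings iteration to another method. As a hedged, self-contained sketch of the full seek-then-iterate pattern on the Lucene 4.x API (the field and term text are placeholders, and live docs are honoured the same way as in the surrounding snippets):

import java.io.IOException;

import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

public class SeekTermSketch {
  // Counts live documents containing the given term; returns 0 if the field or term is missing.
  public static int countDocs(IndexReader reader, String field, String text) throws IOException {
    Terms terms = MultiFields.getTerms(reader, field);
    if (terms == null) {
      return 0;
    }
    TermsEnum termsEnum = terms.iterator(null);
    if (!termsEnum.seekExact(new BytesRef(text))) {
      return 0;
    }
    Bits liveDocs = MultiFields.getLiveDocs(reader);          // null when nothing is deleted
    DocsEnum docs = termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE);
    int count = 0;
    while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}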

Examples of org.apache.lucene.index.Terms

      if (termVectorsToFetch != null && termVectorsToFetch.size() > 0) {
        Map<String, List<BoboTerm>> tvMap = new HashMap<String, List<BoboTerm>>();
        hit.setTermVectorMap(tvMap);
        Fields fds = reader.getTermVectors(fdoc.doc);
        for (String field : termVectorsToFetch) {
          Terms terms = fds.terms(field);
          if (terms == null) {
            continue;
          }
          TermsEnum termsEnum = terms.iterator(null);
          BytesRef text;
          DocsAndPositionsEnum docsAndPositions = null;
          List<BoboTerm> boboTermList = new ArrayList<BoboTerm>();
          while ((text = termsEnum.next()) != null) {
            BoboTerm boboTerm = new BoboTerm();
View Full Code Here
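
The snippet above reads per-document term vectors rather than the main inverted index. A minimal sketch of that term-vector pattern against the Lucene 4.x API follows; the document id and field name are placeholders, and the BoboTerm conversion is deliberately left out.

import java.io.IOException;

import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

public class TermVectorSketch {
  // Prints each term of one document's term vector followed by its positions.
  public static void printTermVector(IndexReader reader, int docId, String field) throws IOException {
    Fields vectors = reader.getTermVectors(docId);   // null if the doc stored no term vectors
    if (vectors == null) {
      return;
    }
    Terms terms = vectors.terms(field);               // null if this field has no vector
    if (terms == null) {
      return;
    }
    TermsEnum termsEnum = terms.iterator(null);
    BytesRef text;
    DocsAndPositionsEnum positions = null;            // reuse across terms
    while ((text = termsEnum.next()) != null) {
      positions = termsEnum.docsAndPositions(null, positions);
      if (positions == null || positions.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
        continue;                                     // vector was indexed without positions
      }
      StringBuilder sb = new StringBuilder(text.utf8ToString());
      for (int i = 0; i < positions.freq(); i++) {
        sb.append(' ').append(positions.nextPosition());
      }
      System.out.println(sb);
    }
  }
}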

Examples of org.apache.lucene.index.Terms

    try {
      final BytesRef catTerm = new BytesRef(categoryPath.toString(delimiter));
      TermsEnum termsEnum = null; // reuse
      DocsEnum docs = null; // reuse
      for (AtomicReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(Consts.FULL);
        if (terms != null) {
          termsEnum = terms.iterator(termsEnum);
          if (termsEnum.seekExact(catTerm)) {
            // liveDocs=null because the taxonomy has no deletes
            docs = termsEnum.docs(null, docs, 0 /* freqs not required */);
            // if the term was found, we know it has exactly one document.
            doc = docs.nextDoc() + ctx.docBase;
View Full Code Here

Examples of org.apache.lucene.index.Terms

    DirectoryReader reader = readerManager.acquire();
    try {
      TermsEnum termsEnum = null;
      DocsEnum docsEnum = null;
      for (AtomicReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(Consts.FULL);
        if (terms != null) { // cannot really happen, but be on the safe side
          termsEnum = terms.iterator(termsEnum);
          while (termsEnum.next() != null) {
            if (!cache.isFull()) {
              BytesRef t = termsEnum.term();
              // Since we guarantee uniqueness of categories, each term has exactly
              // one document. Also, since we do not allow removing categories (and
View Full Code Here