Package org.apache.lucene.index

Examples of org.apache.lucene.index.DirectoryReader.leaves()


    // Only add field-Term mappings that will actually have DocValues in the end;
    // therefore traverse the index terms and add what exists. This pertains to
    // multiple CLPs as well as partitions.
    DirectoryReader reader = DirectoryReader.open(dir);
    final Map<String,Term> fieldTerms = new HashMap<String,Term>();
    for (AtomicReaderContext context : reader.leaves()) {
      for (CategoryListParams clp : fip.getAllCategoryListParams()) {
        Terms terms = context.reader().terms(clp.field);
        if (terms != null) {
          TermsEnum te = terms.iterator(null);
          BytesRef termBytes = null;
View Full Code Here
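
The fragment above only records mappings for fields that actually carry terms in the index. A minimal, self-contained sketch of that per-leaf existence check (the class name LeafFieldScan and the candidates parameter are illustrative, not part of the original code):

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.store.Directory;

    public class LeafFieldScan {
      /** Returns the subset of candidate fields that have at least one indexed term
       *  in some segment of the index in dir. */
      static Set<String> fieldsWithTerms(Directory dir, Set<String> candidates) throws IOException {
        Set<String> present = new HashSet<String>();
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
          for (AtomicReaderContext context : reader.leaves()) { // one context per segment
            for (String field : candidates) {
              Terms terms = context.reader().terms(field);
              if (terms != null) { // the field is indexed in this segment
                present.add(field);
              }
            }
          }
        } finally {
          reader.close();
        }
        return present;
      }
    }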


    Thread[] threads = new Thread[3];
    for (int i = 0; i < threads.length; i++) {
      threads[i] = new Thread("CachedOrdsThread-" + i) {
        @Override
        public void run() {
          for (AtomicReaderContext context : reader.leaves()) {
            try {
              OrdinalsCache.getCachedOrds(context, FacetIndexingParams.DEFAULT.getCategoryListParams(new CategoryPath("A")));
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
View Full Code Here
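
The fragment above hits the OrdinalsCache from several threads at once. A simplified sketch of the same concurrency pattern over reader.leaves(), where the reader is only read, never modified (the per-segment counting work and the class name ConcurrentLeafScan are illustrative):

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;

    public class ConcurrentLeafScan {
      /** Each thread walks the same leaves() list and does read-only per-segment work. */
      static long[] countDocsConcurrently(final DirectoryReader reader, int numThreads) throws InterruptedException {
        final long[] counts = new long[numThreads];
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < threads.length; i++) {
          final int id = i;
          threads[i] = new Thread("LeafScanThread-" + i) {
            @Override
            public void run() {
              for (AtomicReaderContext context : reader.leaves()) {
                counts[id] += context.reader().numDocs(); // read-only per-segment work
              }
            }
          };
          threads[i].start();
        }
        for (Thread t : threads) {
          t.join();
        }
        return counts;
      }
    }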

      CategoryListParams clp = indexingParams.getCategoryListParams(new CategoryPath(dimensions[i]));
      IntDecoder decoder = clp.createEncoder().createMatchingDecoder();
      iterators[i] = new DocValuesCategoryListIterator(clp.field, decoder);
    }
    MultiCategoryListIterator cli = new MultiCategoryListIterator(iterators);
    for (AtomicReaderContext context : indexReader.leaves()) {
      assertTrue("failed to init multi-iterator", cli.setNextReader(context));
      IntsRef ordinals = new IntsRef();
      final int maxDoc = context.reader().maxDoc();
      for (int i = 0; i < maxDoc; i++) {
        cli.getOrdinals(i, ordinals);
View Full Code Here
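
The fragment above loops over segment-local doc IDs from 0 to maxDoc()-1 inside each leaf. A self-contained sketch of that per-leaf document loop, adding the live-docs check and noting the docBase offset (the class name PerLeafDocLoop is illustrative):

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.util.Bits;

    public class PerLeafDocLoop {
      /** Visits every live document exactly once, segment by segment. */
      static int countLiveDocs(DirectoryReader reader) {
        int visited = 0;
        for (AtomicReaderContext ctx : reader.leaves()) {
          AtomicReader leaf = ctx.reader();
          Bits liveDocs = leaf.getLiveDocs(); // null means this segment has no deletions
          int maxDoc = leaf.maxDoc();
          for (int doc = 0; doc < maxDoc; doc++) { // doc IDs here are segment-local
            if (liveDocs != null && !liveDocs.get(doc)) {
              continue; // deleted document
            }
            // ctx.docBase + doc would give the top-level doc ID if one were needed
            visited++;
          }
        }
        return visited;
      }
    }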

    DirectoryReader reader = readerManager.acquire();
    try {
      final BytesRef catTerm = new BytesRef(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
      TermsEnum termsEnum = null; // reuse
      DocsEnum docs = null; // reuse
      for (AtomicReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(Consts.FULL);
        if (terms != null) {
          termsEnum = terms.iterator(termsEnum);
          if (termsEnum.seekExact(catTerm)) {
            // liveDocs=null because the taxonomy has no deletes
View Full Code Here
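
The fragment above seeks an exact category term in each leaf and then reads its postings. A hedged, generic version of that lookup (the field/value parameters and the class name ExactTermLookup are illustrative; the original passes liveDocs=null because the taxonomy has no deletes, while this sketch respects deletions):

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    public class ExactTermLookup {
      /** Returns the top-level doc ID of the first document containing field:value,
       *  or -1 if no segment contains that term. */
      static int findFirstDoc(DirectoryReader reader, String field, String value) throws IOException {
        BytesRef term = new BytesRef(value);
        TermsEnum termsEnum = null; // reused across segments
        DocsEnum docs = null;       // reused across segments
        for (AtomicReaderContext ctx : reader.leaves()) {
          Terms terms = ctx.reader().terms(field);
          if (terms == null) {
            continue; // field not indexed in this segment
          }
          termsEnum = terms.iterator(termsEnum);
          if (termsEnum.seekExact(term)) {
            docs = termsEnum.docs(ctx.reader().getLiveDocs(), docs);
            int doc = docs.nextDoc();
            if (doc != DocIdSetIterator.NO_MORE_DOCS) {
              return ctx.docBase + doc; // convert the leaf doc ID to a top-level one
            }
          }
        }
        return -1;
      }
    }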

    boolean aborted = false;
    DirectoryReader reader = readerManager.acquire();
    try {
      TermsEnum termsEnum = null;
      DocsEnum docsEnum = null;
      for (AtomicReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(Consts.FULL);
        if (terms != null) { // cannot really happen, but be on the safe side
          termsEnum = terms.iterator(termsEnum);
          while (termsEnum.next() != null) {
            if (!cache.isFull()) {
View Full Code Here

      final OrdinalMap ordinalMap = map;
      ordinalMap.setSize(size);
      int base = 0;
      TermsEnum te = null;
      DocsEnum docs = null;
      for (final AtomicReaderContext ctx : r.leaves()) {
        final AtomicReader ar = ctx.reader();
        final Terms terms = ar.terms(Consts.FULL);
        te = terms.iterator(te);
        while (te.next() != null) {
          FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
View Full Code Here
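
The fragment above feeds every term of the Consts.FULL field, segment by segment, into an OrdinalMap. A much simplified stand-in that assigns global ordinals with a plain HashMap instead of the OrdinalMap machinery (the class name GlobalTermOrdinals is illustrative):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;

    public class GlobalTermOrdinals {
      /** Assigns one global ordinal per distinct term of the field, walking the
       *  segments in leaves() order; a term seen in several segments keeps the
       *  ordinal from the first segment that contained it. */
      static Map<String,Integer> buildOrdinals(DirectoryReader reader, String field) throws IOException {
        Map<String,Integer> ordinals = new HashMap<String,Integer>();
        TermsEnum te = null; // reused across segments
        for (AtomicReaderContext ctx : reader.leaves()) {
          Terms terms = ctx.reader().terms(field);
          if (terms == null) {
            continue;
          }
          te = terms.iterator(te);
          BytesRef term;
          while ((term = te.next()) != null) {
            String text = term.utf8ToString();
            if (!ordinals.containsKey(text)) {
              ordinals.put(text, ordinals.size());
            }
          }
        }
        return ordinals;
      }
    }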

    assertFalse(_fileSystem.exists(_path));
    assertFalse(_fileSystem.exists(_badRowIdsPath));
    assertTrue(_fileSystem.exists(_inUsePath));

    DirectoryReader reader = DirectoryReader.open(_mainDirectory);
    while (reader.leaves().size() > 1) {
      reader.close();
      _mainWriter.forceMerge(1, true);
      _mainWriter.commit();
      reader = DirectoryReader.open(_mainDirectory);
    }
View Full Code Here
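
The fragment above force-merges and reopens until the index collapses to a single leaf. The same loop extracted into a reusable sketch (the writer and directory are assumed to be managed by the caller; the class name SingleSegmentHelper is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;

    public class SingleSegmentHelper {
      /** Force-merges until the index has a single leaf, reopening the reader
       *  after each merge+commit so leaves() reflects the new segment layout. */
      static DirectoryReader openSingleSegmentReader(Directory dir, IndexWriter writer) throws IOException {
        DirectoryReader reader = DirectoryReader.open(dir);
        while (reader.leaves().size() > 1) {
          reader.close();
          writer.forceMerge(1, true); // blocks until the merge finishes
          writer.commit();
          reader = DirectoryReader.open(dir);
        }
        return reader;
      }
    }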

  }
 
  private void migrateIndex(Directory indexDir, FacetIndexingParams fip) throws Exception {
    final Map<String,Term> fieldTerms = FacetsPayloadMigrationReader.buildFieldTermsMap(indexDir, fip);
    DirectoryReader reader = DirectoryReader.open(indexDir);
    List<AtomicReaderContext> leaves = reader.leaves();
    int numReaders = leaves.size();
    AtomicReader wrappedLeaves[] = new AtomicReader[numReaders];
    for (int i = 0; i < numReaders; i++) {
      wrappedLeaves[i] = new FacetsPayloadMigrationReader(leaves.get(i).reader(), fieldTerms);
    }
View Full Code Here
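
The fragment above wraps every leaf in a FacetsPayloadMigrationReader before handing the wrapped readers on. A generic sketch of that wrap-every-leaf pattern, using a trivial FilterAtomicReader subclass as a stand-in for the real migration reader and a MultiReader to recombine the leaves (both class names in the sketch are illustrative):

    import java.io.IOException;
    import java.util.List;

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.FilterAtomicReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiReader;

    public class WrappedLeavesExample {
      /** A no-op wrapper standing in for FacetsPayloadMigrationReader. */
      static class PassThroughLeafReader extends FilterAtomicReader {
        PassThroughLeafReader(AtomicReader in) {
          super(in);
        }
      }

      /** Wraps every leaf of the reader and recombines the wrappers into one view. */
      static IndexReader wrapLeaves(DirectoryReader reader) throws IOException {
        List<AtomicReaderContext> leaves = reader.leaves();
        AtomicReader[] wrapped = new AtomicReader[leaves.size()];
        for (int i = 0; i < wrapped.length; i++) {
          wrapped[i] = new PassThroughLeafReader(leaves.get(i).reader());
        }
        return new MultiReader(wrapped); // one composite view over the wrapped per-segment readers
      }
    }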

    DirectoryReader reader = readerManager.acquire();
    try {
      final BytesRef catTerm = new BytesRef(categoryPath.toString(delimiter));
      TermsEnum termsEnum = null; // reuse
      DocsEnum docs = null; // reuse
      for (AtomicReaderContext ctx : reader.leaves()) {
        Terms terms = ctx.reader().terms(Consts.FULL);
        if (terms != null) {
          termsEnum = terms.iterator(termsEnum);
          if (termsEnum.seekExact(catTerm, true)) {
            // liveDocs=null because the taxonomy has no deletes
View Full Code Here
