Class org.apache.lucene.search.FieldCache

Examples of org.apache.lucene.search.FieldCache.CacheEntry

The snippets below are drawn from several versions of Lucene's FieldCacheSanityChecker and of Solr's field-cache statistics plugin, and show how CacheEntry instances are inspected and cross-checked.


    // maps the (valId) identityHashCode of cache values to
    // sets of CacheEntry instances
    final MapOfSets<Integer,CacheEntry> valIdToItems =
        new MapOfSets<Integer,CacheEntry>(new HashMap<Integer,Set<CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    final MapOfSets<ReaderField,Integer> readerFieldToValIds =
        new MapOfSets<ReaderField,Integer>(new HashMap<ReaderField,Set<Integer>>(17));
    // any keys that we know result in more than one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                            item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }
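The loop above is the first pass of Lucene's FieldCacheSanityChecker, which flags fields whose cached values diverge across reader keys. To run the whole check rather than reimplement it, a minimal driver looks like this (a sketch assuming Lucene 3.x/4.x, where FieldCacheSanityChecker lives in org.apache.lucene.util):

import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.FieldCacheSanityChecker;
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;

public class FieldCacheAudit {
  public static void main(String[] args) {
    // Run every sanity check against whatever the default cache currently holds.
    Insanity[] problems = FieldCacheSanityChecker.checkSanity(FieldCache.DEFAULT);
    for (Insanity insanity : problems) {
      // Each Insanity pairs a type (SUBREADER, VALUEMISMATCH, EXPECTED)
      // with the CacheEntry objects involved.
      System.err.println(insanity);
    }
  }
}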


  public NamedList getStatistics() {
    NamedList stats = new SimpleOrderedMap();
    CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
    stats.add("entries_count", entries.length);
    for (int i = 0; i < entries.length; i++) {
      CacheEntry e = entries[i];
      stats.add("entry#" + i, e.toString());
    }

    Insanity[] insanity = checker.check(entries);

    stats.add("insanity_count", insanity.length);
View Full Code Here

    // any keys that we know result in more than one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      // It's OK to have dup entries, where one is eg
      // float[] and the other is the Bits (from
      // getDocsWithField())
      if (val instanceof Bits) {
        continue;
      }

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                            item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }
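CacheEntry itself is mostly a diagnostic bean. A small sketch of dumping the live entries, assuming the same Lucene 3.x/4.x accessors used throughout this page (estimateSize() walks the cached arrays and can be slow, so call it only when you want the RAM figure):

    FieldCache.CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
    for (FieldCache.CacheEntry entry : entries) {
      entry.estimateSize(); // populates getEstimatedSize()
      System.out.println("reader=" + entry.getReaderKey()
          + " field=" + entry.getFieldName()
          + " type=" + entry.getCacheType().getSimpleName()
          + " size=" + entry.getEstimatedSize());
    }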


  public NamedList getStatistics() {
    NamedList stats = new SimpleOrderedMap();
    CacheEntry[] entries = FieldCache.DEFAULT.getCacheEntries();
    stats.add("entries_count", entries.length);
    for (int i = 0; i < entries.length; i++) {
      CacheEntry e = entries[i];
      stats.add("entry#" + i, e.toString());
    }

    Insanity[] insanity = checker.checkSanity(entries);

    stats.add("insanity_count", insanity.length);
    for (int i = 0; i < insanity.length; i++) {

      // we only estimate the size of insane entries
      for (CacheEntry e : insanity[i].getCacheEntries()) {
        // don't re-estimate if we've already done it.
        if (null == e.getEstimatedSize()) e.estimateSize();
      }
     
      stats.add("insanity#" + i, insanity[i].toString());
    }
    return stats;
  }
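When the checker does report insanity (typically the same field cached against both a top-level reader and its segment readers, wasting RAM), the blunt remedy is to drop the cache and let it repopulate. A sketch, assuming the purge API added in Lucene 2.9:

    Insanity[] insanity = FieldCacheSanityChecker.checkSanity(FieldCache.DEFAULT);
    if (insanity.length > 0) {
      // Discards every cached array; later sorts and faceting rebuild them on demand.
      FieldCache.DEFAULT.purgeAllCaches();
    }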

    // any keys that we know result in more than one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      // It's OK to have dup entries, where one is eg
      // float[] and the other is the Bits (from
      // getDocsWithField())
      if (val != null && "BitsEntry".equals(val.getClass().getSimpleName())) {
        continue;
      }

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                            item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }


    // any keys that we know result in more than one valId
    final Set valMismatchKeys = new HashSet();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                            item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }

    // any keys that we know result in more than one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      // It's OK to have dup entries, where one is eg
      // float[] and the other is the Bits (from
      // getDocsWithField())
      if (val instanceof Bits) {
        continue;
      }

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                            item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }
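The "indirect mapping" in these loops leans on org.apache.lucene.util.MapOfSets, whose put() adds the value to a per-key Set and returns that Set's new size. A stripped-down sketch of the same dedup idea outside the checker (imports: java.util.*, org.apache.lucene.util.MapOfSets):

    MapOfSets<Integer,CacheEntry> valIdToItems =
        new MapOfSets<Integer,CacheEntry>(new HashMap<Integer,Set<CacheEntry>>());
    for (CacheEntry item : FieldCache.DEFAULT.getCacheEntries()) {
      // identityHashCode groups entries that share the exact same value instance
      Integer valId = Integer.valueOf(System.identityHashCode(item.getValue()));
      if (valIdToItems.put(valId, item) > 1) {
        // two distinct cache keys resolved to one underlying value object
        System.out.println("shared instance for field: " + item.getFieldName());
      }
    }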

