Package org.apache.hadoop.hbase.io.hfile

Examples of org.apache.hadoop.hbase.io.hfile.LruBlockCache

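LruBlockCache is HBase's default on-heap block cache: it holds HFile data and index blocks and evicts them in roughly LRU order once the configured capacity is exceeded. The examples below show the three ways it typically appears in the code base: obtained through CacheConfig in tests, read for region server metrics, and allocated and shut down directly.

First, a minimal lifecycle sketch. It uses only calls that appear in the snippets on this page; the class wrapper, variable names, and the capacity and block-size values are illustrative, not taken from HBase.

    import org.apache.hadoop.hbase.io.hfile.LruBlockCache;

    public class LruBlockCacheSketch {
      public static void main(String[] args) {
        // Illustrative sizes, not HBase defaults.
        long capacity = 64L * 1024 * 1024;  // 64 MB maximum cache size, in bytes
        long blockSize = 64L * 1024;        // expected average block size, in bytes
        LruBlockCache cache = new LruBlockCache(capacity, blockSize);

        // The same statistics the metrics fragments below publish.
        long blocksCached = cache.size();           // number of cached blocks
        long bytesUsed = cache.getCurrentSize();    // bytes currently in use
        long bytesFree = cache.getFreeSize();       // bytes still available
        double hitRatio = cache.getStats().getHitRatio();
        System.out.println(blocksCached + " blocks, " + bytesUsed + " bytes used, "
            + bytesFree + " bytes free, hit ratio " + hitRatio);

        cache.clearCache();  // evict everything (the tests below do this for isolation)
        cache.shutdown();    // stop the eviction thread
      }
    }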

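From a parameterized encoded-seeker test (apparently a newer HBase branch that supports cell tags and tag compression): the test fetches the LruBlockCache through CacheConfig, clears it for isolation, and finally asserts that blocks of only one encoding ended up in the cache. The @Test method header, truncated in the original listing, is restored to match the later variants.
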
    System.err.println("Testing encoded seekers for encoding : " + encoding + ", encodeOnDisk : "
        + encodeOnDisk + ", includeTags : " + includeTags + ", compressTags : " + compressTags);
    if(includeTags) {
      testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
    }
    LruBlockCache cache =
      (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = new HColumnDescriptor(CF_NAME)
        .setMaxVersions(MAX_VERSIONS)
        .setDataBlockEncoding(encoding)
        .setBlocksize(BLOCK_SIZE)
        .setBloomFilterType(BloomType.NONE)
        .setCompressTags(compressTags);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // Write the data, but leave some in the memstore.
    doPuts(region);

    // Verify correctness when the memstore contains data.
    doGets(region);

    // Verify correctness again after compacting.
    region.compactStores();
    doGets(region);


    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
  }

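The same test in an older variant, where encoding on disk is still a separate column-family setting (setEncodeOnDisk) and tags are not yet involved:
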
  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding " + encoding);
    LruBlockCache cache =
      (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = new HColumnDescriptor(CF_NAME)
        .setMaxVersions(MAX_VERSIONS)
        .setDataBlockEncoding(encoding)
        .setEncodeOnDisk(encodeOnDisk)
        .setBlocksize(BLOCK_SIZE)
        .setBloomFilterType(BloomType.NONE);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // Write the data, but leave some in the memstore.
    doPuts(region);

    // Verify correctness when the memstore contains data.
    doGets(region);

    // Verify correctness again after compacting.
    region.compactStores();
    doGets(region);


    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
  }

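An earlier, self-contained form of the same test, with the load and verification loops written inline rather than factored into doPuts/doGets helpers:
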
  @Test
  public void testEncodedSeeker() throws IOException {
    System.err.println("Testing encoded seekers for encoding " + encoding);
    LruBlockCache cache =
        (LruBlockCache) new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();

    HRegion region = testUtil.createTestRegion(
        TABLE_NAME, new HColumnDescriptor(CF_NAME)
            .setMaxVersions(MAX_VERSIONS)
            .setDataBlockEncoding(encoding)
            .setEncodeOnDisk(encodeOnDisk)
    );

    LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(
        MIN_VALUE_SIZE, MAX_VALUE_SIZE);

    // Write
    for (int i = 0; i < NUM_ROWS; ++i) {
      byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
        Put put = new Put(key);
        byte[] col = Bytes.toBytes(String.valueOf(j));
        byte[] value = dataGenerator.generateRandomSizeValue(key, col);
        put.add(CF_BYTES, col, value);
        region.put(put);
      }
      if (i % NUM_ROWS_PER_FLUSH == 0) {
        region.flushcache();
      }
    }

    for (int doneCompaction = 0; doneCompaction <= 1; ++doneCompaction) {
      // Read
      for (int i = 0; i < NUM_ROWS; ++i) {
        byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes();
        for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
          if (VERBOSE) {
            System.err.println("Reading row " + i + ", column " +  j);
          }
          final String qualStr = String.valueOf(j);
          final byte[] qualBytes = Bytes.toBytes(qualStr);
          Get get = new Get(rowKey);
          get.addColumn(CF_BYTES, qualBytes);
          Result result = region.get(get);
          assertEquals(1, result.size());
          byte[] value = result.getValue(CF_BYTES, qualBytes);
          assertTrue(LoadTestKVGenerator.verify(value, rowKey, qualBytes));
        }
      }

      if (doneCompaction == 0) {
        // Compact, then read again at the next loop iteration.
        region.compactStores();
      }
    }

    Map<DataBlockEncoding, Integer> encodingCounts =
        cache.getEncodingCountsForTest();

    // Ensure that compactions don't pollute the cache with unencoded blocks
    // in case of in-cache-only encoding.
    System.err.println("encodingCounts=" + encodingCounts);
    assertEquals(1, encodingCounts.size());
  }

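Beyond tests, LruBlockCache shows up in server lifecycle code. This and the following fragments appear to come from HRegionServer in older HBase releases, where StoreFile owned the global block cache. Here the server's main loop winds down: the cache's eviction thread is stopped explicitly, then worker threads are interrupted so they notice the shutdown.
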
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    // Send cache a shutdown.
    LruBlockCache c = (LruBlockCache)StoreFile.getBlockCache(this.conf);
    if (c != null) c.shutdown();

    // Send interrupts to wake up threads that may be sleeping, so they notice the shutdown.
    // TODO: Should we check that they are still alive? If an OOME occurred, they may have exited already.
    cacheFlusher.interruptIfNecessary();
    hlogFlusher.interrupt();

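Publishing block cache statistics as region server metrics: block count, free and used bytes, and the hit ratio converted to a percentage.
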
    this.metrics.stores.set(stores);
    this.metrics.storefiles.set(storefiles);
    this.metrics.memstoreSizeMB.set((int)(memstoreSize/(1024*1024)));
    this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024)));

    LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf);
    if (lruBlockCache != null) {
      this.metrics.blockCacheCount.set(lruBlockCache.size());
      this.metrics.blockCacheFree.set(lruBlockCache.getFreeSize());
      this.metrics.blockCacheSize.set(lruBlockCache.getCurrentSize());
      double ratio = lruBlockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
    }
  }

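A later variant of the same metrics code, which also reports the compaction queue size:
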
    this.metrics.memstoreSizeMB.set((int)(memstoreSize/(1024*1024)));
    this.metrics.storefileIndexSizeMB.set((int)(storefileIndexSize/(1024*1024)));
    this.metrics.compactionQueueSize.set(compactSplitThread.getCompactionQueueSize());

    LruBlockCache lruBlockCache = (LruBlockCache)StoreFile.getBlockCache(conf);
    if (lruBlockCache != null) {
      this.metrics.blockCacheCount.set(lruBlockCache.size());
      this.metrics.blockCacheFree.set(lruBlockCache.getFreeSize());
      this.metrics.blockCacheSize.set(lruBlockCache.getCurrentSize());
      double ratio = lruBlockCache.getStats().getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
    }
  }

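The shutdown sequence again, from a version that interrupts the compaction/split thread rather than the HLog flusher:
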
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
    // Send cache a shutdown.
    LruBlockCache c = (LruBlockCache)StoreFile.getBlockCache(this.conf);
    if (c != null) c.shutdown();

    // Send interrupts to wake up threads that may be sleeping, so they notice the shutdown.
    // TODO: Should we check that they are still alive? If an OOME occurred, they may have exited already.
    cacheFlusher.interruptIfNecessary();
    compactSplitThread.interruptIfNecessary();

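Finally, the allocation site for the cache itself, sized as a fraction of the JVM heap (apparently the StoreFile.getBlockCache path that the fragments above call into):
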
    // Calculate the amount of heap to give the block cache.
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    long cacheSize = (long)(mu.getMax() * cachePercentage);
    LOG.info("Allocating LruBlockCache with maximum size " +
      StringUtils.humanReadableInt(cacheSize));
    hfileBlockCache = new LruBlockCache(cacheSize, DEFAULT_BLOCKSIZE_SMALL);
    return hfileBlockCache;
  }
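
For example, with a 1 GB maximum heap and a cachePercentage of 0.25, mu.getMax() * cachePercentage caps the cache at 256 MB. The second constructor argument (DEFAULT_BLOCKSIZE_SMALL here) is the expected average block size, which the cache uses to pre-size its internal block map.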
