Package org.apache.hadoop.hbase.io.hfile

Examples of org.apache.hadoop.hbase.io.hfile.BlockCache
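
BlockCache is the interface behind HBase's in-memory cache of HFile blocks (LruBlockCache is the usual implementation in this generation of the codebase). The snippets below show how server code, tools, and tests obtain the cache through CacheConfig, read its CacheStats, and shut it down. As a minimal orientation sketch, using only calls that appear in the examples on this page (the Configuration setup is assumed; imports are omitted, as in the snippets themselves):

    Configuration conf = HBaseConfiguration.create();
    // CacheConfig hands out the process-wide block cache; this is
    // null when block caching is disabled in the configuration.
    BlockCache cache = new CacheConfig(conf).getBlockCache();
    if (cache != null) {
      CacheStats stats = cache.getStats();
      System.out.println("used=" + cache.getCurrentSize()
          + " free=" + cache.getFreeSize()
          + " hits=" + stats.getHitCount()
          + " misses=" + stats.getMissCount()
          + " hitRatio=" + (int) (stats.getHitRatio() * 100) + "%"
          + " evicted=" + stats.getEvictedCount());
      // Standalone tools and tests shut the cache down so its
      // background eviction thread does not keep the JVM alive.
      cache.shutdown();
    }

The first example, from a region server's periodic metrics update, copies the cache sizes and CacheStats counters into server metrics: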


    this.metrics.compactionQueueSize.set(compactSplitThread
        .getCompactionQueueSize());
    this.metrics.flushQueueSize.set(cacheFlusher
        .getFlushQueueSize());

    BlockCache blockCache = cacheConfig.getBlockCache();
    if (blockCache != null) {
      this.metrics.blockCacheCount.set(blockCache.size());
      this.metrics.blockCacheFree.set(blockCache.getFreeSize());
      this.metrics.blockCacheSize.set(blockCache.getCurrentSize());
      CacheStats cacheStats = blockCache.getStats();
      this.metrics.blockCacheHitCount.set(cacheStats.getHitCount());
      this.metrics.blockCacheMissCount.set(cacheStats.getMissCount());
      this.metrics.blockCacheEvictedCount.set(blockCache.getEvictedCount());
      double ratio = cacheStats.getHitRatio();
      int percent = (int) (ratio * 100);
      this.metrics.blockCacheHitRatio.set(percent);
      ratio = cacheStats.getHitCachingRatio();
      percent = (int) (ratio * 100);
      this.metrics.blockCacheHitCachingRatio.set(percent);
    }
    float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(
      getServerName().getHostname());
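
Next, a region server override that builds per-column-family summaries of what sits in the block cache (the excerpt opens with the tail of the preceding method). getBlockCache() returns null when block caching is disabled, so a defensive caller would guard the call; a sketch of such a variant (the empty-list fallback is an assumption, not part of the original source):

    BlockCache c = new CacheConfig(this.conf).getBlockCache();
    return c == null
        ? Collections.<BlockCacheColumnFamilySummary>emptyList()  // assumed fallback
        : c.getBlockCacheColumnFamilySummaries(this.conf);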


    new HRegionServerCommandLine(regionServerClass).doMain(args);
  }

  @Override
  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries() throws IOException {
    BlockCache c = new CacheConfig(this.conf).getBlockCache();
    return c.getBlockCacheColumnFamilySummaries(this.conf);
  }
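
From a cache-on-write test: before writing a StoreFile, the test records the cache's baseline hit, miss, and eviction counts so that later assertions can check deltas rather than absolute values: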

    // Find a home for our files
    Path baseDir = new Path(new Path(this.testDir, "regionname"),"twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
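
After flushing and scanning two tables, this test pulls the per-column-family summaries out of the cache and asserts on how many entries come back: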

    TEST_UTIL.flush();

    scan(ht, FAMILY);
    scan(ht2, FAMILY);

    BlockCache bc =
      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
    List<BlockCacheColumnFamilySummary> bcs =
      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
    LOG.info("blockCacheSummary: " + bcs);

    assertEquals("blockCache summary has entries", 3, bcs.size());

    BlockCacheColumnFamilySummary e = bcs.get(0);
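
The longest example is an end-to-end client test that tracks exactly how the cache's block count, hit count, and miss count move as a row travels through the memstore, two flushes, several reads, and a major compaction. With cache-on-write and evict-on-close enabled, each flush adds one cached block, each read of already-cached data adds one hit, and the compaction reads the two old blocks (two hits), evicts them on close, and does not cache its output, so the first read after compaction is a miss: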

        tableName).getFromOnlineRegions(regionName);
    Store store = region.getStores().values().iterator().next();
    CacheConfig cacheConf = store.getCacheConfig();
    cacheConf.setCacheDataOnWrite(true);
    cacheConf.setEvictOnClose(true);
    BlockCache cache = cacheConf.getBlockCache();

    // establish baseline stats
    long startBlockCount = cache.getBlockCount();
    long startBlockHits = cache.getStats().getHitCount();
    long startBlockMiss = cache.getStats().getMissCount();

    // wait till baseline is stable, (minimal 500 ms)
    for (int i = 0; i < 5; i++) {
      Thread.sleep(100);
      if (startBlockCount != cache.getBlockCount()
          || startBlockHits != cache.getStats().getHitCount()
          || startBlockMiss != cache.getStats().getMissCount()) {
        startBlockCount = cache.getBlockCount();
        startBlockHits = cache.getStats().getHitCount();
        startBlockMiss = cache.getStats().getMissCount();
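        // the counters moved, so reset i and wait another full 500 ms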
        i = -1;
      }
    }

    // insert data
    Put put = new Put(ROW);
    put.add(FAMILY, QUALIFIER, data);
    table.put(put);
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    // data was in memstore so don't expect any changes
    assertEquals(startBlockCount, cache.getBlockCount());
    assertEquals(startBlockHits, cache.getStats().getHitCount());
    assertEquals(startBlockMiss, cache.getStats().getMissCount());
    // flush the data
    System.out.println("Flushing cache");
    region.flushcache();
    // expect one more block in cache, no change in hits/misses
    long expectedBlockCount = startBlockCount + 1;
    long expectedBlockHits = startBlockHits;
    long expectedBlockMiss = startBlockMiss;
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // read the data and expect same blocks, one new hit, no misses
    assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // insert a second column, read the row, no new blocks, one new hit
    byte [] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
    byte [] data2 = Bytes.add(data, data);
    put = new Put(ROW);
    put.add(FAMILY, QUALIFIER2, data2);
    table.put(put);
    Result r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(++expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // flush, one new block
    System.out.println("Flushing cache");
    region.flushcache();
    assertEquals(++expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    // compact, net minus two blocks, two hits, no misses
    System.out.println("Compacting");
    assertEquals(2, store.getStorefilesCount());
    store.triggerMajorCompaction();
    region.compactStores();
    waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
    assertEquals(1, store.getStorefilesCount());
    expectedBlockCount -= 2; // evicted two blocks, cached none
    assertEquals(expectedBlockCount, cache.getBlockCount());
    expectedBlockHits += 2;
    assertEquals(expectedBlockMiss, cache.getStats().getMissCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    // read the row, this should be a cache miss because we don't cache data
    // blocks on compaction
    r = table.get(new Get(ROW));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
    assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
    expectedBlockCount += 1; // cached one data block
    assertEquals(expectedBlockCount, cache.getBlockCount());
    assertEquals(expectedBlockHits, cache.getStats().getHitCount());
    assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
  }
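
A block cache opened by a standalone tool has to be shut down explicitly, otherwise its background eviction thread keeps the process alive. This utility does so in a finally block: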

    try {
      processTable(fs, tableDir, log, c, majorCompact);
    } finally {
      log.close();
      // TODO: is this still right?
      BlockCache bc = new CacheConfig(c).getBlockCache();
      if (bc != null) bc.shutdown();
    }
  }
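
The same shutdown idiom in a multi-threaded tool, applied after the StoreFile reader is closed and the worker executor has been stopped: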

      }
    } finally {
      storeFile.closeReader(true);
      exec.shutdown();

      BlockCache c = cacheConf.getBlockCache();
      if (c != null) {
        c.shutdown();
      }
    }
    LOG.info("Worker threads completed: " + numCompleted);
    LOG.info("Worker threads failed: " + numFailed);
    return true;
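
A test helper that re-reads a freshly written StoreFile block by block with a caching-disabled scanner, then asks the cache (via a BlockCacheKey built from the file name and block offset) whether each block type was cached on write as expected: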

    readStoreFile(writer.getPath());
  }

  private void readStoreFile(Path path) throws IOException {
    CacheConfig cacheConf = store.getCacheConfig();
    BlockCache cache = cacheConf.getBlockCache();
    StoreFile sf = new StoreFile(fs, path, conf, cacheConf,
        BloomType.ROWCOL, null);
    HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader();
    try {
      // Open a scanner with (on read) caching disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
            "shouldBeCached: " + shouldBeCached+ "\n" +
            "isCached: " + isCached + "\n" +
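
A variant of the earlier baseline-stats setup, this time placing the files under a region directory named "7e0102":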

    // Find a home for our files (regiondir ("7e0102") and familyname).
    Path baseDir = new Path(new Path(this.testDir, "7e0102"),"twoCOWEOC");

    // Grab the block cache and get the initial hit/miss counts
    BlockCache bc = new CacheConfig(conf).getBlockCache();
    assertNotNull(bc);
    CacheStats cs = bc.getStats();
    long startHit = cs.getHitCount();
    long startMiss = cs.getMissCount();
    long startEvicted = cs.getEvictedCount();

    // Let's write a StoreFile with three blocks, with cache on write off
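
And a variant of the column-family summary test above, this time expecting two entries instead of three: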

    TEST_UTIL.flush();

    scan(ht, FAMILY);
    scan(ht2, FAMILY);

    BlockCache bc =
      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
    List<BlockCacheColumnFamilySummary> bcs =
      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
    LOG.info("blockCacheSummary: " + bcs);

    assertEquals("blockCache summary has entries", 2, bcs.size());

    BlockCacheColumnFamilySummary e = bcs.get(0);
