Examples of HFileBlock


Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock
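HFileBlock is the unit in which HBase reads, writes, and caches the contents of an HFile: data blocks, index blocks, and Bloom chunks all come off disk as HFileBlock instances. The excerpts below, gathered from HBase test and Bloom filter code across several releases, show two recurring patterns: walking a file block by block via the reader's readBlock method, and loading a Bloom chunk to test whether a key is present.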

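This test fragment (cribbed from io.hfile.TestCacheOnWrite, as its own comment says) iterates over every block that precedes the load-on-open section of the file, reading each with readBlock and checking that the block's presence in the block cache agrees with what the cache-on-write configuration (cowType) predicts for that block type.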
    // Setup omitted from this excerpt: the enclosing test creates reader,
    // cache, cowType and testDescription before this point.
    try {
      // Open a scanner with block caching on read disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, null);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
            "shouldBeCached: " + shouldBeCached+ "\n" +
            "isCached: " + isCached + "\n" +
            "Test description: " + testDescription + "\n" +
            "block: " + block + "\n" +
            "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }
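For context, here is a minimal sketch of the setup a fragment like this assumes. None of it is from the original excerpt: the path is made up, and the calls shown (new CacheConfig(conf), CacheConfig.getBlockCache(), the four-argument HFile.createReader) match 1.x-era HBase APIs; other release lines differ.

    // Hypothetical setup sketch, not from the original (1.x-era HBase APIs).
    // Imports assumed: org.apache.hadoop.conf.Configuration,
    // org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path,
    // org.apache.hadoop.hbase.HBaseConfiguration, and BlockCache, CacheConfig,
    // HFile from org.apache.hadoop.hbase.io.hfile.
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile");    // illustrative path
    CacheConfig cacheConf = new CacheConfig(conf); // reads cache-on-write settings from conf
    BlockCache cache = cacheConf.getBlockCache();  // the cache the excerpt queries
    HFile.Reader reader = HFile.createReader(fs, path, cacheConf, conf);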

Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock

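The Bloom excerpts below come from a compound Bloom filter's contains() check. The root block index locates the chunk that covers the key, readBlock loads (and caches) that chunk as a BLOOM_CHUNK block, and ByteBloomFilter.contains tests the key against the chunk's bit array. The variables reader, index, hash, hashCount and numQueriesPerChunk are supplied by the enclosing filter class.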
    boolean result;
    int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
    if (block < 0) {
      result = false; // This key is not in the file.
    } else {
      HFileBlock bloomBlock;
      try {
        // We cache the block and use a positional read.
        bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
            index.getRootBlockDataSize(block), true, true, false,
            BlockType.BLOOM_CHUNK);
      } catch (IOException ex) {
        // Failing to load the Bloom block makes the filter unusable; rethrow.
        throw new IllegalArgumentException(
            "Failed to load Bloom block for key "
                + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
      }

      ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
      result = ByteBloomFilter.contains(key, keyOffset, keyLength,
          bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
          bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
    }

    if (numQueriesPerChunk != null && block >= 0) {
      // Update per-chunk statistics. Only used in unit tests.
      ++numQueriesPerChunk[block];
    }

    return result;
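Note the offset arithmetic: getBufferReadOnly() returns the block including its header, so the Bloom bits are read starting at bloomBuf.arrayOffset() + bloomBlock.headerSize() and run for getUncompressedSizeWithoutHeader() bytes.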

Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock

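The same lookup against an older readBlock signature, which takes five arguments and no expected block type; here the header is skipped with the constant HFileBlock.HEADER_SIZE rather than the instance method headerSize().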
    boolean result;
    int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
    if (block < 0) {
      result = false; // This key is not in the file.
    } else {
      HFileBlock bloomBlock;
      try {
        // We cache the block and use a positional read.
        bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
            index.getRootBlockDataSize(block), true, true, false);
      } catch (IOException ex) {
        // Failing to load the Bloom block makes the filter unusable; rethrow.
        throw new IllegalArgumentException(
            "Failed to load Bloom block for key "
                + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
      }

      ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
      result = ByteBloomFilter.contains(key, keyOffset, keyLength,
          bloomBuf.array(), bloomBuf.arrayOffset() + HFileBlock.HEADER_SIZE,
          bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
    }

    if (numQueriesPerChunk != null && block >= 0) {
      // Update per-chunk statistics. Only used in unit tests.
      ++numQueriesPerChunk[block];
    }

    return result;

Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock

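A later variant of the same check: readBlock now takes a seventh argument, an extra boolean flag ahead of the expected BlockType.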
    boolean result;
    int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
    if (block < 0) {
      result = false; // This key is not in the file.
    } else {
      HFileBlock bloomBlock;
      try {
        // We cache the block and use a positional read.
        bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
            index.getRootBlockDataSize(block), true, true, false, true,
            BlockType.BLOOM_CHUNK);
      } catch (IOException ex) {
        // Failing to load the Bloom block makes the filter unusable; rethrow.
        throw new IllegalArgumentException(
            "Failed to load Bloom block for key "
                + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
      }

      ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
      result = ByteBloomFilter.contains(key, keyOffset, keyLength,
          bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
          bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
    }

    if (numQueriesPerChunk != null && block >= 0) {
      // Update per-chunk statistics. Only used in unit tests.
      ++numQueriesPerChunk[block];
    }

    return result;

Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock

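And the longest signature shown here: an eighth readBlock argument follows the expected block type; judging from the scanner excerpt further down, it is the expected data block encoding, passed as null here to skip that check.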
    boolean result;
    int block = index.rootBlockContainingKey(key, keyOffset, keyLength);
    if (block < 0) {
      result = false; // This key is not in the file.
    } else {
      HFileBlock bloomBlock;
      try {
        // We cache the block and use a positional read.
        bloomBlock = reader.readBlock(index.getRootBlockOffset(block),
            index.getRootBlockDataSize(block), true, true, false, true,
            BlockType.BLOOM_CHUNK, null);
      } catch (IOException ex) {
        // Failing to load the Bloom block makes the filter unusable; rethrow.
        throw new IllegalArgumentException(
            "Failed to load Bloom block for key "
                + Bytes.toStringBinary(key, keyOffset, keyLength), ex);
      }

      ByteBuffer bloomBuf = bloomBlock.getBufferReadOnly();
      result = ByteBloomFilter.contains(key, keyOffset, keyLength,
          bloomBuf.array(), bloomBuf.arrayOffset() + bloomBlock.headerSize(),
          bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount);
    }

    if (numQueriesPerChunk != null && block >= 0) {
      // Update per-chunk statistics. Only used in unit tests.
      ++numQueriesPerChunk[block];
    }

    return result;

Examples of org.apache.hadoop.hbase.io.hfile.HFileBlock

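The cache-on-write walk one more time, now against the eight-argument readBlock (with DataBlockEncoding.NONE as the expected encoding) and a four-argument BlockCache getBlock call.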
    // Setup omitted from this excerpt: the enclosing test creates reader,
    // cache, cowType and testDescription before this point.
    try {
      // Open a scanner with block caching on read disabled
      HFileScanner scanner = reader.getScanner(false, false);
      assertTrue(testDescription, scanner.seekTo());
      // Cribbed from io.hfile.TestCacheOnWrite
      long offset = 0;
      HFileBlock prevBlock = null;
      while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        // Flags: don't cache the block, use pread, this is not a compaction.
        // Also, pass null for expected block type to avoid checking it.
        HFileBlock block = reader.readBlock(offset, onDiskSize, false, true,
          false, true, null, DataBlockEncoding.NONE);
        BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(),
          offset);
        boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null;
        boolean shouldBeCached = cowType.shouldBeCached(block.getBlockType());
        if (shouldBeCached != isCached) {
          throw new AssertionError(
            "shouldBeCached: " + shouldBeCached+ "\n" +
            "isCached: " + isCached + "\n" +
            "Test description: " + testDescription + "\n" +
            "block: " + block + "\n" +
            "blockCacheKey: " + blockCacheKey);
        }
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } finally {
      reader.close();
    }
  }
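Taken together, the excerpts trace readBlock growing from five arguments to eight across HBase releases; when porting code like this, match the signature (and the getBlock arity) shipped with the release you target.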