Examples of CacheConfig
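CacheConfig carries the block-cache settings that HBase's HFile readers and writers consult: whether to cache data blocks on read and on write, whether to evict a file's blocks when it is closed, and which BlockCache instance to use. The snippets below show it being constructed from a Hadoop Configuration and handed to writer factories, readers, and StoreFile builders. As a quick orientation, here is a minimal, self-contained sketch of constructing and inspecting one; it assumes the 0.98/1.x-era API these examples use, and the literal configuration key names are an assumption to verify against your HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class CacheConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Settings CacheConfig reads; key names assumed from the 0.98/1.x line.
        conf.setFloat("hfile.block.cache.size", 0.25f);        // heap fraction for the block cache
        conf.setBoolean("hbase.rs.cacheblocksonwrite", true);  // also cache data blocks as they are written
        conf.setBoolean("hbase.rs.evictblocksonclose", true);  // drop a file's blocks when it is closed

        CacheConfig cacheConf = new CacheConfig(conf);
        BlockCache cache = cacheConf.getBlockCache();          // process-wide cache instance

        System.out.println("cache data on read:  " + cacheConf.shouldCacheDataOnRead());
        System.out.println("cache data on write: " + cacheConf.shouldCacheDataOnWrite());
        System.out.println("evict on close:      " + cacheConf.shouldEvictOnClose());
      }
    }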


Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
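This snippet hands a fresh CacheConfig to HFile.getWriterFactory while writing an HFile whose row keys come from splitting the range between startKey and endKey.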

        byte[] startKey, byte[] endKey, int numRows) throws IOException {

      HFile.Writer writer = null;
      long now = System.currentTimeMillis();
      try {
        writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
            .withPath(fs, path)
            .withComparator(KeyValue.KEY_COMPARATOR)
            .create();
        // subtract 2 since numRows doesn't include boundary keys
        for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows - 2)) {
          // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
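A test helper that writes a small HFile through a writer factory configured with a new CacheConfig.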

  private static void createHFile(
      Configuration conf,
      FileSystem fs, Path path,
      byte[] family, byte[] qualifier) throws IOException {
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, path)
        .withComparator(KeyValue.KEY_COMPARATOR)
        .create();
    long now = System.currentTimeMillis();
    try {
      // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
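From a per-family StoreFile writer: a CacheConfig built from a separate tempConf is passed to StoreFile.WriterBuilder, alongside an HFileContext carrying the checksum, block-size, and data-block-encoding settings.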

                                    .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
                                    .withBlockSize(blockSize);
        contextBuilder.withDataBlockEncoding(encoding);
        HFileContext hFileContext = contextBuilder.build();

        wl.writer = new StoreFile.WriterBuilder(conf, new CacheConfig(tempConf), fs)
            .withOutputDir(familydir).withBloomType(bloomType).withComparator(KeyValue.COMPARATOR)
            .withFileContext(hFileContext)
            .build();

        this.writers.put(family, wl);
        // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
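Opens an HFile with HFile.createReader and a fresh CacheConfig, then scans it (without caching blocks) to count its cells.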


  private int verifyHFile(Path p) throws IOException {
    Configuration conf = util.getConfiguration();
    HFile.Reader reader = HFile.createReader(
        p.getFileSystem(conf), p, new CacheConfig(conf), conf);
    reader.loadFileInfo();
    HFileScanner scanner = reader.getScanner(false, false);
    scanner.seekTo();
    int count = 0;
    do {
      // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
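Builds an HFileContext with an explicit block size and compression and passes it, together with a new CacheConfig, when creating the writer.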

  {
    HFileContext meta = new HFileContextBuilder()
                        .withBlockSize(BLOCKSIZE)
                        .withCompression(COMPRESSION)
                        .build();
    HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
        .withPath(fs, path)
        .withFileContext(meta)
        .create();
    long now = System.currentTimeMillis();
    try {
      // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
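After writing and closing a file, the test reopens it with a CacheConfig-backed reader to assert that the column family's compression and data block encoding were applied.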

    writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
    writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
    writer.close();

    // Verify that compression and encoding settings are respected
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm());
    assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding());
    reader.close();
  }

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
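Creates a StoreFile.Writer via WriterBuilder, using a CacheConfig derived from a freshly created HBaseConfiguration, to write into an existing store directory.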

    Path storedir = f.getPath().getParent();
    long seqid = f.getMaxSequenceId();
    Configuration c = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(c);
    HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL).build();
    StoreFile.Writer w = new StoreFile.WriterBuilder(c, new CacheConfig(c), fs)
            .withOutputDir(storedir)
            .withFileContext(meta)
            .build();
    w.appendMetadata(seqid + 1, false);
    // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
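Obtains the process-wide LruBlockCache through CacheConfig.getBlockCache() and clears it so the test starts from an empty cache.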

        + encodeOnDisk + ", includeTags : " + includeTags + ", compressTags : " + compressTags);
    if (includeTags) {
      testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
    }
    LruBlockCache cache =
      (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
    cache.clearCache();
    // Need to disable default row bloom filter for this test to pass.
    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
        setDataBlockEncoding(encoding).
        setBlocksize(BLOCK_SIZE).
        // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
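Opens a candidate HFile with a CacheConfig built from the tool's configuration and reads its first and last row keys.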

      final Pair<byte[][], byte[][]> startEndKeys)
      throws IOException {
    final Path hfilePath = item.hfilePath;
    final FileSystem fs = hfilePath.getFileSystem(getConf());
    HFile.Reader hfr = HFile.createReader(fs, hfilePath,
        new CacheConfig(getConf()));
    final byte[] first, last;
    try {
      hfr.loadFileInfo();
      first = hfr.getFirstRowKey();
      last = hfr.getLastRowKey();
      // ...

Examples of org.apache.hadoop.hbase.io.hfile.CacheConfig
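The most complete example: one CacheConfig is shared between the HalfStoreFileReader and the StoreFile.Writer that copy one half of a split HFile, and cacheConf.shouldEvictOnClose() decides whether the reader evicts its blocks on close.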

  private static void copyHFileHalf(
      Configuration conf, Path inFile, Path outFile, Reference reference,
      HColumnDescriptor familyDescriptor)
  throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HalfStoreFileReader halfReader = null;
    StoreFile.Writer halfWriter = null;
    HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(
        familyDescriptor.getDataBlockEncodingOnDisk(),
        familyDescriptor.getDataBlockEncoding());
    try {
      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
          reference, DataBlockEncoding.NONE);
      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

      int blocksize = familyDescriptor.getBlocksize();
      Algorithm compression = familyDescriptor.getCompression();
      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();

      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf, fs, blocksize)
              .withFilePath(outFile)
              .withCompression(compression)
              .withDataBlockEncoder(dataBlockEncoder)
              .withBloomType(bloomFilterType)
              .withChecksumType(Store.getChecksumType(conf))
              .withBytesPerChecksum(Store.getBytesPerChecksum(conf))
              .build();
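      // Scan without caching blocks (first getScanner argument) and copy every cell into the half writer.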
      HFileScanner scanner = halfReader.getScanner(false, false, false);
      scanner.seekTo();
      do {
        KeyValue kv = scanner.getKeyValue();
        halfWriter.append(kv);
      } while (scanner.next());

      for (Map.Entry<byte[],byte[]> entry : fileInfo.entrySet()) {
        if (shouldCopyHFileMetaKey(entry.getKey())) {
          halfWriter.appendFileInfo(entry.getKey(), entry.getValue());
        }
      }
    } finally {
      if (halfWriter != null) halfWriter.close();
      if (halfReader != null) halfReader.close(cacheConf.shouldEvictOnClose());
    }
  }