Package org.apache.hadoop.hbase.io.encoding

Examples of org.apache.hadoop.hbase.io.encoding.DataBlockEncoding collected from the HBase codebase: encoded HFile scanners, the LRU block cache, HFileOutputFormat, store-file tests, and the performance-evaluation tools.
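All of the snippets below revolve around the same enum. As a warm-up, a minimal sketch of resolving an encoding by name and getting its on-file name bytes; both calls appear in the examples that follow (valueOf in the HFileOutputFormat snippet, getNameInBytes in the store-file tests):

    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;

    // Resolve an encoding from its configuration-friendly name.
    DataBlockEncoding encoding = DataBlockEncoding.valueOf("FAST_DIFF");

    // These are the bytes a store file records under
    // HFileDataBlockEncoder.DATA_BLOCK_ENCODING in its file info.
    byte[] nameBytes = encoding.getNameInBytes();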


    protected final HFileContext meta;

    public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks,
        boolean pread, boolean isCompaction, HFileContext meta) {
      super(reader, cacheBlocks, pread, isCompaction);
      DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding();
      dataBlockEncoder = encoding.getEncoder();
      decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta);
      seeker = dataBlockEncoder.createSeeker(
        reader.getComparator(), decodingCtx);
      this.meta = meta;
    }
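The constructor above shows the general pattern: the encoding names an encoder, and the encoder builds the decoding context that the seeker needs. A minimal sketch of that wiring in isolation, assuming the same 0.98-era types used in the constructor (meta is an HFileContext):

    DataBlockEncoding encoding = DataBlockEncoding.FAST_DIFF;
    DataBlockEncoder encoder = encoding.getEncoder();
    // The decoding context carries the HFileContext (compression, checksum
    // settings, ...) required to interpret encoded blocks.
    HFileBlockDecodingContext ctx = encoder.newDataBlockDecodingContext(meta);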


  @VisibleForTesting
  public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
    Map<DataBlockEncoding, Integer> counts =
        new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
    for (LruCachedBlock block : map.values()) {
      DataBlockEncoding encoding =
              ((HFileBlock) block.getBuffer()).getDataBlockEncoding();
      Integer count = counts.get(encoding);
      counts.put(encoding, (count == null ? 0 : count) + 1);
    }
    return counts;
  }
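A minimal sketch of consuming that map in a test, assuming cache is the LruBlockCache instance exposing the method above:

    Map<DataBlockEncoding, Integer> counts = cache.getEncodingCountsForTest();
    for (Map.Entry<DataBlockEncoding, Integer> entry : counts.entrySet()) {
      System.out.println(entry.getKey() + ": " + entry.getValue() + " cached blocks");
    }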

    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap
        = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding;
    if (dataBlockEncodingStr != null) {
      overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } else {
      overriddenEncoding = null;
    }

    return new RecordWriter<ImmutableBytesWritable, V>() {
      // Map of families to writers, tracking how much has been written to each.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
      private boolean rollRequested = false;

      public void write(ImmutableBytesWritable row, V cell)
          throws IOException {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);

        // null input == user explicitly wants to flush
        if (row == null && kv == null) {
          rollWriters();
          return;
        }

        byte [] rowKey = CellUtil.cloneRow(kv);
        long length = kv.getLength();
        byte [] family = CellUtil.cloneFamily(kv);
        WriterLength wl = this.writers.get(family);

        // If this is a new column family, create its output directory
        if (wl == null) {
          fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
        }

        // If any of the HFiles for the column families has reached
        // maxsize, we need to roll all the writers
        if (wl != null && wl.written + length >= maxsize) {
          this.rollRequested = true;
        }

        // Rolling can only happen once the current row is finished, though
        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
          rollWriters();
        }

        // create a new HFile writer, if necessary
        if (wl == null || wl.writer == null) {
          wl = getNewWriter(family, conf);
        }

        // we now have the proper HFile writer. full steam ahead
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;

        // Remember the row so we can detect a row transition.
        this.previousRow = rowKey;
      }

      private void rollWriters() throws IOException {
        for (WriterLength wl : this.writers.values()) {
          if (wl.writer != null) {
            LOG.info("Writer=" + wl.writer.getPath() +
                ((wl.written == 0)? "": ", wrote=" + wl.written));
            close(wl.writer);
          }
          wl.writer = null;
          wl.written = 0;
        }
        this.rollRequested = false;
      }

      /* Create a new StoreFile.Writer.
       * @param family
       * @param conf
       * @return A WriterLength, containing a new StoreFile.Writer.
       * @throws IOException
       */
      private WriterLength getNewWriter(byte[] family, Configuration conf)
          throws IOException {
        WriterLength wl = new WriterLength();
        Path familydir = new Path(outputdir, Bytes.toString(family));
        Algorithm compression = compressionMap.get(family);
        compression = compression == null ? defaultCompression : compression;
        BloomType bloomType = bloomTypeMap.get(family);
        bloomType = bloomType == null ? BloomType.NONE : bloomType;
        Integer blockSize = blockSizeMap.get(family);
        blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
        DataBlockEncoding encoding = overriddenEncoding;
        encoding = encoding == null ? datablockEncodingMap.get(family) : encoding;
        encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
        Configuration tempConf = new Configuration(conf);
        tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
        HFileContextBuilder contextBuilder = new HFileContextBuilder();
        // ... (builder chain and writer construction truncated in the original snippet)
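The override read at the top of this example comes from the job configuration. A minimal sketch of setting it, assuming DATABLOCK_ENCODING_OVERRIDE_CONF_KEY (the constant referenced above) is visible to the caller:

    // Force one encoding for every family the job writes, regardless of the
    // per-family encoding map serialized into the configuration.
    Configuration conf = job.getConfiguration();
    conf.set(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, DataBlockEncoding.FAST_DIFF.name());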

        dataBlockEncodingConfigValue.append('&');
      }
      dataBlockEncodingConfigValue.append(
          URLEncoder.encode(familyDescriptor.getNameAsString(), "UTF-8"));
      dataBlockEncodingConfigValue.append('=');
      DataBlockEncoding encoding = familyDescriptor.getDataBlockEncoding();
      if (encoding == null) {
        encoding = DataBlockEncoding.NONE;
      }
      dataBlockEncodingConfigValue.append(URLEncoder.encode(encoding.toString(),
          "UTF-8"));
    }
    conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
        dataBlockEncodingConfigValue.toString());
  }
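Reading the value back out is the mirror image of the loop above. A minimal sketch of the deserialization, assuming the same '&'-separated, URL-encoded family=encoding format and a surrounding method that throws IOException:

    Map<byte[], DataBlockEncoding> encodingMap =
        new TreeMap<byte[], DataBlockEncoding>(Bytes.BYTES_COMPARATOR);
    String serialized = conf.get(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, "");
    for (String pair : serialized.split("&")) {
      if (pair.isEmpty()) continue;
      int idx = pair.indexOf('=');
      byte[] family = Bytes.toBytes(URLDecoder.decode(pair.substring(0, idx), "UTF-8"));
      encodingMap.put(family,
          DataBlockEncoding.valueOf(URLDecoder.decode(pair.substring(idx + 1), "UTF-8")));
    }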

  public void testDataBlockEncodingMetaData() throws IOException {
    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    Path path = new Path(dir, "1234567890");

    DataBlockEncoding dataBlockEncoderAlgo =
        DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder =
        new HFileDataBlockEncoderImpl(
            dataBlockEncoderAlgo,
            dataBlockEncoderAlgo);
    cacheConf = new CacheConfig(conf);
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        HFile.DEFAULT_BLOCKSIZE)
            .withFilePath(path)
            .withDataBlockEncoder(dataBlockEncoder)
            .withMaxKeyCount(2000)
            .withChecksumType(CKTYPE)
            .withBytesPerChecksum(CKBYTES)
            .build();
    writer.close();

    StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
        cacheConf, BloomType.NONE, dataBlockEncoder);
    StoreFile.Reader reader = storeFile.createReader();

    Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
    byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);

    // byte[] content must be compared element-wise, not by reference
    assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
  }
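Turning the raw file-info bytes back into the enum is straightforward, since getNameInBytes stores the name bytes. A minimal sketch:

    byte[] raw = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
    DataBlockEncoding fromFile = (raw == null)
        ? DataBlockEncoding.NONE
        : DataBlockEncoding.valueOf(Bytes.toString(raw));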

    final List<Thread> threads = new ArrayList<Thread>(this.N);
    final long[] timings = new long[this.N];
    final int perClientRows = R/N;
    final float sampleRate = this.sampleRate;
    final TableName tableName = this.tableName;
    final DataBlockEncoding encoding = this.blockEncoding;
    final boolean flushCommits = this.flushCommits;
    final Compression.Algorithm compression = this.compression;
    final boolean writeToWal = this.writeToWAL;
    final boolean reportLatency = this.reportLatency;
    final int preSplitRegions = this.presplitRegions;

  private void doMultipleClients(final Class<? extends Test> cmd) throws IOException {
    final List<Thread> threads = new ArrayList<Thread>(this.N);
    final long[] timings = new long[this.N];
    final int perClientRows = R/N;
    final TableName tableName = this.tableName;
    final DataBlockEncoding encoding = this.blockEncoding;
    final boolean flushCommits = this.flushCommits;
    final Compression.Algorithm compression = this.compression;
    final boolean writeToWal = this.writeToWAL;
    final int preSplitRegions = this.presplitRegions;
    final boolean useTags = this.useTags;

  private void createTable() throws Exception {
    deleteTable();
    LOG.info("Creating table");
    Configuration conf = util.getConfiguration();
    String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName());
    DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF"));
    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
    for (byte[] cf : dataGen.getColumnFamilies()) {
      HColumnDescriptor hcd = new HColumnDescriptor(cf);
      hcd.setDataBlockEncoding(blockEncoding);
      htd.addFamily(hcd);
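The encoding under test is pulled from the configuration, with FAST_DIFF as the default. A minimal sketch of overriding it before the test runs, using the same ENCODING_KEY format string read above:

    Configuration conf = util.getConfiguration();
    // Exercise PREFIX encoding instead of the FAST_DIFF default.
    conf.set(String.format(ENCODING_KEY, getClass().getSimpleName()),
        DataBlockEncoding.PREFIX.name());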

  public void testDataBlockEncodingMetaData() throws IOException {
    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
    Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    Path path = new Path(dir, "1234567890");

    DataBlockEncoding dataBlockEncoderAlgo =
        DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder =
        new HFileDataBlockEncoderImpl(
            dataBlockEncoderAlgo);
    cacheConf = new CacheConfig(conf);
    HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
        .withChecksumType(CKTYPE)
        .withBytesPerCheckSum(CKBYTES)
        .withDataBlockEncoding(dataBlockEncoderAlgo)
        .build();
    // Make a store file and write data to it.
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
            .withFilePath(path)
            .withMaxKeyCount(2000)
            .withFileContext(meta)
            .build();
    writer.close();

    StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
      cacheConf, BloomType.NONE);
    StoreFile.Reader reader = storeFile.createReader();

    Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
    byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
    // byte[] content must be compared element-wise, not by reference
    assertArrayEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
  }

    long offset = 0;
    HFileBlock prevBlock = null;
    EnumMap<BlockType, Integer> blockCountByType =
        new EnumMap<BlockType, Integer>(BlockType.class);

    DataBlockEncoding encodingInCache =
        encoderType.getEncoder().getDataBlockEncoding();
    while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
      long onDiskSize = -1;
      if (prevBlock != null) {
         onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
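A minimal sketch of the per-block bookkeeping inside that loop, once a block has been read (block is the HFileBlock just returned by the reader; the read call itself is version-specific and omitted):

    BlockType type = block.getBlockType();
    Integer prev = blockCountByType.get(type);
    blockCountByType.put(type, (prev == null ? 0 : prev) + 1);
    prevBlock = block;
    offset += block.getOnDiskSizeWithHeader();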
