Examples of HFileDataBlockEncoder

HFileDataBlockEncoder controls how an HFile's data blocks are encoded on disk and in the block cache; HFileDataBlockEncoderImpl is the standard implementation, and NoOpDataBlockEncoder.INSTANCE turns encoding off. The snippets below show typical uses.

Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

  throws IOException {
    FileSystem fs = inFile.getFileSystem(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HalfStoreFileReader halfReader = null;
    StoreFile.Writer halfWriter = null;
    // Build an encoder from the column family's settings: one encoding for
    // blocks on disk, another for blocks held in the block cache.
    HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(
        familyDescriptor.getDataBlockEncodingOnDisk(),
        familyDescriptor.getDataBlockEncoding());
    try {
      // Open a reader over one half of the split HFile, requesting no
      // in-cache encoding for the blocks it reads.
      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
          reference, DataBlockEncoding.NONE);
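
A minimal, self-contained sketch of the construction above. The two-argument constructor appears in the snippet itself; the getter names are assumed from the same 0.94-era HFileDataBlockEncoder interface:

  import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
  import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
  import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;

  public class EncoderConstructionSketch {
    public static void main(String[] args) {
      // Keep blocks unencoded on disk but PREFIX-encoded in the block cache.
      HFileDataBlockEncoder encoder = new HFileDataBlockEncoderImpl(
          DataBlockEncoding.NONE, DataBlockEncoding.PREFIX);
      System.out.println("on disk: " + encoder.getEncodingOnDisk()
          + ", in cache: " + encoder.getEncodingInCache());
    }
  }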

Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);

    // A job-wide encoding override, if configured, takes precedence over the
    // per-family encodings resolved below (see getNewWriter).
    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], HFileDataBlockEncoder> datablockEncodingMap =
        createFamilyDataBlockEncodingMap(conf);
    final HFileDataBlockEncoder overriddenEncoder;
    if (dataBlockEncodingStr != null) {
      overriddenEncoder = getDataBlockEncoderFromString(dataBlockEncodingStr);
    } else {
      overriddenEncoder = null;
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
      // Map of families to writers and how much has been output on the writer.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
      private boolean rollRequested = false;

      public void write(ImmutableBytesWritable row, KeyValue kv)
      throws IOException {
        // null input == user explicitly wants to flush
        if (row == null && kv == null) {
          rollWriters();
          return;
        }

        byte [] rowKey = kv.getRow();
        long length = kv.getLength();
        byte [] family = kv.getFamily();
        WriterLength wl = this.writers.get(family);

        // If this is a new column family, create its output directory
        if (wl == null) {
          fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
        }

        // If any of the HFiles for the column families has reached
        // maxsize, we need to roll all the writers
        if (wl != null && wl.written + length >= maxsize) {
          this.rollRequested = true;
        }

        // Writers can only roll at a row boundary, so wait for the row to change
        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
          rollWriters();
        }

        // create a new HFile writer for this family, if necessary
        if (wl == null || wl.writer == null) {
          wl = getNewWriter(family, conf);
        }

        // we now have the proper HFile writer. full steam ahead
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;

        // Remember the row so we can tell when the row changes.
        this.previousRow = rowKey;
      }

      private void rollWriters() throws IOException {
        for (WriterLength wl : this.writers.values()) {
          if (wl.writer != null) {
            LOG.info("Writer=" + wl.writer.getPath() +
                ((wl.written == 0)? "": ", wrote=" + wl.written));
            close(wl.writer);
          }
          wl.writer = null;
          wl.written = 0;
        }
        this.rollRequested = false;
      }

      /* Create a new StoreFile.Writer.
       * @param family column family the writer is for
       * @return A WriterLength wrapping a new StoreFile.Writer.
       * @throws IOException
       */
      private WriterLength getNewWriter(byte[] family, Configuration conf)
          throws IOException {
        WriterLength wl = new WriterLength();
        Path familydir = new Path(outputdir, Bytes.toString(family));
        Algorithm compression = compressionMap.get(family);
        compression = compression == null ? defaultCompression : compression;
        BloomType bloomType = bloomTypeMap.get(family);
        bloomType = bloomType == null ? BloomType.NONE : bloomType;
        Integer blockSize = blockSizeMap.get(family);
        blockSize = blockSize == null ? HFile.DEFAULT_BLOCKSIZE : blockSize;
        // Encoder fallback chain: job-wide override first, then the
        // per-family setting, then no encoding at all.
        HFileDataBlockEncoder encoder = overriddenEncoder;
        encoder = encoder == null ? datablockEncodingMap.get(family) : encoder;
        encoder = encoder == null ? NoOpDataBlockEncoder.INSTANCE : encoder;

        // Give the new writer a config copy with the block cache disabled.
        Configuration tempConf = new Configuration(conf);
        tempConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
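
The encoder resolution above reduces to a three-step fallback; a hedged restatement as a standalone helper (the class and method names are illustrative, not HBase API):

  import java.util.Map;
  import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
  import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;

  final class EncoderFallbackSketch {
    // Job-wide override wins, then the per-family setting, then no encoding.
    static HFileDataBlockEncoder resolve(byte[] family,
        HFileDataBlockEncoder override,
        Map<byte[], HFileDataBlockEncoder> perFamily) {
      if (override != null) {
        return override;
      }
      HFileDataBlockEncoder familyEncoder = perFamily.get(family);
      return familyEncoder != null ? familyEncoder : NoOpDataBlockEncoder.INSTANCE;
    }
  }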

Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

    }
    return encoderMap;
  }

  private static HFileDataBlockEncoder getDataBlockEncoderFromString(String dataBlockEncodingStr) {
    HFileDataBlockEncoder encoder;
    try {
      encoder = new HFileDataBlockEncoderImpl(DataBlockEncoding.valueOf(dataBlockEncodingStr));
    } catch (IllegalArgumentException ex) {
      // Chain the cause so the invalid value is visible in the stack trace.
      throw new RuntimeException("Invalid data block encoding type configured for the param "
          + DATABLOCK_ENCODING_FAMILIES_CONF_KEY + " : " + dataBlockEncodingStr, ex);
    }
    return encoder;
  }
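
A hedged usage sketch, assuming it runs where the class's conf-key constants are visible; DataBlockEncoding.valueOf accepts any name in the enum (e.g. PREFIX, DIFF, FAST_DIFF) and throws IllegalArgumentException otherwise:

  Configuration conf = HBaseConfiguration.create();
  conf.set(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY, "FAST_DIFF");
  HFileDataBlockEncoder encoder =
      getDataBlockEncoderFromString(conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY));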

Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

      throws Exception {
    // Remember each store's current encoder so the test can restore it later,
    // then swap in a PREFIX encoder for the duration of the test.
    Map<HStore, HFileDataBlockEncoder> replaceBlockCache =
        new HashMap<HStore, HFileDataBlockEncoder>();
    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
      HStore store = (HStore) pair.getValue();
      HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
      replaceBlockCache.put(store, blockEncoder);
      // PREFIX-encode blocks in the cache; with inCacheOnly, keep the
      // on-disk blocks unencoded.
      final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
      final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
          inCache;
      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
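
The snippet is cut off at the constructor call; presumably it completes with the onDisk/inCache pair computed just above:

      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk, inCache));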

Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

    Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
    Path path = new Path(dir, "1234567890");

    // Use the same FAST_DIFF encoding both on disk and in the block cache.
    DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
    HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(
        dataBlockEncoderAlgo, dataBlockEncoderAlgo);
    cacheConf = new CacheConfig(conf);
    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
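
The builder chain is truncated; a hedged completion under 0.94-era StoreFile.WriterBuilder names (withFilePath and withDataBlockEncoder are assumed from that API, and the block size argument is a guess):

    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
        HFile.DEFAULT_BLOCKSIZE)
        .withFilePath(path)
        .withDataBlockEncoder(dataBlockEncoder)
        .build();
    writer.close();  // finishes the file, persisting the encoding metadata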


Examples of org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder

      throws Exception {
    // Same pattern as the HStore example above, against the API version in
    // which getStores() returns concrete Store instances: save each store's
    // encoder for a later restore, then swap in a PREFIX encoder.
    Map<Store, HFileDataBlockEncoder> replaceBlockCache =
        new HashMap<Store, HFileDataBlockEncoder>();
    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
      Store store = pair.getValue();
      HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
      replaceBlockCache.put(store, blockEncoder);
      // PREFIX in the cache; with inCacheOnly, leave on-disk blocks unencoded.
      final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
      final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE :
          inCache;
      store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(
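
After the test body runs, the saved encoders are presumably restored; a sketch of that step using the map and setter from the snippet above:

    for (Map.Entry<Store, HFileDataBlockEncoder> entry : replaceBlockCache.entrySet()) {
      entry.getKey().setDataBlockEncoderInTest(entry.getValue());
    }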
