Class org.apache.hadoop.hbase.io.compress.Compression

Examples of org.apache.hadoop.hbase.io.compress.Compression.Algorithm
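
Compression.Algorithm is the enum HBase uses to tag HFile blocks with their
compression codec (NONE, GZ, LZO, SNAPPY, ...). As a warm-up, here is a minimal,
self-contained sketch of resolving an algorithm from a configuration string the
same way the snippets below do; note that "hfile.compression" is the ad-hoc key
from the first snippet, not a standard HBase constant.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
    import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter;

    public class ResolveCompression {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Fall back to NONE when the (ad-hoc) key is absent.
        String name = conf.get("hfile.compression", Algorithm.NONE.getName());
        // compressionByName maps the lower-case codec name (e.g. "gz",
        // "snappy") back to the enum constant; it is the same helper the
        // snippets below use.
        Algorithm algo = AbstractHFileWriter.compressionByName(name);
        System.out.println("Resolved compression: " + algo.getName());
      }
    }

The first snippet, from HFileOutputFormat's getRecordWriter, resolves a default
compression plus per-family overrides, then writes KeyValues into one HFile
writer per column family: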


    final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
        HConstants.DEFAULT_MAX_FILE_SIZE);
    // Invented config.  Add to hbase-*.xml if other than default compression.
    final String defaultCompressionStr = conf.get("hfile.compression",
        Compression.Algorithm.NONE.getName());
    final Algorithm defaultCompression = AbstractHFileWriter
        .compressionByName(defaultCompressionStr);
    final boolean compactionExclude = conf.getBoolean(
        "hbase.mapreduce.hfileoutputformat.compaction.exclude", false);

    // create a map from column family to the compression algorithm
    final Map<byte[], Algorithm> compressionMap = createFamilyCompressionMap(conf);
    final Map<byte[], BloomType> bloomTypeMap = createFamilyBloomTypeMap(conf);
    final Map<byte[], Integer> blockSizeMap = createFamilyBlockSizeMap(conf);

    String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY);
    final Map<byte[], DataBlockEncoding> datablockEncodingMap
        = createFamilyDataBlockEncodingMap(conf);
    final DataBlockEncoding overriddenEncoding;
    if (dataBlockEncodingStr != null) {
      overriddenEncoding = DataBlockEncoding.valueOf(dataBlockEncodingStr);
    } else {
      overriddenEncoding = null;
    }

    return new RecordWriter<ImmutableBytesWritable, KeyValue>() {
      // Map of families to writers and how much has been output on the writer.
      private final Map<byte [], WriterLength> writers =
        new TreeMap<byte [], WriterLength>(Bytes.BYTES_COMPARATOR);
      private byte [] previousRow = HConstants.EMPTY_BYTE_ARRAY;
      private final byte [] now = Bytes.toBytes(System.currentTimeMillis());
      private boolean rollRequested = false;

      public void write(ImmutableBytesWritable row, KeyValue kv)
      throws IOException {
        // null input == user explicitly wants to flush
        if (row == null && kv == null) {
          rollWriters();
          return;
        }

        byte [] rowKey = kv.getRow();
        long length = kv.getLength();
        byte [] family = kv.getFamily();
        WriterLength wl = this.writers.get(family);

        // If this is a new column family, create its output directory
        if (wl == null) {
          fs.mkdirs(new Path(outputdir, Bytes.toString(family)));
        }

        // If any of the HFiles for the column families has reached
        // maxsize, we need to roll all the writers
        if (wl != null && wl.written + length >= maxsize) {
          this.rollRequested = true;
        }

        // We can only roll once the current row is finished, though
        if (rollRequested && Bytes.compareTo(this.previousRow, rowKey) != 0) {
          rollWriters();
        }

        // create a new HFile writer, if necessary
        if (wl == null || wl.writer == null) {
          wl = getNewWriter(family, conf);
        }

        // we now have the proper HFile writer. full steam ahead
        kv.updateLatestStamp(this.now);
        wl.writer.append(kv);
        wl.written += length;

        // Remember the row so we can detect the next row transition.
        this.previousRow = rowKey;
      }

      private void rollWriters() throws IOException {
        for (WriterLength wl : this.writers.values()) {
          if (wl.writer != null) {
            LOG.info("Writer=" + wl.writer.getPath() +
                ((wl.written == 0)? "": ", wrote=" + wl.written));
            close(wl.writer);
          }
          wl.writer = null;
          wl.written = 0;
        }
        this.rollRequested = false;
      }

      /**
       * Create a new StoreFile.Writer for the given column family.
       * @param family column family the writer is created for
       * @param conf job configuration
       * @return A WriterLength, containing a new StoreFile.Writer.
       * @throws IOException
       */
      private WriterLength getNewWriter(byte[] family, Configuration conf)
          throws IOException {
        WriterLength wl = new WriterLength();
        Path familydir = new Path(outputdir, Bytes.toString(family));
        Algorithm compression = compressionMap.get(family);
        compression = compression == null ? defaultCompression : compression;
        BloomType bloomType = bloomTypeMap.get(family);
        bloomType = bloomType == null ? BloomType.NONE : bloomType;
        Integer blockSize = blockSizeMap.get(family);
        blockSize = blockSize == null ? HConstants.DEFAULT_BLOCKSIZE : blockSize;
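The snippet breaks off inside getNewWriter, just after the per-family defaults
are resolved. One detail worth calling out from the writer map above: it is
keyed by the raw column-family bytes, so it must be built with
Bytes.BYTES_COMPARATOR. A self-contained sketch of why:

    import java.util.TreeMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyKeyedMap {
      public static void main(String[] args) {
        // Without Bytes.BYTES_COMPARATOR a TreeMap<byte[], ...> throws
        // ClassCastException on insert (byte[] is not Comparable), and a
        // HashMap would compare keys by identity rather than by content.
        TreeMap<byte[], String> writers =
            new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
        writers.put(Bytes.toBytes("cf"), "writer for cf");
        // A fresh copy of the same family name finds the same entry:
        System.out.println(writers.get(Bytes.toBytes("cf")));
      }
    }

Next, the body of createFamilyCompressionMap, which parses the per-family
compression settings back out of the job configuration: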


    Map<byte[], String> stringMap = createFamilyConfValueMap(conf,
        COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[],
        Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
      Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
      compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
  }
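
createFamilyConfValueMap itself is not shown on this page. A minimal sketch of
what it plausibly does, assuming the "family1=value1&family2=value2" encoding
HFileOutputFormat uses when serializing per-family settings (the real
implementation also URL-decodes each token, which is omitted here):

    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.util.Bytes;

    final class FamilyConfValues {
      // Hypothetical stand-in for HFileOutputFormat#createFamilyConfValueMap.
      static Map<byte[], String> createFamilyConfValueMap(Configuration conf,
          String confKey) {
        Map<byte[], String> map =
            new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
        String value = conf.get(confKey, "");
        for (String pair : value.split("&")) {
          int idx = pair.indexOf('=');
          if (idx < 0) {
            continue;  // skip malformed or empty entries
          }
          map.put(Bytes.toBytes(pair.substring(0, idx)),
              pair.substring(idx + 1));
        }
        return map;
      }
    }

The remaining snippets are variants, across HBase versions, of
LoadIncrementalHFiles#copyHFileHalf, which rewrites one half of a split HFile
while preserving the family's compression, bloom-filter, and block-size
settings: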

      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf,
          reference, DataBlockEncoding.NONE);
      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

      int blocksize = familyDescriptor.getBlocksize();
      Algorithm compression = familyDescriptor.getCompression();
      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();

      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
          fs, blocksize)
              .withFilePath(outFile)
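
The chain is cut off mid-expression. A plausible completion under the older,
pre-HFileContext WriterBuilder API; withCompression and withBloomType are
assumptions here, and the exact setters vary by HBase version:

      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
          fs, blocksize)
              .withFilePath(outFile)
              .withCompression(compression)
              .withBloomType(bloomFilterType)
              .build();
      // The method then scans halfReader and appends every KeyValue into
      // halfWriter before closing both.

A newer variant of the same method gathers these settings into an HFileContext
first: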

    try {
      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf);
      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

      int blocksize = familyDescriptor.getBlocksize();
      Algorithm compression = familyDescriptor.getCompression();
      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
      HFileContext hFileContext = new HFileContextBuilder()
                                  .withCompression(compression)
                                  .withChecksumType(HStore.getChecksumType(conf))
                                  .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
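
Again truncated mid-chain. A plausible completion under the HFileContext-based
API (withBlockSize, the three-argument WriterBuilder constructor, and
withFileContext are assumptions based on that API era):

      HFileContext hFileContext = new HFileContextBuilder()
          .withCompression(compression)
          .withChecksumType(HStore.getChecksumType(conf))
          .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
          .withBlockSize(blocksize)
          .build();
      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf, fs)
          .withFilePath(outFile)
          .withBloomType(bloomFilterType)
          .withFileContext(hFileContext)
          .build();

An older variant of the same method uses the four-argument HalfStoreFileReader
constructor: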

    try {
      halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference);
      Map<byte[], byte[]> fileInfo = halfReader.loadFileInfo();

      int blocksize = familyDescriptor.getBlocksize();
      Algorithm compression = familyDescriptor.getCompression();
      BloomType bloomFilterType = familyDescriptor.getBloomFilterType();

      halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
          fs, blocksize)
              .withFilePath(outFile)
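
Beyond being carried around as a per-family setting, Compression.Algorithm can
also produce the actual compression streams. A minimal sketch, assuming the
getCompressor/createCompressionStream/returnCompressor methods on the enum
(present in this era of HBase, but worth verifying against your version):

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.io.compress.Compressor;

    public class CompressBlock {
      public static void main(String[] args) throws Exception {
        Compression.Algorithm algo = Compression.Algorithm.GZ;
        Compressor compressor = algo.getCompressor();
        try {
          ByteArrayOutputStream baos = new ByteArrayOutputStream();
          // Wrap the destination stream with the codec; the last argument
          // is the downstream buffer size (0 = no extra buffering).
          OutputStream out = algo.createCompressionStream(baos, compressor, 0);
          out.write("hello hbase hello hbase".getBytes("UTF-8"));
          out.close();
          System.out.println("compressed to " + baos.size() + " bytes");
        } finally {
          // Compressors are pooled; always hand them back.
          algo.returnCompressor(compressor);
        }
      }
    }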