Package parquet.bytes

Examples of parquet.bytes.BytesInput$CapacityBAOSBytesInput
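
Before the extracted snippets, a minimal, self-contained sketch of the BytesInput API they all build on. This is a hedged example, assuming the pre-org.apache parquet-common jar (package parquet.bytes) is on the classpath; BytesInput.from(byte[], int, int), size(), toByteArray() and writeAllTo(OutputStream) are the calls the snippets below rely on.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import parquet.bytes.BytesInput;

public class BytesInputSketch {
  public static void main(String[] args) throws IOException {
    byte[] page = {1, 2, 3, 4, 5, 6, 7, 8};

    // Wrap a slice of an existing array without copying it.
    BytesInput slice = BytesInput.from(page, 2, 4);
    System.out.println("size = " + slice.size());            // 4

    // Materialize the slice as a standalone byte[].
    byte[] copy = slice.toByteArray();
    System.out.println("first byte = " + copy[0]);            // 3

    // A BytesInput is meant to be consumed once, so re-wrap before streaming it out.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    BytesInput.from(page, 2, 4).writeAllTo(out);
    System.out.println("written = " + out.size());            // 4
  }
}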


    /**
     * Wraps the next {@code size} bytes of the in-memory buffer as a {@link BytesInput} and advances the read position.
     * @param size the size of the page
     * @return the page
     * @throws IOException
     */
    public BytesInput readAsBytesInput(int size) throws IOException {
      final BytesInput r = BytesInput.from(this.buf, this.pos, size);
      this.pos += size;
      return r;
    }
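The method above is the whole trick behind in-memory page reads: wrap the next size bytes of a shared buffer as a BytesInput (no copy) and advance the position, so consecutive calls walk the buffer page by page. A hedged, standalone sketch of that pattern follows; buf, pos and the page sizes are illustrative, not taken from the original class.

import parquet.bytes.BytesInput;

public class SequentialSlices {
  private final byte[] buf = new byte[64];   // stand-in for the column chunk buffer
  private int pos = 0;

  BytesInput readAsBytesInput(int size) {
    // Same shape as the snippet above: slice without copying, then advance.
    BytesInput r = BytesInput.from(buf, pos, size);
    pos += size;
    return r;
  }

  public static void main(String[] args) {
    SequentialSlices reader = new SequentialSlices();
    BytesInput first = reader.readAsBytesInput(16);
    BytesInput second = reader.readAsBytesInput(32);
    System.out.println(first.size() + " + " + second.size() + " bytes sliced");
  }
}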


    } else {
      logger.warn("This Hadoop implementation does not support a " + codecName +
        " direct decompression codec interface. " +
        "Direct decompression is available only on *nix systems with Hadoop 2.3 or greater. " +
        "Read operations will be a little slower. ");
      BytesInput outBytesInp = this.decompress(
        new HadoopByteBufBytesInput(inpBuffer, 0, inpBuffer.limit()),
        uncompressedSize,
        codecName);
      // COPY the data back into the output buffer.
      // (DrillBufs can only refer to direct memory, so we cannot pass back a BytesInput backed
      // by a byte array).
      outBuffer.put(outBytesInp.toByteArray());
    }
    return new HadoopByteBufBytesInput(outBuffer, 0, outBuffer.limit());
  }
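When the direct-decompression path is unavailable, the snippet above falls back to a heap-backed BytesInput and then pays an extra copy into the direct output buffer, because DrillBufs can only reference direct memory. Below is a small sketch of that copy-back step using plain java.nio; the byte array stands in for outBytesInp.toByteArray().

import java.nio.ByteBuffer;

public class HeapToDirectCopy {
  public static void main(String[] args) {
    byte[] decompressed = new byte[4096];   // pretend result of outBytesInp.toByteArray()

    ByteBuffer direct = ByteBuffer.allocateDirect(decompressed.length);
    direct.put(decompressed);               // the extra copy the comment above warns about
    direct.flip();                          // ready to be read from position 0

    System.out.println("direct = " + direct.isDirect() + ", remaining = " + direct.remaining());
  }
}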

      if (columnChunkMetaData.getDictionaryPageOffset() > 0) {
        f.seek(columnChunkMetaData.getDictionaryPageOffset());
        PageHeader pageHeader = Util.readPageHeader(f);
        assert pageHeader.type == PageType.DICTIONARY_PAGE;

        BytesInput bytesIn;
        ByteBuf uncompressedData = allocateBuffer(pageHeader.getUncompressed_page_size());
        allocatedDictionaryBuffers.add(uncompressedData);
        if (parentColumnReader.columnChunkMetaData.getCodec() == CompressionCodecName.UNCOMPRESSED) {
          dataReader.getPageAsBytesBuf(uncompressedData, pageHeader.compressed_page_size);
          bytesIn = parentColumnReader.parentReader.getCodecFactoryExposer().getBytesInput(uncompressedData,

    do {
      pageHeader = dataReader.readPageHeader();
      if (pageHeader.getType() == PageType.DICTIONARY_PAGE) {

        // TODO: Handle buffer allocation exception
        BytesInput bytesIn;
        ByteBuf uncompressedData = allocateBuffer(pageHeader.getUncompressed_page_size());
        allocatedDictionaryBuffers.add(uncompressedData);
        if (parentColumnReader.columnChunkMetaData.getCodec() == CompressionCodecName.UNCOMPRESSED) {
          dataReader.getPageAsBytesBuf(uncompressedData, pageHeader.compressed_page_size);
          bytesIn = parentColumnReader.parentReader.getCodecFactoryExposer().getBytesInput(uncompressedData,
            pageHeader.getUncompressed_page_size());
        } else {
          ByteBuf compressedData = allocateBuffer(pageHeader.compressed_page_size);
          dataReader.getPageAsBytesBuf(compressedData, pageHeader.compressed_page_size);
          bytesIn = parentColumnReader.parentReader.getCodecFactoryExposer()
            .decompress(parentColumnReader.columnChunkMetaData.getCodec(),
              compressedData,
              uncompressedData,
              pageHeader.compressed_page_size,
              pageHeader.getUncompressed_page_size());
          compressedData.release();
        }
        DictionaryPage page = new DictionaryPage(
            bytesIn,
            pageHeader.uncompressed_page_size,
            pageHeader.dictionary_page_header.num_values,
            parquet.column.Encoding.valueOf(pageHeader.dictionary_page_header.encoding.name())
        );
        this.dictionary = page.getEncoding().initDictionary(parentColumnReader.columnDescriptor, page);
      }
    } while (pageHeader.getType() == PageType.DICTIONARY_PAGE);

    // TODO: Handle buffer allocation exception
    BytesInput bytesIn;
    ByteBuf uncompressedData = allocateBuffer(pageHeader.getUncompressed_page_size());
    allocatedBuffers.add(uncompressedData);
    if (parentColumnReader.columnChunkMetaData.getCodec() == CompressionCodecName.UNCOMPRESSED) {
      dataReader.getPageAsBytesBuf(uncompressedData, pageHeader.compressed_page_size);
      bytesIn = parentColumnReader.parentReader.getCodecFactoryExposer().getBytesInput(uncompressedData,
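The do/while above keeps reading page headers, decoding (and, for compressed chunks, decompressing) every dictionary page it finds, and exits as soon as a non-dictionary header has been read; that first data page is then handled by the code following the loop. Here is a structural sketch of that control flow, with a plain enum standing in for parquet's PageHeader/PageType; the header sequence is illustrative.

import java.util.Arrays;
import java.util.Iterator;

public class DictionaryThenDataLoop {
  enum PageKind { DICTIONARY_PAGE, DATA_PAGE }

  public static void main(String[] args) {
    // Stand-in for dataReader.readPageHeader(): a dictionary page followed by data pages.
    Iterator<PageKind> headers =
        Arrays.asList(PageKind.DICTIONARY_PAGE, PageKind.DATA_PAGE, PageKind.DATA_PAGE).iterator();

    PageKind pageHeader;
    do {
      pageHeader = headers.next();
      if (pageHeader == PageKind.DICTIONARY_PAGE) {
        // In the real reader: decompress if needed, build a DictionaryPage, initDictionary(...).
        System.out.println("decode dictionary page");
      }
    } while (pageHeader == PageKind.DICTIONARY_PAGE);

    // pageHeader now refers to the first data page, handled after the loop.
    System.out.println("handle " + pageHeader);
  }
}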

