// Release the buffer that backed the previously returned page before reading the next one.
if (lastPage != null) {
  lastPage.release();
  lastPage = null;
}
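// Scan page headers until the next data page is found or all of this column
// chunk's values (metaData.getValueCount() in total) have been read.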
while (valueReadSoFar < metaData.getValueCount()) {
  PageHeader pageHeader = Util.readPageHeader(in);
  switch (pageHeader.type) {
    case DICTIONARY_PAGE:
      // A column chunk has at most one dictionary page; keep the first and skip duplicates.
      if (dictionaryPage == null) {
        dictionaryPage = new DictionaryPage(
            decompressor.decompress(
                BytesInput.from(in, pageHeader.compressed_page_size),
                pageHeader.getUncompressed_page_size()),
            pageHeader.uncompressed_page_size,
            parquetMetadataConverter.getEncoding(pageHeader.dictionary_page_header.encoding));
      } else {
        in.skip(pageHeader.compressed_page_size);
      }
      break;
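    // A data page ends the scan: read its compressed body into a direct
    // buffer and hand the decompressed page back to the caller.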
    case DATA_PAGE:
      valueReadSoFar += pageHeader.data_page_header.getNum_values();
      // Keep a reference so the buffer can be released on the next call.
      ByteBuf buf = allocator.buffer(pageHeader.compressed_page_size);
      lastPage = buf;
      ByteBuffer buffer = buf.nioBuffer(0, pageHeader.compressed_page_size);
      CompatibilityUtil.getBuf(in, buffer, pageHeader.compressed_page_size);
      return new Page(
          decompressor.decompress(
              BytesInput.from(buffer, 0, pageHeader.compressed_page_size),
              pageHeader.getUncompressed_page_size()),
          pageHeader.data_page_header.num_values,
          pageHeader.uncompressed_page_size,
          parquetMetadataConverter.fromParquetStatistics(
              pageHeader.data_page_header.statistics, columnDescriptor.getType()),
          parquetMetadataConverter.getEncoding(pageHeader.data_page_header.repetition_level_encoding),
          parquetMetadataConverter.getEncoding(pageHeader.data_page_header.definition_level_encoding),
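          // Assumed completion of the truncated call, based on the classic
          // parquet-format API: the final Page argument is the values encoding
          // (DataPageHeader.encoding), and unknown page types are skipped.
          parquetMetadataConverter.getEncoding(pageHeader.data_page_header.encoding));
    default:
      in.skip(pageHeader.compressed_page_size);
      break;
  }
}
// No more data pages remain in this column chunk.
return null;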