// slice a small window off the shared buffer to parse the page header from; the header's
// serialized length is unknown until it has been read, so we grab at most 200 bytes (or
// whatever remains in the buffer). The hard-coded 200 byte cap may be a problem for
// unusually large headers.
ByteBuf allData = parentColumnReader.parentReader.getBufferWithAllData();
int readPos = (int) parentColumnReader.readPositionInBuffer;
ByteBufInputStream f = new ByteBufInputStream(allData.slice(
    readPos, Math.min(200, allData.capacity() - readPos)));
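// readPageHeader() consumes exactly the serialized header bytes from the stream, so the
// drop in available() tells us how long the header was in the buffer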
int before = f.available();
PageHeader pageHeader = readPageHeader(f);
int length = before - f.available();
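// re-slice the buffer starting just past the header, sized to exactly the compressed page body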
f = new ByteBufInputStream(allData.slice(
    readPos + length, pageHeader.getCompressed_page_size()));
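// decompress the page body using the codec recorded in this column chunk's metadata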
BytesInput bytesIn = parentColumnReader.parentReader.getCodecFactoryExposer()
    .decompress(BytesInput.from(f, pageHeader.getCompressed_page_size()),
        pageHeader.getUncompressed_page_size(),
        parentColumnReader.columnChunkMetaData.getCodec());
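// assemble the uncompressed page, converting the thrift encoding enums into parquet-mr encodings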
currentPage = new Page(
    bytesIn,
    pageHeader.getData_page_header().getNum_values(),
    pageHeader.getUncompressed_page_size(),
    ParquetStorageEngine.parquetMetadataConverter.getEncoding(pageHeader.getData_page_header().getRepetition_level_encoding()),
    ParquetStorageEngine.parquetMetadataConverter.getEncoding(pageHeader.getData_page_header().getDefinition_level_encoding()),
    ParquetStorageEngine.parquetMetadataConverter.getEncoding(pageHeader.getData_page_header().getEncoding())
);
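// advance the read position past the header and the compressed page body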
parentColumnReader.readPositionInBuffer += pageHeader.getCompressed_page_size() + length;
byteLength = pageHeader.getUncompressed_page_size();
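// defensive guard; note that new Page(...) above can never return null, so as written
// this check will never fire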
if (currentPage == null) {
return false;
}
// if the buffer holding each page's data is not large enough to hold the current page, re-allocate, with a little extra space
if (pageHeader.getUncompressed_page_size() > pageDataByteArray.length) {
pageDataByteArray = new byte[pageHeader.getUncompressed_page_size() + 100];
}
// TODO - would like to get this into the mainline, hopefully before alpha
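// note: toByteArray() materializes the page bytes into a freshly allocated array, so the
// assignment below replaces the buffer (re)allocated above rather than filling it; the
// reuse above only pays off once the bytes can be read into a caller-supplied array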
pageDataByteArray = currentPage.getBytes().toByteArray();
if (parentColumnReader.columnDescriptor.getMaxDefinitionLevel() != 0) {