int columnsToScan = 0;
mockRecordsRead = 0;
MaterializedField field;
FileMetaData fileMetaData;
logger.debug("Reading row group({}) with {} records in file {}.", rowGroupIndex, footer.getBlocks().get(rowGroupIndex).getRowCount(),
hadoopPath.toUri().getPath());
totalRecordsRead = 0;
// TODO - figure out how to deal with this better once we add nested reading; note also where this map is used below
// store a map from column name to converted types if they are non-null
HashMap<String, SchemaElement> schemaElements = new HashMap<>();
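// Convert the parquet-mr footer into Thrift FileMetaData so each column's SchemaElement (converted type, fixed type length) can be looked up by name.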
fileMetaData = new ParquetMetadataConverter().toParquetMetadata(ParquetFileWriter.CURRENT_VERSION, footer);
for (SchemaElement se : fileMetaData.getSchema()) {
schemaElements.put(se.getName(), se);
}
// loop to add up the length of the fixed width columns and build the schema
for (int i = 0; i < columns.size(); ++i) {
column = columns.get(i);
logger.debug("name: " + fileMetaData.getSchema().get(i).name);
SchemaElement se = schemaElements.get(column.getPath()[0]);
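// Translate the Parquet primitive type, its converted type and fixed type length into the corresponding Drill MajorType.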
MajorType mt = ParquetToDrillTypeConverter.toMajorType(column.getType(), se.getType_length(), getDataMode(column), se);
field = MaterializedField.create(toFieldName(column.getPath()), mt);
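// Skip columns that are not part of the projected field selection.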
if (!fieldSelected(field)) {
continue;