// locals reused by the column scan loop further below
ColumnDescriptor column;
ColumnChunkMetaData columnChunkMetaData;
int columnsToScan = 0;
MaterializedField field;
ParquetMetadataConverter metaConverter = new ParquetMetadataConverter();
FileMetaData fileMetaData;
// TODO - figure out how to deal with this better once we add nested reading; note also where this map is used below
// store a map from column name to SchemaElement so converted types (when non-null) can be looked up below
HashMap<String, SchemaElement> schemaElements = new HashMap<>();
fileMetaData = metaConverter.toParquetMetadata(ParquetFileWriter.CURRENT_VERSION, footer);
for (SchemaElement se : fileMetaData.getSchema()) {
schemaElements.put(se.getName(), se);
}
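// A sketch of the intended lookup (assumed usage, not code from this file): for a
// flat (non-nested) column, the first path segment keys the map, and the thrift
// SchemaElement supplies the converted type that ColumnDescriptor does not carry:
//   SchemaElement se = schemaElements.get(column.getPath()[0]);
//   ConvertedType convertedType =
//       (se != null && se.isSetConverted_type()) ? se.getConverted_type() : null;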
// loop to add up the lengths of the fixed-width columns and build the schema
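// One plausible shape for that loop (a sketch only; allFieldsFixedLength,
// bitWidthAllFixedFields, and getTypeLengthInBits are assumed/hypothetical names):
//   for (ColumnDescriptor col : footer.getFileMetaData().getSchema().getColumns()) {
//     columnsToScan++;
//     if (col.getType() == PrimitiveType.PrimitiveTypeName.BINARY) {
//       allFieldsFixedLength = false; // variable-width, nothing to add
//     } else if (col.getType() == PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
//       // fixed-length byte arrays carry their width on the thrift SchemaElement
//       bitWidthAllFixedFields += schemaElements.get(col.getPath()[0]).getType_length() * 8;
//     } else {
//       bitWidthAllFixedFields += getTypeLengthInBits(col.getType());
//     }
//   }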