Package org.apache.drill.exec.store.parquet.columnreaders

Examples of org.apache.drill.exec.store.parquet.columnreaders.ParquetRecordReader


    // Fragment of a performance test: it opens the same Parquet file 25 times,
    // timing each pass and accumulating the total number of rows read.
    int totalRowCount = 0;

    FileSystem fs = new CachedSingleFileSystem(fileName);
    BufferAllocator allocator = new TopLevelAllocator();
    for (int i = 0; i < 25; i++) {
      // One reader per pass: row group 0 of fileName, using the file's footer metadata
      // and the projected columns.
      ParquetRecordReader rr = new ParquetRecordReader(context, 256000, fileName, 0, fs,
          new CodecFactoryExposer(dfsConfig), f.getParquetMetadata(), columns);
      // Bind the reader to a test output mutator backed by the allocator.
      TestOutputMutator mutator = new TestOutputMutator(allocator);
      rr.setup(mutator);
      Stopwatch watch = new Stopwatch(); // Guava Stopwatch; current Guava uses Stopwatch.createStarted()
      watch.start();

      // Drain the reader: next() returns the number of rows loaded, 0 when exhausted.
      int rowCount = 0;
      while ((rowCount = rr.next()) > 0) {
        totalRowCount += rowCount;
      }
      // Elapsed time for this pass, in milliseconds.
      System.out.println(String.format("Time completed: %s. ", watch.elapsed(TimeUnit.MILLISECONDS)));
      rr.cleanup();
    }

    allocator.close();
    System.out.println(String.format("Total row count %s", totalRowCount));
  }
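
The snippet drains the reader in batches: setup(...) binds the output mutator, next() returns the number of rows materialized per call (0 once the row group is exhausted), and cleanup() releases the reader's resources. A minimal standalone sketch of the same drain-and-time loop follows; the BatchSource interface is a hypothetical stand-in for the reader, and Stopwatch.createStarted() replaces the Guava Stopwatch constructor, which is no longer public in current Guava releases.

    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Stopwatch;

    public class ReadLoopSketch {

      /** Hypothetical stand-in for the reader: returns rows loaded per batch, 0 when done. */
      interface BatchSource {
        int next();
      }

      /** Drains a source batch by batch, timing the pass and returning the total row count. */
      static long drain(BatchSource source) {
        Stopwatch watch = Stopwatch.createStarted(); // modern replacement for new Stopwatch()
        long total = 0;
        int rowCount;
        while ((rowCount = source.next()) > 0) {
          total += rowCount;
        }
        System.out.println(String.format("Time completed: %s ms.", watch.elapsed(TimeUnit.MILLISECONDS)));
        return total;
      }
    }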


          // Read and cache the Parquet footer for this entry's file.
          footers.put(e.getPath(),
              ParquetFileReader.readFooter(fs.getConf(), new Path(e.getPath())));
        }
        // Use this ParquetRecordReader only when the new Parquet reader option is disabled
        // and the file's schema contains no complex (nested) types.
        if (!context.getOptions().getOption(ExecConstants.PARQUET_NEW_RECORD_READER).bool_val
            && !isComplex(footers.get(e.getPath()))) {
          readers.add(
              new ParquetRecordReader(
                  context, e.getPath(), e.getRowGroupIndex(), fs,
                  rowGroupScan.getStorageEngine().getCodecFactoryExposer(),
                  footers.get(e.getPath()),
                  rowGroupScan.getColumns()
              )
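
The fragment above caches each file's footer in a footers map before deciding which reader to build, so the footer is read at most once per file. A standalone sketch of that caching step against parquet-hadoop is shown below; FooterCacheSketch and readFooters are illustrative names, and it assumes the org.apache.parquet.hadoop packages of recent parquet-mr releases (readFooter(Configuration, Path) still exists there, though it is deprecated in favor of ParquetFileReader.open).

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.parquet.hadoop.ParquetFileReader;
    import org.apache.parquet.hadoop.metadata.ParquetMetadata;

    public class FooterCacheSketch {

      /** Reads each file's footer at most once so every row group of that file can reuse it. */
      static Map<String, ParquetMetadata> readFooters(Configuration conf, List<String> paths)
          throws IOException {
        Map<String, ParquetMetadata> footers = new HashMap<>();
        for (String path : paths) {
          if (!footers.containsKey(path)) {
            footers.put(path, ParquetFileReader.readFooter(conf, new Path(path)));
          }
        }
        return footers;
      }
    }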