Package org.apache.drill.exec.physical.base

Examples of org.apache.drill.exec.physical.base.ScanStats
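Every example below follows the same basic pattern: a group scan estimates how much data it will read and wraps the numbers in a ScanStats(groupScanProperty, recordCount, cpuCost, diskCost); the planner later reads them back through getRecordCount(), getCpuCost() and getDiskCost() when costing the scan. The following minimal sketch illustrates that pattern. The helper class and method names are made up for illustration only, and it assumes GroupScanProperty is the nested enum of ScanStats that the snippets reference.

import org.apache.drill.exec.physical.base.ScanStats;
import org.apache.drill.exec.physical.base.ScanStats.GroupScanProperty;

// Hypothetical helper, for illustration only -- not part of Drill.
public class ScanStatsSketch {

  // File-style estimate, as in the file-based group scans below:
  // guess roughly 1 KB per row and report the byte count as the disk cost.
  public static ScanStats fromEstimatedBytes(long totalBytes) {
    long estRowCount = totalBytes / 1024;
    return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, estRowCount, 1, totalBytes);
  }

  // Exact-count variant, as in the snippets that know their row count up front.
  public static ScanStats fromKnownRowCount(long rowCount, int columnCount) {
    return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT, rowCount, 1, rowCount * columnCount);
  }
}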


  // TODO: this method is the same as the one for ScanPrel... eventually we should consolidate
  // this and a few other methods into a common base class which would be extended
  // by both the logical and physical rels.
  @Override
  public RelOptCost computeSelfCost(RelOptPlanner planner) {
    ScanStats stats = this.groupScan.getScanStats();
    int columnCount = this.getRowType().getFieldCount();

    if (PrelUtil.getSettings(getCluster()).useDefaultCosting()) {
      return planner.getCostFactory().makeCost(stats.getRecordCount() * columnCount, stats.getCpuCost(), stats.getDiskCost());
    }

    // double rowCount = RelMetadataQuery.getRowCount(this);
    double rowCount = stats.getRecordCount();

    double cpuCost = rowCount * columnCount; // for now, assume cpu cost is proportional to row count.
    // Even though the scan reads from disk, in the currently generated plans all plans will
    // need to read the same amount of data, so keeping the disk io cost at 0 is ok for now.
    // In the future we might consider alternative scans that go against projections or
    // other physical layouts that change the amount of data read; such alternatives
    // would affect both the cpu and io cost.
    return planner.getCostFactory().makeCost(rowCount, cpuCost, 0);
  }
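For concreteness (numbers invented for illustration): a scan whose ScanStats reports 1,000,000 records with 8 projected columns ends up with rowCount = 1,000,000, cpuCost = 8,000,000 and a disk cost of 0, so under this model scans over the same data are distinguished only by how many columns they project.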


  @Override
  public double getRows() {
    return this.groupScan.getScanStats().getRecordCount();
  }


  @Override
  public ScanStats getScanStats() {
    // fall back to a rough default of 20 columns when no column list was specified
    int columnCount = columns == null ? 20 : columns.size();
    return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT, rowCount, 1, rowCount * columnCount);
  }

    for (CompleteFileWork work : chunks) {
      data += work.getTotalBytes();
    }

    // rough estimate: assume about 1 KB per row
    long estRowCount = data / 1024;
    return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, estRowCount, 1, data);
  }

      for (InputSplit split : inputSplits) {
        data += split.getLength();
      }

      // rough estimate: assume about 1 KB per row
      long estRowCount = data / 1024;
      return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, estRowCount, 1, data);
    } catch (IOException e) {
      throw new DrillRuntimeException(e);
    }
  }

  @Override
  public ScanStats getScanStats() {
    int rowCount = (int) ((scanSizeInBytes / statsCalculator.getAvgRowSizeInBytes()) * (hbaseScanSpec.getFilter() != null ? 0.5 : 1));
    // the following calculation is not precise, since 'columns' could specify column families (CFs)
    // while getColsPerRow() returns the number of qualifiers.
    float diskCost = scanSizeInBytes * ((columns == null || columns.isEmpty()) ? 1 : columns.size() / statsCalculator.getColsPerRow());
    return new ScanStats(GroupScanProperty.NO_EXACT_ROW_COUNT, rowCount, 1, diskCost);
  }
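As a worked example (illustrative numbers only): with scanSizeInBytes of 1 GB and an average row size of 1 KB, the estimate comes to roughly 1,000,000 rows, halved to about 500,000 when the scan spec carries a filter, i.e. an assumed 50% filter selectivity.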

      DB db = client.getDB(scanSpec.getDbName());
      db.setReadPreference(ReadPreference.nearest());
      DBCollection collection = db.getCollectionFromString(scanSpec.getCollectionName());
      // ask the server for collection statistics: an exact document count and the collection's data size
      CommandResult stats = collection.getStats();
      return new ScanStats(GroupScanProperty.EXACT_ROW_COUNT,
          stats.getLong(COUNT), 1, (float) stats.getDouble(SIZE));
    } catch (Exception e) {
      throw new DrillRuntimeException(e.getMessage(), e);
    }
  }

