Examples of DrillAggregateRel


Examples of org.apache.drill.exec.planner.logical.DrillAggregateRel

    // Fragment: tail of the rule's matches(RelOptRuleCall) override; the hash aggregate
    // rule only fires when hash aggregation is enabled in the planner settings.
    return PrelUtil.getPlannerSettings(call.getPlanner()).isHashAggEnabled();
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final DrillAggregateRel aggregate = (DrillAggregateRel) call.rel(0);
    final RelNode input = call.rel(1);

    if (aggregate.containsDistinctCall() || aggregate.getGroupCount() == 0) {
      // currently, don't use HashAggregate if any of the logical aggrs contains DISTINCT or
      // if there are no grouping keys
      return;
    }

    RelTraitSet traits = null;

    try {
      if (aggregate.getGroupSet().isEmpty()) {
        DrillDistributionTrait singleDist = DrillDistributionTrait.SINGLETON;
        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(singleDist);
        createTransformRequest(call, aggregate, input, traits);
      } else {
        // hash distribute on all grouping keys
View Full Code Here
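
The snippet above is cut off right after the comment about hash-distributing on all grouping keys. Judging from the StreamAggPrule example further down this page, the omitted continuation of the else branch most likely builds a HASH_DISTRIBUTED trait over all grouping keys and issues the same kind of transform request. The following is a hedged sketch, not the verbatim continuation of the source, and it assumes getDistributionField and createTransformRequest behave as in the StreamAggPrule snippet below:

        // Sketch of the likely continuation inside the else branch (not verbatim; the
        // full source may also try other distributions and a two-phase plan):
        final DrillDistributionTrait distOnAllKeys =
            new DrillDistributionTrait(DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
                                       ImmutableList.copyOf(getDistributionField(aggregate, true)));

        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(distOnAllKeys);
        createTransformRequest(call, aggregate, input, traits);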

Examples of org.apache.drill.exec.planner.logical.DrillAggregateRel

    // Fragment: tail of a constructor that passes a per-instance description string
    // ("ConvertCountToDirectScan:<id>") to the superclass.
    super(rule, "ConvertCountToDirectScan:" + id);
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final DrillAggregateRel agg = (DrillAggregateRel) call.rel(0);
    final DrillScanRel scan = (DrillScanRel) call.rel(call.rels.length - 1);
    final DrillProjectRel proj = call.rels.length == 3 ? (DrillProjectRel) call.rel(1) : null;

    GroupScan oldGrpScan = scan.getGroupScan();

    // Only apply the rule when:
    //    1) the scan knows the exact row count (getSize()),
    //    2) there is no GROUP BY key,
    //    3) there is only one aggregate function (checked below to be count(*)), and
    //    4) there is no DISTINCT aggregate call.
    if (!(oldGrpScan.getScanStats().getGroupScanProperty().hasExactRowCount()
        && agg.getGroupCount() == 0
        && agg.getAggCallList().size() == 1
        && !agg.containsDistinctCall())) {
      return;
    }

    AggregateCall aggCall = agg.getAggCallList().get(0);

    if (aggCall.getAggregation().getName().equals("COUNT")) {

      long cnt = 0;
      //  count(*)              ==> empty arg list       ==> rowCount
      //  count(not-null input) ==> single non-null arg  ==> rowCount
      if (aggCall.getArgList().isEmpty() ||
          (aggCall.getArgList().size() == 1 &&
           !agg.getChild().getRowType().getFieldList().get(aggCall.getArgList().get(0).intValue()).getType().isNullable())) {
        cnt = (long) oldGrpScan.getScanStats().getRecordCount();
      } else if (aggCall.getArgList().size() == 1) {
        // count(columnName) ==> Agg(Scan) ==> columnValueCount
        int index = aggCall.getArgList().get(0);
        String columnName = scan.getRowType().getFieldNames().get(index).toLowerCase();

        cnt = oldGrpScan.getColumnValueCount(SchemaPath.getSimplePath(columnName));
      } else {
        return; // do nothing.
      }

      RelDataType scanRowType = getCountDirectScanRowType(agg.getCluster().getTypeFactory());

      final ScanPrel newScan = ScanPrel.create(scan,
          scan.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON), getCountDirectScan(cnt),
          scanRowType);

      List<RexNode> exprs = Lists.newArrayList();
      exprs.add(RexInputRef.of(0, scanRowType));

      final ProjectPrel newProj = new ProjectPrel(agg.getCluster(), agg.getTraitSet().plus(Prel.DRILL_PHYSICAL)
          .plus(DrillDistributionTrait.SINGLETON), newScan, exprs, agg.getRowType());

      call.transformTo(newProj);
    }

  }
View Full Code Here
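
Neither getCountDirectScanRowType(...) nor getCountDirectScan(cnt) is shown in the snippet. Conceptually, the rewrite replaces Agg(count) over Scan with a Project over a direct scan that returns the precomputed count as a single BIGINT value. Below is a minimal, hypothetical sketch of the row-type helper using the standard Calcite type-factory API; the class wrapper, column name, and exact construction are assumptions, and older Drill builds use the org.eigenbase equivalents of these Calcite classes:

import java.util.Collections;

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.sql.type.SqlTypeName;

class CountDirectScanTypes {
  // Hypothetical sketch: the direct scan exposes one non-nullable BIGINT column
  // that carries the precomputed row (or column-value) count.
  static RelDataType getCountDirectScanRowType(RelDataTypeFactory typeFactory) {
    RelDataType bigInt = typeFactory.createSqlType(SqlTypeName.BIGINT);
    return typeFactory.createStructType(
        Collections.singletonList(bigInt),       // single column holding the count
        Collections.singletonList("count"));     // column name is an assumption
  }
}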

Examples of org.apache.drill.exec.planner.logical.DrillAggregateRel

    // Fragment: tail of the rule's matches(RelOptRuleCall) override; the streaming
    // aggregate rule only fires when streaming aggregation is enabled in the planner settings.
    return PrelUtil.getPlannerSettings(call.getPlanner()).isStreamAggEnabled();
  }

  @Override
  public void onMatch(RelOptRuleCall call) {
    final DrillAggregateRel aggregate = (DrillAggregateRel) call.rel(0);
    final RelNode input = aggregate.getChild();
    final RelCollation collation = getCollation(aggregate);
    RelTraitSet traits = null;

    if (aggregate.containsDistinctCall()) {
      // currently, don't use StreamingAggregate if any of the logical aggrs contains DISTINCT
      return;
    }

    try {
      if (aggregate.getGroupSet().isEmpty()) {
        DrillDistributionTrait singleDist = DrillDistributionTrait.SINGLETON;
        final RelTraitSet singleDistTrait = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(singleDist);

        if (create2PhasePlan(call, aggregate)) {
          traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL);

          RelNode convertedInput = convert(input, traits);
          new SubsetTransformer<DrillAggregateRel, InvalidRelException>(call){

            @Override
            public RelNode convertChild(final DrillAggregateRel join, final RelNode rel) throws InvalidRelException {
              DrillDistributionTrait toDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
              RelTraitSet traits = newTraitSet(Prel.DRILL_PHYSICAL, toDist);
              RelNode newInput = convert(input, traits);

              StreamAggPrel phase1Agg = new StreamAggPrel(aggregate.getCluster(), traits, newInput,
                  aggregate.getGroupSet(),
                  aggregate.getAggCallList(),
                  OperatorPhase.PHASE_1of2);

              UnionExchangePrel exch =
                  new UnionExchangePrel(phase1Agg.getCluster(), singleDistTrait, phase1Agg);

              return new StreamAggPrel(aggregate.getCluster(), singleDistTrait, exch,
                  aggregate.getGroupSet(),
                  phase1Agg.getPhase2AggCalls(),
                  OperatorPhase.PHASE_2of2);
            }
          }.go(aggregate, convertedInput);

        } else {
          createTransformRequest(call, aggregate, input, singleDistTrait);
        }
      } else {
        // hash distribute on all grouping keys
        final DrillDistributionTrait distOnAllKeys =
            new DrillDistributionTrait(DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
                                       ImmutableList.copyOf(getDistributionField(aggregate, true)));

        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(collation).plus(distOnAllKeys);
        createTransformRequest(call, aggregate, input, traits);

        // hash distribute on one grouping key
        DrillDistributionTrait distOnOneKey =
            new DrillDistributionTrait(DrillDistributionTrait.DistributionType.HASH_DISTRIBUTED,
                                       ImmutableList.copyOf(getDistributionField(aggregate, false)));

        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(collation).plus(distOnOneKey);
        // Temporarily commenting out the single distkey plan since a few tpch queries (e.g 01.sql) get stuck
        // in VolcanoPlanner.canonize() method. Note that the corresponding single distkey plan for HashAggr works
        // ok.  One possibility is that here we have dist on single key but collation on all keys, so that
        // might be causing some problem.
        // TODO: re-enable this plan after resolving the issue.
        // createTransformRequest(call, aggregate, input, traits);

        if (create2PhasePlan(call, aggregate)) {
          traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL);
          RelNode convertedInput = convert(input, traits);

          new SubsetTransformer<DrillAggregateRel, InvalidRelException>(call){

            @Override
            public RelNode convertChild(final DrillAggregateRel aggregate, final RelNode rel) throws InvalidRelException {
              DrillDistributionTrait toDist = rel.getTraitSet().getTrait(DrillDistributionTraitDef.INSTANCE);
              RelTraitSet traits = newTraitSet(Prel.DRILL_PHYSICAL, collation, toDist);
              RelNode newInput = convert(input, traits);

              StreamAggPrel phase1Agg = new StreamAggPrel(aggregate.getCluster(), traits, newInput,
                  aggregate.getGroupSet(),
                  aggregate.getAggCallList(),
                  OperatorPhase.PHASE_1of2);

              int numEndPoints = PrelUtil.getSettings(phase1Agg.getCluster()).numEndPoints();

              HashToMergeExchangePrel exch =
                  new HashToMergeExchangePrel(phase1Agg.getCluster(), phase1Agg.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(distOnAllKeys),
                      phase1Agg, ImmutableList.copyOf(getDistributionField(aggregate, true)),
                      collation,
                      numEndPoints);

              return new StreamAggPrel(aggregate.getCluster(), exch.getTraitSet(), exch,
                  aggregate.getGroupSet(),
                  phase1Agg.getPhase2AggCalls(),
                  OperatorPhase.PHASE_2of2);
            }
          }.go(aggregate, convertedInput);
        }
View Full Code Here
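
When create2PhasePlan(...) is true, the rule builds a phase-1 StreamAggPrel below an exchange and a phase-2 StreamAggPrel above it, with the phase-2 aggregate calls taken from phase1Agg.getPhase2AggCalls(). For readers unfamiliar with the pattern, here is a minimal, Drill-independent sketch of what the two phases compute for a grouped COUNT; the class and method names are purely illustrative, and merging partial counts with a sum is the standard two-phase treatment of COUNT:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class TwoPhaseCountSketch {
  // Phase 1 (per fragment): partial COUNT per group key.
  static Map<String, Long> phase1(List<String> keys) {
    Map<String, Long> partial = new HashMap<>();
    for (String k : keys) {
      partial.merge(k, 1L, Long::sum);
    }
    return partial;
  }

  // Phase 2 (after the exchange): partial counts for the same key are summed,
  // which is why the phase-2 aggregate call for a COUNT is effectively a SUM.
  static Map<String, Long> phase2(List<Map<String, Long>> partials) {
    Map<String, Long> merged = new HashMap<>();
    for (Map<String, Long> p : partials) {
      p.forEach((k, v) -> merged.merge(k, v, Long::sum));
    }
    return merged;
  }
}

In the Drill plan above, the same split shows up as a StreamAggPrel with OperatorPhase.PHASE_1of2 feeding a UnionExchangePrel or HashToMergeExchangePrel, which in turn feeds a StreamAggPrel with OperatorPhase.PHASE_2of2.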

Examples of org.apache.drill.exec.planner.logical.DrillAggregateRel

  @Override
  public void onMatch(RelOptRuleCall call) {
    if (!PrelUtil.getPlannerSettings(call.getPlanner()).isHashAggEnabled()) {
      return;
    }

    final DrillAggregateRel aggregate = (DrillAggregateRel) call.rel(0);
    final RelNode input = call.rel(1);

    if (aggregate.containsDistinctCall() || aggregate.getGroupCount() == 0) {
      // currently, don't use HashAggregate if any of the logical aggrs contains DISTINCT or
      // if there are no grouping keys
      return;
    }

    RelTraitSet traits = null;

    try {
      if (aggregate.getGroupSet().isEmpty()) {
        DrillDistributionTrait singleDist = DrillDistributionTrait.SINGLETON;
        traits = call.getPlanner().emptyTraitSet().plus(Prel.DRILL_PHYSICAL).plus(singleDist);
        createTransformRequest(call, aggregate, input, traits);
      } else {
        // hash distribute on all grouping keys
View Full Code Here
