Package org.apache.flink.compiler.costs

Examples of org.apache.flink.compiler.costs.Costs
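The snippets below are excerpts from the Flink compiler (optimizer) and its cost-model tests. As a primer, here is a minimal sketch of the Costs arithmetic they rely on; it uses only constructors and methods that appear in the excerpts, and the assumption that the two-argument constructor takes the network cost first and the disk cost second is made purely for illustration.

    Costs free  = new Costs(0, 0);     // a zero-cost marker, as used for the combiners below
    Costs total = new Costs(3, 1);     // assumed argument order: network cost, disk cost

    total.addCosts(free);              // accumulation, as in the cumulative-cost snippets
    int cmp = free.compareTo(total);   // comparison, as in the cost estimator tests;
                                       // negative here, since zero costs are cheaper than (3, 1)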


      // create an input node for the combiner, with the same DOP as the input node
      GroupReduceNode combinerNode = ((GroupReduceNode) node).getCombinerUtilityNode();
      combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());

      SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.SORTED_GROUP_COMBINE);
      combiner.setCosts(new Costs(0, 0));   // the combiner is assigned zero costs
      combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
      // set sorting comparator key info
      combiner.setDriverKeyInfo(in.getLocalStrategyKeys(), in.getLocalStrategySortOrder(), 0);
      // set grouping comparator key info
      combiner.setDriverKeyInfo(this.keyList, 1);


      // create an input node for the combiner, with the same DOP as the input node
      ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
      combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());

      SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.ALL_REDUCE);
      combiner.setCosts(new Costs(0, 0));
      combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
     
      Channel toReducer = new Channel(combiner);
      toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(), in.getShipStrategySortOrder());
      toReducer.setLocalStrategy(in.getLocalStrategy(), in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());

      // create an input node for the combiner, with the same DOP as the input node
      ReduceNode combinerNode = ((ReduceNode) node).getCombinerUtilityNode();
      combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());

      SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.SORTED_PARTIAL_REDUCE, this.keyList);
      combiner.setCosts(new Costs(0, 0));
      combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
     
      Channel toReducer = new Channel(combiner);
      toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(), in.getShipStrategySortOrder());
      toReducer.setLocalStrategy(LocalStrategy.SORT, in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());

    }
   
    SourcePlanNode candidate = new SourcePlanNode(this, "DataSource ("+this.getPactContract().getName()+")");
    candidate.updatePropertiesWithUniqueSets(getUniqueFields());
   
    // charge the file read cost, if the source is file-based and its size is known
    final Costs costs = new Costs();
    if (FileInputFormat.class.isAssignableFrom(getPactContract().getFormatWrapper().getUserCodeClass()) &&
        this.estimatedOutputSize >= 0) {
      estimator.addFileInputCost(this.estimatedOutputSize, costs);
    }

  public Costs getCumulativeCostsShare() {
    if (this.cumulativeCosts == null){
      return null;
    } else {
      Costs result = cumulativeCosts.clone();
      if (this.template != null && this.template.getOutgoingConnections() != null) {
        // split the cumulative costs equally among all outgoing connections
        int outDegree = this.template.getOutgoingConnections().size();
        if (outDegree > 0) {
          result.divideBy(outDegree);
        }
      }

      return result;
    }
  }
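Why getCumulativeCostsShare() divides by the out-degree: when a node's output feeds several successors, each successor should account for only a fraction of the shared lineage's cost, otherwise a plan in which the branches later remerge would charge the common ancestry twice. A minimal sketch of that arithmetic, again using only operations from the excerpts (the constructor's argument order remains an assumption):

    Costs shared = new Costs(8, 4);    // cumulative costs of a node with two consumers
    Costs share = shared.clone();
    share.divideBy(2);                 // each outgoing connection is charged half

    Costs remerged = share.clone();    // a downstream join adds both shares back up,
    remerged.addCosts(share);          // so remerged.compareTo(shared) == 0 and the
                                       // shared lineage is counted exactly once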

    // this node's own costs are the basis; its inputs contribute their cost shares
    this.cumulativeCosts = nodeCosts.clone();
   
    // add all the normal inputs
    for (PlanNode pred : getPredecessors()) {
     
      Costs parentCosts = pred.getCumulativeCostsShare();
      if (parentCosts != null) {
        this.cumulativeCosts.addCosts(parentCosts);
      } else {
        throw new CompilerException("Trying to set the costs of an operator before the predecessor costs are computed.");
      }
    }
   
    // add all broadcast variable inputs
    if (this.broadcastInputs != null) {
      for (NamedChannel nc : this.broadcastInputs) {
        Costs bcInputCost = nc.getSource().getCumulativeCostsShare();
        if (bcInputCost != null) {
          this.cumulativeCosts.addCosts(bcInputCost);
        } else {
          throw new CompilerException("Trying to set the costs of an operator before the broadcast input costs are computed.");
        }

    // The plan enumeration logic works as for regular two-input operators, which is important
    // because of the branch handling logic. It does pick redistributing network channels
    // between the sink and the sink joiner, because the sink joiner has a different DOP than the sink.
    // We discard any cost and simply use the sum of the costs from the two children.
   
    Costs totalCosts = getInput1().getSource().getCumulativeCosts().clone();
    totalCosts.addCosts(getInput2().getSource().getCumulativeCosts());
    super.setCosts(totalCosts);
  }

      // create an input node for the combiner, with the same DOP as the input node
      GroupReduceNode combinerNode = ((GroupReduceNode) node).getCombinerUtilityNode();
      combinerNode.setDegreeOfParallelism(in.getSource().getDegreeOfParallelism());

      SingleInputPlanNode combiner = new SingleInputPlanNode(combinerNode, "Combine ("+node.getPactContract().getName()+")", toCombiner, DriverStrategy.ALL_GROUP_COMBINE);
      combiner.setCosts(new Costs(0, 0));
      combiner.initProperties(toCombiner.getGlobalProperties(), toCombiner.getLocalProperties());
     
      Channel toReducer = new Channel(combiner);
      toReducer.setShipStrategy(in.getShipStrategy(), in.getShipStrategyKeys(), in.getShipStrategySortOrder());
      toReducer.setLocalStrategy(in.getLocalStrategy(), in.getLocalStrategyKeys(), in.getLocalStrategySortOrder());

    testShipStrategiesIsolated(BIG_ESTIMATES, 1);
    testShipStrategiesIsolated(BIG_ESTIMATES, 10);
  }
 
  private void testShipStrategiesIsolated(EstimateProvider estimates, int targetParallelism) {
    // price each ship strategy for the same input in isolation, then compare the results pairwise
    Costs random = new Costs();
    costEstimator.addRandomPartitioningCost(estimates, random);
   
    Costs hash = new Costs();
    costEstimator.addHashPartitioningCost(estimates, hash);
   
    Costs range = new Costs();
    costEstimator.addRangePartitionCost(estimates, range);
   
    Costs broadcast = new Costs();
    costEstimator.addBroadcastCost(estimates, targetParallelism, broadcast);
   
    int randomVsHash = random.compareTo(hash);
    int hashVsRange = hash.compareTo(range);
    int hashVsBroadcast = hash.compareTo(broadcast);

 
  // --------------------------------------------------------------------------------------------
 
  @Test
  public void testShipStrategyCombinationsPlain() {
    Costs hashBothSmall = new Costs();
    Costs hashSmallAndLarge = new Costs();
    Costs hashBothLarge = new Costs();
   
    Costs hashSmallBcLarge10 = new Costs();
    Costs hashLargeBcSmall10 = new Costs();
   
    Costs hashSmallBcLarge1000 = new Costs();
    Costs hashLargeBcSmall1000 = new Costs();
   
    Costs forwardSmallBcLarge10 = new Costs();
    Costs forwardLargeBcSmall10 = new Costs();
   
    Costs forwardSmallBcLarge1000 = new Costs();
    Costs forwardLargeBcSmall1000 = new Costs();
   
    costEstimator.addHashPartitioningCost(MEDIUM_ESTIMATES, hashBothSmall);
    costEstimator.addHashPartitioningCost(MEDIUM_ESTIMATES, hashBothSmall);
   
    costEstimator.addHashPartitioningCost(MEDIUM_ESTIMATES, hashSmallAndLarge);
    costEstimator.addHashPartitioningCost(BIG_ESTIMATES, hashSmallAndLarge);
   
    costEstimator.addHashPartitioningCost(BIG_ESTIMATES, hashBothLarge);
    costEstimator.addHashPartitioningCost(BIG_ESTIMATES, hashBothLarge);
   
    costEstimator.addHashPartitioningCost(MEDIUM_ESTIMATES, hashSmallBcLarge10);
    costEstimator.addBroadcastCost(BIG_ESTIMATES, 10, hashSmallBcLarge10);
   
    costEstimator.addHashPartitioningCost(BIG_ESTIMATES, hashLargeBcSmall10);
    costEstimator.addBroadcastCost(MEDIUM_ESTIMATES, 10, hashLargeBcSmall10);
   
    costEstimator.addHashPartitioningCost(MEDIUM_ESTIMATES, hashSmallBcLarge1000);
    costEstimator.addBroadcastCost(BIG_ESTIMATES, 1000, hashSmallBcLarge1000);
   
    costEstimator.addHashPartitioningCost(BIG_ESTIMATES, hashLargeBcSmall1000);
    costEstimator.addBroadcastCost(MEDIUM_ESTIMATES, 1000, hashLargeBcSmall1000);
   
    costEstimator.addBroadcastCost(BIG_ESTIMATES, 10, forwardSmallBcLarge10);
   
    costEstimator.addBroadcastCost(MEDIUM_ESTIMATES, 10, forwardLargeBcSmall10);
   
    costEstimator.addBroadcastCost(BIG_ESTIMATES, 1000, forwardSmallBcLarge1000);
   
    costEstimator.addBroadcastCost(MEDIUM_ESTIMATES, 1000, forwardLargeBcSmall1000);
   
    // hash partitioning cost grows roughly monotonically with input size
    assertTrue(hashBothSmall.compareTo(hashSmallAndLarge) < 0);
    assertTrue(hashSmallAndLarge.compareTo(hashBothLarge) < 0);
   
    // broadcasting the smaller input is better
    assertTrue(hashLargeBcSmall10.compareTo(hashSmallBcLarge10) < 0);
    assertTrue(forwardLargeBcSmall10.compareTo(forwardSmallBcLarge10) < 0);
    assertTrue(hashLargeBcSmall1000.compareTo(hashSmallBcLarge1000) < 0);
    assertTrue(forwardLargeBcSmall1000.compareTo(forwardSmallBcLarge1000) < 0);
   
    // broadcasting the small input and forwarding the large one beats partitioning both, given the size difference
    assertTrue(forwardLargeBcSmall10.compareTo(hashSmallAndLarge) < 0);
   
    // broadcasting to too many targets becomes expensive again
    assertTrue(forwardLargeBcSmall1000.compareTo(hashSmallAndLarge) > 0);
   
    // the replication factor is respected: broadcasting to more receivers costs more
    assertTrue(hashSmallBcLarge10.compareTo(hashSmallBcLarge1000) < 0);
    assertTrue(hashLargeBcSmall10.compareTo(hashLargeBcSmall1000) < 0);
    assertTrue(forwardSmallBcLarge10.compareTo(forwardSmallBcLarge1000) < 0);
    assertTrue(forwardLargeBcSmall10.compareTo(forwardLargeBcSmall1000) < 0);
   
    // forward versus hash: forwarding one input is cheaper than hash partitioning it
    assertTrue(forwardSmallBcLarge10.compareTo(hashSmallBcLarge10) < 0);
    assertTrue(forwardSmallBcLarge1000.compareTo(hashSmallBcLarge1000) < 0);
    assertTrue(forwardLargeBcSmall10.compareTo(hashLargeBcSmall10) < 0);
    assertTrue(forwardLargeBcSmall1000.compareTo(hashLargeBcSmall1000) < 0);
  }


