Package org.apache.flink.compiler.plan

Examples of org.apache.flink.compiler.plan.PlanNode


    //    this translates to a local strategy that would only be executed in the first superstep
   
    final boolean merge;
    if (mergeIterationAuxTasks && wspn.getOutgoingChannels().size() == 1) {
      final Channel c = wspn.getOutgoingChannels().get(0);
      final PlanNode successor = c.getTarget();
      merge = c.getShipStrategy() == ShipStrategyType.FORWARD &&
          c.getLocalStrategy() == LocalStrategy.NONE &&
          c.getTempMode() == TempMode.NONE &&
          successor.getDegreeOfParallelism() == wspn.getDegreeOfParallelism() &&
          !(successor instanceof NAryUnionPlanNode) &&
          successor != iteration.getNextWorkSetPlanNode() &&
          iteration.getInitialWorksetInput().getLocalStrategy() == LocalStrategy.NONE;
    } else {
      merge = false;
    }
   
    // create or adopt the head vertex
    final AbstractJobVertex toReturn;
    final AbstractJobVertex headVertex;
    final TaskConfig headConfig;
    if (merge) {
      final PlanNode successor = wspn.getOutgoingChannels().get(0).getTarget();
      headVertex = (AbstractJobVertex) this.vertices.get(successor);
     
      if (headVertex == null) {
        throw new CompilerException(
          "Bug: Trying to merge solution set with its sucessor, but successor has not been created.");
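The merge decision above folds the iteration head into its successor task only when the connecting channel is a plain pipelined forward. A minimal sketch of that predicate factored into a standalone helper (hypothetical name; it relies only on the accessors and types that appear in the snippet and on the surrounding class's imports):

    // Hypothetical helper: true if the channel from the given plan node to its successor
    // is a pure pipelined forward, so the head and its successor can share one task.
    private static boolean isPipelinedForward(PlanNode from, Channel c) {
      final PlanNode successor = c.getTarget();
      return c.getShipStrategy() == ShipStrategyType.FORWARD &&
          c.getLocalStrategy() == LocalStrategy.NONE &&
          c.getTempMode() == TempMode.NONE &&
          successor.getDegreeOfParallelism() == from.getDegreeOfParallelism() &&
          !(successor instanceof NAryUnionPlanNode);
    }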


    // connect the sync task
    sync.connectNewDataSetAsInput(headVertex, DistributionPattern.POINTWISE);
   
    // ----------------------------- create the iteration tail ------------------------------
   
    final PlanNode rootOfTerminationCriterion = bulkNode.getRootOfTerminationCriterion();
    final PlanNode rootOfStepFunction = bulkNode.getRootOfStepFunction();
    final TaskConfig tailConfig;
   
    AbstractJobVertex rootOfStepFunctionVertex = (AbstractJobVertex) this.vertices.get(rootOfStepFunction);
    if (rootOfStepFunctionVertex == null) {
      // last op is chained
      final TaskInChain taskInChain = this.chainedTasks.get(rootOfStepFunction);
      if (taskInChain == null) {
        throw new CompilerException("Bug: Tail of step function not found as vertex or chained task.");
      }
      rootOfStepFunctionVertex = (AbstractJobVertex) taskInChain.getContainingVertex();

      // the fake channel is statically typed to PACT record; no data is sent over this channel anyway
      tailConfig = taskInChain.getTaskConfig();
    } else {
      tailConfig = new TaskConfig(rootOfStepFunctionVertex.getConfiguration());
    }
   
    tailConfig.setIsWorksetUpdate();
   
    // No following termination criterion
    if (rootOfStepFunction.getOutgoingChannels().isEmpty()) {
     
      rootOfStepFunctionVertex.setInvokableClass(IterationTailPactTask.class);
     
      tailConfig.setOutputSerializer(bulkNode.getSerializerForIterationChannel());
    }
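The lookup pattern above (vertex map first, chained-task map second) recurs whenever a plan node has to be resolved to the job vertex that executes it. A hedged sketch of that pattern as a helper (hypothetical name, reusing the vertices and chainedTasks fields shown in the snippet):

    // Hypothetical helper: resolve a plan node either to its own job vertex or, if the
    // operator was chained, to the vertex that contains the chained task.
    private AbstractJobVertex resolveVertex(PlanNode node) {
      AbstractJobVertex vertex = (AbstractJobVertex) this.vertices.get(node);
      if (vertex != null) {
        return vertex;
      }
      final TaskInChain taskInChain = this.chainedTasks.get(node);
      if (taskInChain == null) {
        throw new CompilerException("Bug: Plan node found neither as a vertex nor as a chained task.");
      }
      return (AbstractJobVertex) taskInChain.getContainingVertex();
    }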

      // we have three possible cases:
      // 1) Two tails, one for workset update, one for solution set update
      // 2) One tail for workset update, solution set update happens in an intermediate task
      // 3) One tail for solution set update, workset update happens in an intermediate task
     
      final PlanNode nextWorksetNode = iterNode.getNextWorkSetPlanNode();
      final PlanNode solutionDeltaNode = iterNode.getSolutionSetDeltaPlanNode();
     
      final boolean hasWorksetTail = nextWorksetNode.getOutgoingChannels().isEmpty();
      final boolean hasSolutionSetTail = (!iterNode.isImmediateSolutionSetUpdate()) || (!hasWorksetTail);
     
      {
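As a reading aid for the elided block, this is how the two flags computed above map onto the three cases listed in the comment (illustrative sketch only; the actual wiring of tails and intermediate tasks follows in the full code):

      if (hasWorksetTail && hasSolutionSetTail) {
        // case 1: two tails, one for the workset update, one for the solution set update
      } else if (hasWorksetTail) {
        // case 2: one tail for the workset update; the solution set update happens
        //         in an intermediate task (immediate solution set update)
      } else {
        // case 3: one tail for the solution set update; the workset update happens
        //         in an intermediate task
      }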

    // Make sure that the workset candidates fulfill the input requirements
    {
      List<PlanNode> newCandidates = new ArrayList<PlanNode>();
     
      for (Iterator<PlanNode> planDeleter = worksetCandidates.iterator(); planDeleter.hasNext(); ) {
        PlanNode candidate = planDeleter.next();
       
        GlobalProperties atEndGlobal = candidate.getGlobalProperties();
        LocalProperties atEndLocal = candidate.getLocalProperties();
       
        FeedbackPropertiesMeetRequirementsReport report = candidate.checkPartialSolutionPropertiesMet(wspn, atEndGlobal, atEndLocal);
        if (report == FeedbackPropertiesMeetRequirementsReport.NO_PARTIAL_SOLUTION) {
          ; // this candidate depends on the workset only through a broadcast variable, so nothing needs to change
        }
        else if (report == FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
          // attach a no-op node through which we create the properties of the original input
          Channel toNoOp = new Channel(candidate);
          globPropsReqWorkset.parameterizeChannel(toNoOp, false);
          locPropsReqWorkset.parameterizeChannel(toNoOp);
         
          UnaryOperatorNode rebuildWorksetPropertiesNode = new UnaryOperatorNode("Rebuild Workset Properties", FieldList.EMPTY_LIST);
         
          rebuildWorksetPropertiesNode.setDegreeOfParallelism(candidate.getDegreeOfParallelism());
         
          SingleInputPlanNode rebuildWorksetPropertiesPlanNode = new SingleInputPlanNode(rebuildWorksetPropertiesNode, "Rebuild Workset Properties", toNoOp, DriverStrategy.UNARY_NO_OP);
          rebuildWorksetPropertiesPlanNode.initProperties(toNoOp.getGlobalProperties(), toNoOp.getLocalProperties());
          estimator.costOperator(rebuildWorksetPropertiesPlanNode);
           
          GlobalProperties atEndGlobalModified = rebuildWorksetPropertiesPlanNode.getGlobalProperties();
          LocalProperties atEndLocalModified = rebuildWorksetPropertiesPlanNode.getLocalProperties();
           
          if (!(atEndGlobalModified.equals(atEndGlobal) && atEndLocalModified.equals(atEndLocal))) {
            FeedbackPropertiesMeetRequirementsReport report2 = candidate.checkPartialSolutionPropertiesMet(wspn, atEndGlobalModified, atEndLocalModified);
           
            if (report2 != FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
              newCandidates.add(rebuildWorksetPropertiesPlanNode);
            }
          }
         
          // remove the original candidate; the rebuilt one (if it qualifies) is added after the loop
          planDeleter.remove();
         
        }
      }
     
      worksetCandidates.addAll(newCandidates);
    }
   
    if (worksetCandidates.isEmpty()) {
      return;
    }
   
    // sanity check the solution set delta
    for (Iterator<PlanNode> deltaPlans = solutionSetDeltaCandidates.iterator(); deltaPlans.hasNext(); ) {
      SingleInputPlanNode candidate = (SingleInputPlanNode) deltaPlans.next();
      GlobalProperties gp = candidate.getGlobalProperties();
     
      if (gp.getPartitioning() != PartitioningProperty.HASH_PARTITIONED || gp.getPartitioningFields() == null ||
          !gp.getPartitioningFields().equals(this.solutionSetKeyFields))
      {
        throw new CompilerException("Bug: The solution set delta is not partitioned.");
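For orientation: the loop further above keeps a candidate unchanged when the report is neither NO_PARTIAL_SOLUTION nor NOT_MET, i.e. when the feedback properties already satisfy the requirements. The sanity check directly above then reads the candidate's global properties; a minimal sketch of that inspection pattern, using only names taken from the snippet:

    // Hedged sketch: check that a candidate is hash-partitioned on the solution set keys,
    // using the PlanNode / GlobalProperties accessors visible above.
    GlobalProperties gp = candidate.getGlobalProperties();
    boolean partitionedOnSolutionSetKeys =
        gp.getPartitioning() == PartitioningProperty.HASH_PARTITIONED &&
        gp.getPartitioningFields() != null &&
        gp.getPartitioningFields().equals(this.solutionSetKeyFields);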

  protected void instantiate(OperatorDescriptorDual operator, Channel in1, Channel in2,
      List<Set<? extends NamedChannel>> broadcastPlanChannels, List<PlanNode> target, CostEstimator estimator,
      RequestedGlobalProperties globPropsReq1, RequestedGlobalProperties globPropsReq2,
      RequestedLocalProperties locPropsReq1, RequestedLocalProperties locPropsReq2)
  {
    final PlanNode inputSource1 = in1.getSource();
    final PlanNode inputSource2 = in2.getSource();
   
    for (List<NamedChannel> broadcastChannelsCombination: Sets.cartesianProduct(broadcastPlanChannels)) {
     
      boolean validCombination = true;
     
      // check whether the broadcast inputs use the same plan candidate at the branching point
      for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
        NamedChannel nc = broadcastChannelsCombination.get(i);
        PlanNode bcSource = nc.getSource();
       
        if (!(areBranchCompatible(bcSource, inputSource1) || areBranchCompatible(bcSource, inputSource2))) {
          validCombination = false;
          break;
        }
       
        // check branch compatibility against all other broadcast variables
        for (int k = 0; k < i; k++) {
          PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
         
          if (!areBranchCompatible(bcSource, otherBcSource)) {
            validCombination = false;
            break;
          }
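The nested loops above perform a pairwise compatibility check over one combination drawn from the Cartesian product of broadcast channel candidates. A hedged sketch of the same check factored into a boolean helper (hypothetical name; areBranchCompatible is the method already used in the snippet):

    // Hypothetical helper: a combination of broadcast channels is usable only if every
    // source is branch compatible with at least one regular input and with every other
    // broadcast source in the combination.
    private boolean isValidBroadcastCombination(List<NamedChannel> combination,
        PlanNode inputSource1, PlanNode inputSource2) {
      for (int i = 0; i < combination.size(); i++) {
        final PlanNode bcSource = combination.get(i).getSource();
        if (!(areBranchCompatible(bcSource, inputSource1) || areBranchCompatible(bcSource, inputSource2))) {
          return false;
        }
        for (int k = 0; k < i; k++) {
          if (!areBranchCompatible(bcSource, combination.get(k).getSource())) {
            return false;
          }
        }
      }
      return true;
    }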

     
      if (strategy.firstDam() == DamBehavior.FULL_DAM || in1.getLocalStrategy().dams() || in1.getTempMode().breaksPipeline()) {
        someDamOnLeftPaths = true;
      } else {
        for (OptimizerNode brancher : this.hereJoinedBranches) {
          PlanNode candAtBrancher = in1.getSource().getCandidateAtBranchPoint(brancher);
         
          // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast vars
          if (candAtBrancher == null) {
            continue;
          }
         
          SourceAndDamReport res = in1.getSource().hasDamOnPathDownTo(candAtBrancher);
          if (res == NOT_FOUND) {
            throw new CompilerException("Bug: Tracing dams for deadlock detection is broken.");
          } else if (res == FOUND_SOURCE) {
            damOnAllLeftPaths = false;
          } else if (res == FOUND_SOURCE_AND_DAM) {
            someDamOnLeftPaths = true;
          } else {
            throw new CompilerException();
          }
        }
      }
     
      if (strategy.secondDam() == DamBehavior.FULL_DAM || in2.getLocalStrategy().dams() || in2.getTempMode().breaksPipeline()) {
        someDamOnRightPaths = true;
      } else {
        for (OptimizerNode brancher : this.hereJoinedBranches) {
          PlanNode candAtBrancher = in2.getSource().getCandidateAtBranchPoint(brancher);
         
          // not all candidates are found, because this list includes joined branches from both regular inputs and broadcast vars
          if (candAtBrancher == null) {
            continue;
          }

      // check the iteration
      WorksetIterationPlanNode iteration = (WorksetIterationPlanNode) sink.getInput().getSource();
      assertEquals(DEFAULT_PARALLELISM, iteration.getDegreeOfParallelism());
     
      // check the solution set join and the delta
      PlanNode ssDelta = iteration.getSolutionSetDeltaPlanNode();
      assertTrue(ssDelta instanceof DualInputPlanNode); // this is only true if the update function preserves the partitioning
     
      DualInputPlanNode ssJoin = (DualInputPlanNode) ssDelta;
      assertEquals(DEFAULT_PARALLELISM, ssJoin.getDegreeOfParallelism());
      assertEquals(ShipStrategyType.PARTITION_HASH, ssJoin.getInput1().getShipStrategy());

   
    //---------------------------------------------------------------------------------------
    // the part below here is relevant only to plan nodes with concrete strategies, etc
    //---------------------------------------------------------------------------------------

    final PlanNode p = node.getPlanNode();
    if (p == null) {
      // finish node
      writer.print("\n\t}");
      return true;
    }
    // local strategy
    String locString = null;
    if (p.getDriverStrategy() != null) {
      switch (p.getDriverStrategy()) {
      case NONE:
      case BINARY_NO_OP:
        break;
       
      case UNARY_NO_OP:
        locString = "No-Op";
        break;
       
      case COLLECTOR_MAP:
      case MAP:
        locString = "Map";
        break;
       
      case FLAT_MAP:
        locString = "FlatMap";
        break;
       
      case MAP_PARTITION:
        locString = "Map Partition";
        break;
     
      case ALL_REDUCE:
        locString = "Reduce All";
        break;
     
      case ALL_GROUP_REDUCE:
      case ALL_GROUP_COMBINE:
        locString = "Group Reduce All";
        break;
       
      case SORTED_REDUCE:
        locString = "Sorted Reduce";
        break;
       
      case SORTED_PARTIAL_REDUCE:
        locString = "Sorted Combine/Reduce";
        break;

      case SORTED_GROUP_REDUCE:
        locString = "Sorted Group Reduce";
        break;
       
      case SORTED_GROUP_COMBINE:
        locString = "Sorted Combine";
        break;

      case HYBRIDHASH_BUILD_FIRST:
        locString = "Hybrid Hash (build: " + child1name + ")";
        break;
      case HYBRIDHASH_BUILD_SECOND:
        locString = "Hybrid Hash (build: " + child2name + ")";
        break;
       
      case HYBRIDHASH_BUILD_FIRST_CACHED:
        locString = "Hybrid Hash (CACHED) (build: " + child1name + ")";
        break;
      case HYBRIDHASH_BUILD_SECOND_CACHED:
        locString = "Hybrid Hash (CACHED) (build: " + child2name + ")";
        break;

      case NESTEDLOOP_BLOCKED_OUTER_FIRST:
        locString = "Nested Loops (Blocked Outer: " + child1name + ")";
        break;
      case NESTEDLOOP_BLOCKED_OUTER_SECOND:
        locString = "Nested Loops (Blocked Outer: " + child2name + ")";
        break;
      case NESTEDLOOP_STREAMED_OUTER_FIRST:
        locString = "Nested Loops (Streamed Outer: " + child1name + ")";
        break;
      case NESTEDLOOP_STREAMED_OUTER_SECOND:
        locString = "Nested Loops (Streamed Outer: " + child2name + ")";
        break;

      case MERGE:
        locString = "Merge";
        break;

      case CO_GROUP:
        locString = "Co-Group";
        break;

      default:
        locString = p.getDriverStrategy().name();
        break;
      }

      if (locString != null) {
        writer.print(",\n\t\t\"driver_strategy\": \"");
        writer.print(locString);
        writer.print("\"");
      }
    }
   
    {
      // output node global properties
      final GlobalProperties gp = p.getGlobalProperties();

      writer.print(",\n\t\t\"global_properties\": [\n");

      addProperty(writer, "Partitioning", gp.getPartitioning().name(), true);
      if (gp.getPartitioningFields() != null) {
        addProperty(writer, "Partitioned on", gp.getPartitioningFields().toString(), false);
      }
      if (gp.getPartitioningOrdering() != null) {
        addProperty(writer, "Partitioning Order", gp.getPartitioningOrdering().toString(), false)
      }
      else {
        addProperty(writer, "Partitioning Order", "(none)", false);
      }
      if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
        addProperty(writer, "Uniqueness", "not unique", false);
      }
      else {
        addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false)
      }

      writer.print("\n\t\t]");
    }

    {
      // output node local properties
      LocalProperties lp = p.getLocalProperties();

      writer.print(",\n\t\t\"local_properties\": [\n");

      if (lp.getOrdering() != null) {
        addProperty(writer, "Order", lp.getOrdering().toString(), true)
      }
      else {
        addProperty(writer, "Order", "(none)", true);
      }
      if (lp.getGroupedFields() != null && lp.getGroupedFields().size() > 0) {
        addProperty(writer, "Grouped on", lp.getGroupedFields().toString(), false);
      } else {
        addProperty(writer, "Grouping", "not grouped", false)
      }
      if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
        addProperty(writer, "Uniqueness", "not unique", false);
      }
      else {
        addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false)
      }

      writer.print("\n\t\t]");
    }

    // output node size estimates
    writer.print(",\n\t\t\"estimates\": [\n");

    addProperty(writer, "Est. Output Size", n.getEstimatedOutputSize() == -1 ? "(unknown)"
      : formatNumber(n.getEstimatedOutputSize(), "B"), true);
    addProperty(writer, "Est. Cardinality", n.getEstimatedNumRecords() == -1 ? "(unknown)"
      : formatNumber(n.getEstimatedNumRecords()), false);

    writer.print("\t\t]");

    // output node cost
    if (p.getNodeCosts() != null) {
      writer.print(",\n\t\t\"costs\": [\n");

      addProperty(writer, "Network", p.getNodeCosts().getNetworkCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getNetworkCost(), "B"), true);
      addProperty(writer, "Disk I/O", p.getNodeCosts().getDiskCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getDiskCost(), "B"), false);
      addProperty(writer, "CPU", p.getNodeCosts().getCpuCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getCpuCost(), ""), false);

      addProperty(writer, "Cumulative Network",
        p.getCumulativeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getNetworkCost(), "B"), false);
      addProperty(writer, "Cumulative Disk I/O",
        p.getCumulativeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getDiskCost(), "B"), false);
      addProperty(writer, "Cumulative CPU",
        p.getCumulativeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getCpuCost(), ""), false);

      writer.print("\n\t\t]");
    }
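The JSON emitter above delegates every key/value pair to an addProperty helper that is not shown. A hedged sketch of what such a helper presumably looks like, given how it is called; the writer is assumed to be a PrintWriter, and the exact "name"/"value" field names are an assumption:

    // Assumed shape of the helper used above: writes one JSON object per property and
    // prefixes a comma unless this is the first entry of the surrounding array.
    private void addProperty(PrintWriter writer, String name, String value, boolean first) {
      if (!first) {
        writer.print(",\n");
      }
      writer.print("\t\t\t{ \"name\": \"");
      writer.print(name);
      writer.print("\", \"value\": \"");
      writer.print(value);
      writer.print("\" }");
    }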

      if (visitable instanceof BinaryUnionPlanNode) {
        final BinaryUnionPlanNode unionNode = (BinaryUnionPlanNode) visitable;
        final Channel in1 = unionNode.getInput1();
        final Channel in2 = unionNode.getInput2();
     
        PlanNode newUnionNode;

        List<Channel> inputs = new ArrayList<Channel>();
        collect(in1, inputs);
        collect(in2, inputs);
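The collect(...) calls above flatten the two inputs of the binary union into a single list so they can later be attached to one n-ary union node. A hedged sketch of such a helper, assuming it recursively unwraps cascades of binary unions (the real implementation is elided from the snippet):

    // Assumed helper: descend through nested BinaryUnionPlanNodes and gather all
    // non-union input channels into the given list.
    private void collect(Channel in, List<Channel> inputs) {
      if (in.getSource() instanceof BinaryUnionPlanNode) {
        BinaryUnionPlanNode union = (BinaryUnionPlanNode) in.getSource();
        collect(union.getInput1(), inputs);
        collect(union.getInput2(), inputs);
      } else {
        inputs.add(in);
      }
    }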
