Package eu.stratosphere.compiler.dag

Examples of eu.stratosphere.compiler.dag.OptimizerNode$UnclosedBranchDescriptor
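These snippets are drawn from the Stratosphere optimizer (the PACT compiler that later became Apache Flink). An UnclosedBranchDescriptor records a branching node: a node whose data flow forked into multiple paths that have not yet re-joined, which the candidate-merging and sanity-check code below must resolve to a single plan candidate. Only the getBranchingNode() accessor is exercised in these examples, so the following is a hypothetical sketch of the descriptor's minimal shape, inferred from that usage rather than taken from the original class:

      // Hypothetical sketch: the smallest shape consistent with the call sites below.
      public static final class UnclosedBranchDescriptor {
        private final OptimizerNode branchingNode;  // the node whose outputs diverged

        UnclosedBranchDescriptor(OptimizerNode branchingNode) {
          this.branchingNode = branchingNode;
        }

        public OptimizerNode getBranchingNode() {
          return this.branchingNode;
        }
      }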


      if (this.branchPlan == null) {
        this.branchPlan = new HashMap<OptimizerNode, PlanNode>(8);
      }
 
      for (UnclosedBranchDescriptor uc : this.template.getOpenBranches()) {
        OptimizerNode brancher = uc.getBranchingNode();
        PlanNode selectedCandidate = null;
 
        if (branchPlan1 != null) {
          // predecessor 1 has branching children, see if it got the branch we are looking for
          selectedCandidate = branchPlan1.get(brancher);
View Full Code Here


      if (this.branchPlan == null) {
        throw new CompilerException("Branching and rejoining logic did not find a candidate for the branching point.");
      }
 
      for (UnclosedBranchDescriptor uc : this.template.getOpenBranches()) {
        OptimizerNode brancher = uc.getBranchingNode();
        if (this.branchPlan.get(brancher) == null) {
          throw new CompilerException("Branching and rejoining logic did not find a candidate for the branching point.");
        }
      }
    }
View Full Code Here
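The next, longer excerpt is from the optimizer's JSON plan dump generator. The visit(...) method walks the plan recursively and writes one JSON object per node; its method head lies above the start of the excerpt, so the parameter list (a dumpable node, a writer, and a "first" flag) is inferred from the recursive calls.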

      visit(child, writer, first);
      first = false;
    }
   
    // check if this node should be skipped from the dump
    final OptimizerNode n = node.getOptimizerNode();
   
    // ------------------ dump after the ascend ---------------------
    // start a new node and output node id
    if (!first) {
      writer.print(",\n");
    }
    // open the node
    writer.print("\t{\n");
   
    // recurse if it is an iteration node
    if (node instanceof BulkIterationNode || node instanceof BulkIterationPlanNode) {
     
      DumpableNode<?> innerChild = node instanceof BulkIterationNode ?
          ((BulkIterationNode) node).getNextPartialSolution() :
          ((BulkIterationPlanNode) node).getRootOfStepFunction();
         
      DumpableNode<?> begin = node instanceof BulkIterationNode ?
        ((BulkIterationNode) node).getPartialSolution() :
        ((BulkIterationPlanNode) node).getPartialSolutionPlanNode();
     
      writer.print("\t\t\"step_function\": [\n");
     
      visit(innerChild, writer, true);
     
      writer.print("\n\t\t],\n");
      writer.print("\t\t\"partial_solution\": " + this.nodeIds.get(begin) + ",\n");
      writer.print("\t\t\"next_partial_solution\": " + this.nodeIds.get(innerChild) + ",\n");
    } else if (node instanceof WorksetIterationNode || node instanceof WorksetIterationPlanNode) {
     
      DumpableNode<?> worksetRoot = node instanceof WorksetIterationNode ?
          ((WorksetIterationNode) node).getNextWorkset() :
          ((WorksetIterationPlanNode) node).getNextWorkSetPlanNode();
      DumpableNode<?> solutionDelta = node instanceof WorksetIterationNode ?
          ((WorksetIterationNode) node).getSolutionSetDelta() :
          ((WorksetIterationPlanNode) node).getSolutionSetDeltaPlanNode();
         
      DumpableNode<?> workset = node instanceof WorksetIterationNode ?
            ((WorksetIterationNode) node).getWorksetNode() :
            ((WorksetIterationPlanNode) node).getWorksetPlanNode();
      DumpableNode<?> solutionSet = node instanceof WorksetIterationNode ?
            ((WorksetIterationNode) node).getSolutionSetNode() :
            ((WorksetIterationPlanNode) node).getSolutionSetPlanNode();
     
      writer.print("\t\t\"step_function\": [\n");
     
      visit(worksetRoot, writer, true);
      visit(solutionDelta, writer, false);
     
      writer.print("\n\t\t],\n");
      writer.print("\t\t\"workset\": " + this.nodeIds.get(workset) + ",\n");
      writer.print("\t\t\"solution_set\": " + this.nodeIds.get(solutionSet) + ",\n");
      writer.print("\t\t\"next_workset\": " + this.nodeIds.get(worksetRoot) + ",\n");
      writer.print("\t\t\"solution_delta\": " + this.nodeIds.get(solutionDelta) + ",\n");
    }
   
    // print the id
    writer.print("\t\t\"id\": " + this.nodeIds.get(node));

   
    final String type;
    final String contents;
    if (n instanceof DataSinkNode) {
      type = "sink";
      contents = n.getPactContract().toString();
    } else if (n instanceof DataSourceNode) {
      type = "source";
      contents = n.getPactContract().toString();
    } else if (n instanceof BulkIterationNode) {
      type = "bulk_iteration";
      contents = n.getPactContract().getName();
    } else if (n instanceof WorksetIterationNode) {
      type = "workset_iteration";
      contents = n.getPactContract().getName();
    } else if (n instanceof BinaryUnionNode) {
      type = "pact";
      contents = "";
    } else {
      type = "pact";
      contents = n.getPactContract().getName();
    }
   
    String name = n.getName();
    if (name.equals("Reduce") && (node instanceof SingleInputPlanNode) &&
        ((SingleInputPlanNode) node).getDriverStrategy() == DriverStrategy.SORTED_GROUP_COMBINE) {
      name = "Combine";
    }
   
    // output the type identifier
    writer.print(",\n\t\t\"type\": \"" + type + "\"");
   
    // output node name
    writer.print(",\n\t\t\"pact\": \"" + name + "\"");
   
    // output node contents
    writer.print(",\n\t\t\"contents\": \"" + contents + "\"");

    // degree of parallelism
    writer.print(",\n\t\t\"parallelism\": \""
      + (n.getDegreeOfParallelism() >= 1 ? n.getDegreeOfParallelism() : "default") + "\"");
   
    writer.print(",\n\t\t\"subtasks_per_instance\": \""
        + (n.getSubtasksPerInstance() >= 1 ? n.getSubtasksPerInstance() : "default") + "\"");

    // output node predecessors
    Iterator<? extends DumpableConnection<?>> inConns = node.getDumpableInputs();
    String child1name = "", child2name = "";

    if (inConns != null && inConns.hasNext()) {
      // start predecessor list
      writer.print(",\n\t\t\"predecessors\": [");
      int connNum = 0;
      int inputNum = 0;
     
      while (inConns.hasNext()) {
        final DumpableConnection<?> conn = inConns.next();
       
        final Collection<DumpableConnection<?>> inConnsForInput;
        if (conn.getSource() instanceof NAryUnionPlanNode) {
          inConnsForInput = new ArrayList<DumpableConnection<?>>();
         
          for (Iterator<? extends DumpableConnection<?>> inputOfUnion = conn.getSource().getDumpableInputs(); inputOfUnion.hasNext();) {
            inConnsForInput.add(inputOfUnion.next());
          }
        }
        else {
          inConnsForInput = Collections.<DumpableConnection<?>>singleton(conn);
        }
       
        for (DumpableConnection<?> inConn : inConnsForInput) {
          final DumpableNode<?> source = inConn.getSource();
          writer.print(connNum == 0 ? "\n" : ",\n");
          if (connNum == 0) {
            child1name += child1name.length() > 0 ? ", " : "";
            child1name += source.getOptimizerNode().getPactContract().getName();
          } else if (connNum == 1) {
            child2name += child2name.length() > 0 ? ", " : "";
            child2name += source.getOptimizerNode().getPactContract().getName();
          }
 
          // output predecessor id
          writer.print("\t\t\t{\"id\": " + this.nodeIds.get(source));
 
          // output connection side
          if (inConns.hasNext() || inputNum > 0) {
            writer.print(", \"side\": \"" + (inputNum == 0 ? "first" : "second") + "\"");
          }
          // output shipping strategy and channel type
          final Channel channel = (inConn instanceof Channel) ? (Channel) inConn : null;
          final ShipStrategyType shipType = channel != null ? channel.getShipStrategy() :
              ((PactConnection) inConn).getShipStrategy();
           
          String shipStrategy = null;
          if (shipType != null) {
            switch (shipType) {
            case NONE:
              // nothing
              break;
            case FORWARD:
              shipStrategy = "Forward";
              break;
            case BROADCAST:
              shipStrategy = "Broadcast";
              break;
            case PARTITION_HASH:
              shipStrategy = "Hash Partition";
              break;
            case PARTITION_RANGE:
              shipStrategy = "Range Partition";
              break;
            case PARTITION_LOCAL_HASH:
              shipStrategy = "Hash Partition (local)";
              break;
            case PARTITION_RANDOM:
              shipStrategy = "Redistribute";
              break;
            default:
              throw new CompilerException("Unknown ship strategy '" + conn.getShipStrategy().name()
                + "' in JSON generator.");
            }
          }
         
          if (shipStrategy != null && channel != null && channel.getShipStrategyKeys() != null && channel.getShipStrategyKeys().size() > 0) {
            shipStrategy += " on " + (channel.getShipStrategySortOrder() == null ?
                channel.getShipStrategyKeys().toString() :
                Utils.createOrdering(channel.getShipStrategyKeys(), channel.getShipStrategySortOrder()).toString());
          }
 
          if (shipStrategy != null) {
            writer.print(", \"ship_strategy\": \"" + shipStrategy + "\"");
          }
         
          if (channel != null) {
            String localStrategy = null;
            switch (channel.getLocalStrategy()) {
            case NONE:
              break;
            case SORT:
              localStrategy = "Sort";
              break;
            case COMBININGSORT:
              localStrategy = "Sort (combining)";
              break;
            default:
              throw new CompilerException("Unknown local strategy " + channel.getLocalStrategy().name());
            }
           
            if (localStrategy != null && channel.getLocalStrategyKeys() != null && channel.getLocalStrategyKeys().size() > 0) {
              localStrategy += " on " + (channel.getLocalStrategySortOrder() == null ?
                  channel.getLocalStrategyKeys().toString() :
                  Utils.createOrdering(channel.getLocalStrategyKeys(), channel.getLocalStrategySortOrder()).toString());
            }
           
            if (localStrategy != null) {
              writer.print(", \"local_strategy\": \"" + localStrategy + "\"");
            }
           
            if (channel.getTempMode() != TempMode.NONE) {
              String tempMode = channel.getTempMode().toString();
              writer.print(", \"temp_mode\": \"" + tempMode + "\"");
            }
          }
         
          writer.print('}');
          connNum++;
        }
        inputNum++;
      }
      // finish predecessors
      writer.print("\n\t\t]");
    }
   
    //---------------------------------------------------------------------------------------
    // the part below here is relevant only to plan nodes with concrete strategies, etc
    //---------------------------------------------------------------------------------------

    final PlanNode p = node.getPlanNode();
    if (p == null) {
      // finish node
      writer.print("\n\t}");
      return;
    }
    // driver strategy
    String locString = null;
    if (p.getDriverStrategy() != null) {
      switch (p.getDriverStrategy()) {
      case NONE:
      case BINARY_NO_OP:
        break;
       
      case UNARY_NO_OP:
        locString = "No-Op";
        break;
       
      case COLLECTOR_MAP:
      case MAP:
      case FLAT_MAP:
        locString = "Map";
        break;
     
      case ALL_REDUCE:
        locString = "Reduce All";
        break;
     
      case ALL_GROUP_REDUCE:
      case ALL_GROUP_COMBINE:
        locString = "Group Reduce All";
        break;
       
      case SORTED_REDUCE:
        locString = "Sorted Reduce";
        break;
       
      case SORTED_PARTIAL_REDUCE:
        locString = "Sorted Combine/Reduce";
        break;

      case SORTED_GROUP_REDUCE:
        locString = "Sorted Group Reduce";
        break;
       
      case SORTED_GROUP_COMBINE:
        locString = "Sorted Combine";
        break;

      case HYBRIDHASH_BUILD_FIRST:
        locString = "Hybrid Hash (build: " + child1name + ")";
        break;
      case HYBRIDHASH_BUILD_SECOND:
        locString = "Hybrid Hash (build: " + child2name + ")";
        break;

      case NESTEDLOOP_BLOCKED_OUTER_FIRST:
        locString = "Nested Loops (Blocked Outer: " + child1name + ")";
        break;
      case NESTEDLOOP_BLOCKED_OUTER_SECOND:
        locString = "Nested Loops (Blocked Outer: " + child2name + ")";
        break;
      case NESTEDLOOP_STREAMED_OUTER_FIRST:
        locString = "Nested Loops (Streamed Outer: " + child1name + ")";
        break;
      case NESTEDLOOP_STREAMED_OUTER_SECOND:
        locString = "Nested Loops (Streamed Outer: " + child2name + ")";
        break;

      case MERGE:
        locString = "Merge";
        break;

      case CO_GROUP:
        locString = "Co-Group";
        break;

      default:
        throw new CompilerException("Unknown local strategy '" + p.getDriverStrategy().name()
          + "' in JSON generator.");
      }

      if (locString != null) {
        writer.print(",\n\t\t\"driver_strategy\": \"");
        writer.print(locString);
        writer.print("\"");
      }
    }
   
    {
      // output node global properties
      final GlobalProperties gp = p.getGlobalProperties();

      writer.print(",\n\t\t\"global_properties\": [\n");

      addProperty(writer, "Partitioning", gp.getPartitioning().name(), true);
      if (gp.getPartitioningFields() != null) {
        addProperty(writer, "Partitioned on", gp.getPartitioningFields().toString(), false);
      }
      if (gp.getPartitioningOrdering() != null) {
        addProperty(writer, "Partitioning Order", gp.getPartitioningOrdering().toString(), false)
      }
      else {
        addProperty(writer, "Partitioning Order", "(none)", false);
      }
      if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
        addProperty(writer, "Uniqueness", "not unique", false);
      }
      else {
        addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false)
      }

      writer.print("\n\t\t]");
    }

    {
      // output node local properties
      LocalProperties lp = p.getLocalProperties();

      writer.print(",\n\t\t\"local_properties\": [\n");

      if (lp.getOrdering() != null) {
        addProperty(writer, "Order", lp.getOrdering().toString(), true)
      }
      else {
        addProperty(writer, "Order", "(none)", true);
      }
      if (lp.getGroupedFields() != null && lp.getGroupedFields().size() > 0) {
        addProperty(writer, "Grouped on", lp.getGroupedFields().toString(), false);
      } else {
        addProperty(writer, "Grouping", "not grouped", false)
      }
      if (n.getUniqueFields() == null || n.getUniqueFields().size() == 0) {
        addProperty(writer, "Uniqueness", "not unique", false);
      }
      else {
        addProperty(writer, "Uniqueness", n.getUniqueFields().toString(), false)
      }

      writer.print("\n\t\t]");
    }

    // output node size estimates
    writer.print(",\n\t\t\"estimates\": [\n");

    addProperty(writer, "Est. Output Size", n.getEstimatedOutputSize() == -1 ? "(unknown)"
      : formatNumber(n.getEstimatedOutputSize(), "B"), true);
    addProperty(writer, "Est. Cardinality", n.getEstimatedNumRecords() == -1 ? "(unknown)"
      : formatNumber(n.getEstimatedNumRecords()), false);

    writer.print("\t\t]");

    // output node cost
    if (p.getNodeCosts() != null) {
      writer.print(",\n\t\t\"costs\": [\n");

      addProperty(writer, "Network", p.getNodeCosts().getNetworkCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getNetworkCost(), "B"), true);
      addProperty(writer, "Disk I/O", p.getNodeCosts().getDiskCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getDiskCost(), "B"), false);
      addProperty(writer, "CPU", p.getNodeCosts().getCpuCost() == -1 ? "(unknown)"
        : formatNumber(p.getNodeCosts().getCpuCost(), ""), false);

      addProperty(writer, "Cumulative Network",
        p.getCumulativeCosts().getNetworkCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getNetworkCost(), "B"), false);
      addProperty(writer, "Cumulative Disk I/O",
        p.getCumulativeCosts().getDiskCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getDiskCost(), "B"), false);
      addProperty(writer, "Cumulative CPU",
        p.getCumulativeCosts().getCpuCost() == -1 ? "(unknown)" : formatNumber(p
          .getCumulativeCosts().getCpuCost(), ""), false);

      writer.print("\n\t\t]");
    }

    // output the node compiler hints
    if (n.getPactContract().getCompilerHints() != null) {
      CompilerHints hints = n.getPactContract().getCompilerHints();
      CompilerHints defaults = new CompilerHints();

      String size = hints.getOutputSize() == defaults.getOutputSize() ? "(none)" : String.valueOf(hints.getOutputSize());
      String card = hints.getOutputCardinality() == defaults.getOutputCardinality() ? "(none)" : String.valueOf(hints.getOutputCardinality());
      String width = hints.getAvgOutputRecordSize() == defaults.getAvgOutputRecordSize() ? "(none)" : String.valueOf(hints.getAvgOutputRecordSize());
View Full Code Here
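The addProperty and formatNumber helpers used throughout the dump routine above are not part of the excerpt. The sketch below is a guess at their shape, reconstructed only from the call sites (a PrintWriter is assumed; the real formatting may differ):

      // Sketch: write one property object into the currently open JSON list,
      // prefixing a comma for every entry except the first.
      private void addProperty(PrintWriter writer, String name, String value, boolean first) {
        if (!first) {
          writer.print(",\n");
        }
        writer.print("\t\t\t{ \"name\": \"" + name + "\", \"value\": \"" + value + "\" }");
      }

      // Sketch: render large numbers in a readable unit, e.g. "12.50 M" or "3.20 GB".
      private static String formatNumber(double value) {
        return formatNumber(value, "");
      }

      private static String formatNumber(double value, String suffix) {
        final String[] units = { "", "K", "M", "G", "T" };
        int unit = 0;
        while (value >= 1000.0 && unit < units.length - 1) {
          value /= 1000.0;
          unit++;
        }
        return String.format("%.2f %s%s", value, units[unit], suffix);
      }

The next excerpt is from the visitor that creates the optimizer DAG from the user program, presumably its preVisit method (its postVisit counterpart appears further below). Each operator is translated into the matching OptimizerNode subclass exactly once: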

      // check if we have been here before
      if (this.con2node.containsKey(c)) {
        return false;
      }

      final OptimizerNode n;

      // create a node for the operator (or sink or source) if we have not been here before
      if (c instanceof GenericDataSinkBase) {
        DataSinkNode dsn = new DataSinkNode((GenericDataSinkBase<?>) c);
        this.sinks.add(dsn);
        n = dsn;
      }
      else if (c instanceof GenericDataSourceBase) {
        DataSourceNode dsn = new DataSourceNode((GenericDataSourceBase<?, ?>) c);
        this.sources.add(dsn);
        n = dsn;
      }
      else if (c instanceof MapOperatorBase) {
        n = new MapNode((MapOperatorBase<?, ?, ?>) c);
      }
      else if (c instanceof CollectorMapOperatorBase) {
        n = new CollectorMapNode((CollectorMapOperatorBase<?, ?, ?>) c);
      }
      else if (c instanceof FlatMapOperatorBase) {
        n = new FlatMapNode((FlatMapOperatorBase<?, ?, ?>) c);
      }
      else if (c instanceof FilterOperatorBase) {
        n = new FilterNode((FilterOperatorBase<?, ?>) c);
      }
      else if (c instanceof ReduceOperatorBase) {
        n = new ReduceNode((ReduceOperatorBase<?, ?>) c);
      }
      else if (c instanceof GroupReduceOperatorBase) {
        n = new GroupReduceNode((GroupReduceOperatorBase<?, ?, ?>) c);
      }
      else if (c instanceof JoinOperatorBase) {
        n = new MatchNode((JoinOperatorBase<?, ?, ?, ?>) c);
      }
      else if (c instanceof CoGroupOperatorBase) {
        n = new CoGroupNode((CoGroupOperatorBase<?, ?, ?, ?>) c);
      }
      else if (c instanceof CrossOperatorBase) {
        n = new CrossNode((CrossOperatorBase<?, ?, ?, ?>) c);
      }
      else if (c instanceof BulkIterationBase) {
        n = new BulkIterationNode((BulkIterationBase<?>) c);
      }
      else if (c instanceof DeltaIterationBase) {
        n = new WorksetIterationNode((DeltaIterationBase<?, ?>) c);
      }
      else if (c instanceof Union){
        n = new BinaryUnionNode((Union<?>) c);
      }
      else if (c instanceof PartialSolutionPlaceHolder) {
        final PartialSolutionPlaceHolder<?> holder = (PartialSolutionPlaceHolder<?>) c;
        final BulkIterationBase<?> enclosingIteration = holder.getContainingBulkIteration();
        final BulkIterationNode containingIterationNode =
              (BulkIterationNode) this.parent.con2node.get(enclosingIteration);
       
        // catch this for the recursive translation of step functions
        BulkPartialSolutionNode p = new BulkPartialSolutionNode(holder, containingIterationNode);
        p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
        p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
        n = p;
      }
      else if (c instanceof WorksetPlaceHolder) {
        final WorksetPlaceHolder<?> holder = (WorksetPlaceHolder<?>) c;
        final DeltaIterationBase<?, ?> enclosingIteration = holder.getContainingWorksetIteration();
        final WorksetIterationNode containingIterationNode =
              (WorksetIterationNode) this.parent.con2node.get(enclosingIteration);
       
        // catch this for the recursive translation of step functions
        WorksetNode p = new WorksetNode(holder, containingIterationNode);
        p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
        p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
        n = p;
      }
      else if (c instanceof SolutionSetPlaceHolder) {
        final SolutionSetPlaceHolder<?> holder = (SolutionSetPlaceHolder<?>) c;
        final DeltaIterationBase<?, ?> enclosingIteration = holder.getContainingWorksetIteration();
        final WorksetIterationNode containingIterationNode =
              (WorksetIterationNode) this.parent.con2node.get(enclosingIteration);
       
        // catch this for the recursive translation of step functions
        SolutionSetNode p = new SolutionSetNode(holder, containingIterationNode);
        p.setDegreeOfParallelism(containingIterationNode.getDegreeOfParallelism());
        p.setSubtasksPerInstance(containingIterationNode.getSubtasksPerInstance());
        n = p;
      }
      else {
        throw new IllegalArgumentException("Unknown operator type: " + c.getClass() + " " + c);
      }

      this.con2node.put(c, n);
     
      // record the potential memory consumption
      this.numMemoryConsumers += n.isMemoryConsumer() ? 1 : 0;

      // set the parallelism only if it has not been set before. some nodes have a fixed DOP, such as the
      // key-less reducer (all-reduce)
      if (n.getDegreeOfParallelism() < 1) {
        // set the degree of parallelism
        int par = c.getDegreeOfParallelism();
        if (par > 0) {
          if (this.forceDOP && par != this.defaultParallelism) {
            par = this.defaultParallelism;
            LOG.warn("The degree-of-parallelism of nested Dataflows (such as step functions in iterations) is " +
              "currently fixed to the degree-of-parallelism of the surrounding operator (the iteration).");
          }
        } else {
          par = this.defaultParallelism;
        }
        n.setDegreeOfParallelism(par);
      }

      // check if we need to set the instance sharing accordingly such that
      // the maximum number of machines is not exceeded
      if (n.getSubtasksPerInstance() < 1) {
        int tasksPerInstance = 1;
        if (this.maxMachines > 0) {
          int p = n.getDegreeOfParallelism();
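          // computes ceil(p / maxMachines): the smallest number of subtasks per instance
          // that keeps the number of machines used within the configured maximum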
          tasksPerInstance = (p / this.maxMachines) + (p % this.maxMachines == 0 ? 0 : 1);
        }
 
        // we group together n tasks per machine, depending on config and the above computed
        // value required to obey the maximum number of machines
        n.setSubtasksPerInstance(tasksPerInstance);
      }
      return true;
    }
View Full Code Here

    }

    @Override
    public void postVisit(Operator<?> c) {
     
      OptimizerNode n = this.con2node.get(c);

      // first connect to the predecessors
      n.setInput(this.con2node);
      n.setBroadcastInputs(this.con2node);
     
      // if the node represents a bulk iteration, we recursively translate the data flow now
      if (n instanceof BulkIterationNode) {
        final BulkIterationNode iterNode = (BulkIterationNode) n;
        final BulkIterationBase<?> iter = iterNode.getIterationContract();

        // calculate closure of the anonymous function
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);

        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true,
          this.maxMachines, iterNode.getDegreeOfParallelism(), closure);
       
        BulkPartialSolutionNode partialSolution = null;
       
        iter.getNextPartialSolution().accept(recursiveCreator);
       
        partialSolution = (BulkPartialSolutionNode) recursiveCreator.con2node.get(iter.getPartialSolution());
        OptimizerNode rootOfStepFunction = recursiveCreator.con2node.get(iter.getNextPartialSolution());
        if (partialSolution == null) {
          throw new CompilerException("Error: The step functions result does not depend on the partial solution.");
        }
       
       
        OptimizerNode terminationCriterion = null;
       
        if (iter.getTerminationCriterion() != null) {
          terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
         
          // no intermediate node yet, traverse from the termination criterion to build the missing parts
          if (terminationCriterion == null) {
            iter.getTerminationCriterion().accept(recursiveCreator);
            terminationCriterion = recursiveCreator.con2node.get(iter.getTerminationCriterion());
          }
        }
       
        iterNode.setNextPartialSolution(rootOfStepFunction, terminationCriterion);
        iterNode.setPartialSolution(partialSolution);
       
        // account for the nested memory consumers
        this.numMemoryConsumers += recursiveCreator.numMemoryConsumers;
       
        // go over the contained data flow and mark the dynamic path nodes
        StaticDynamicPathIdentifier identifier = new StaticDynamicPathIdentifier(iterNode.getCostWeight());
        rootOfStepFunction.accept(identifier);
        if (terminationCriterion != null) {
          terminationCriterion.accept(identifier);
        }
      }
      else if (n instanceof WorksetIterationNode) {
        final WorksetIterationNode iterNode = (WorksetIterationNode) n;
        final DeltaIterationBase<?, ?> iter = iterNode.getIterationContract();

        // calculate the closure of the anonymous function
        HashMap<Operator<?>, OptimizerNode> closure = new HashMap<Operator<?>, OptimizerNode>(con2node);

        // first, recursively build the data flow for the step function
        final GraphCreatingVisitor recursiveCreator = new GraphCreatingVisitor(this, true,
          this.maxMachines, iterNode.getDegreeOfParallelism(), closure);
        // descend from the solution set delta and check that it depends on both the workset
        // and the solution set; if it depends on both, this descent creates both nodes
        iter.getSolutionSetDelta().accept(recursiveCreator);
       
        final SolutionSetNode solutionSetNode = (SolutionSetNode) recursiveCreator.con2node.get(iter.getSolutionSet());
        final WorksetNode worksetNode = (WorksetNode) recursiveCreator.con2node.get(iter.getWorkset());
       
        if (worksetNode == null) {
          throw new CompilerException("In the given plan, the solution set delta does not depend on the workset. This is a prerequisite in workset iterations.");
        }
       
        iter.getNextWorkset().accept(recursiveCreator);
       
        if (solutionSetNode == null || solutionSetNode.getOutgoingConnections() == null || solutionSetNode.getOutgoingConnections().isEmpty()) {
          throw new CompilerException("Error: The step function does not reference the solution set.");
        } else {
          for (PactConnection conn : solutionSetNode.getOutgoingConnections()) {
            OptimizerNode successor = conn.getTarget();
         
            if (successor.getClass() == MatchNode.class) {
              // find out which input to the match the solution set is
              MatchNode mn = (MatchNode) successor;
              if (mn.getFirstPredecessorNode() == solutionSetNode) {
                mn.makeJoinWithSolutionSet(0);
              } else if (mn.getSecondPredecessorNode() == solutionSetNode) {
                mn.makeJoinWithSolutionSet(1);
              } else {
                throw new CompilerException();
              }
            }
            else if (successor.getClass() == CoGroupNode.class) {
              CoGroupNode cg = (CoGroupNode) successor;
              if (cg.getFirstPredecessorNode() == solutionSetNode) {
                cg.makeCoGroupWithSolutionSet(0);
              } else if (cg.getSecondPredecessorNode() == solutionSetNode) {
                cg.makeCoGroupWithSolutionSet(1);
              } else {
                throw new CompilerException();
              }
            }
            else {
              throw new CompilerException("Error: The solution set may only be joined with through a Join or a CoGroup function.");
            }
          }
        }
       
        final OptimizerNode nextWorksetNode = recursiveCreator.con2node.get(iter.getNextWorkset());
        final OptimizerNode solutionSetDeltaNode = recursiveCreator.con2node.get(iter.getSolutionSetDelta());
       
        // set the step function nodes to the iteration node
        iterNode.setPartialSolution(solutionSetNode, worksetNode);
        iterNode.setNextPartialSolution(solutionSetDeltaNode, nextWorksetNode);
       
View Full Code Here


    }
  }

  private void mergeBranchPlanMaps() {
    for (OptimizerNode.UnclosedBranchDescriptor desc : template.getOpenBranches()) {
      OptimizerNode brancher = desc.getBranchingNode();

      if (branchPlan == null) {
        branchPlan = new HashMap<OptimizerNode, PlanNode>(6);
      }
     
View Full Code Here
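The final excerpt shows the surrounding compilation driver: the program is traversed with the graph-creating visitor, multiple sinks are folded under a single root, interesting properties and branches are tracked, and the best plan is selected: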

    program.accept(graphCreator);

    // if the plan has multiple data sinks, add logical optimizer nodes that each take two
    // data sinks as children, until only a single root node remains. This lets the optimizer
    // deal transparently with plans that have multiple outputs
    OptimizerNode rootNode;
    if (graphCreator.sinks.size() == 1) {
      rootNode = graphCreator.sinks.get(0);
    } else if (graphCreator.sinks.size() > 1) {
      Iterator<DataSinkNode> iter = graphCreator.sinks.iterator();
      rootNode = iter.next();

      while (iter.hasNext()) {
        rootNode = new SinkJoiner(rootNode, iter.next());
      }
    } else {
      throw new CompilerException("Bug: The optimizer plan representation has no sinks.");
    }

    // now that we have all nodes created and recorded which ones consume memory, tell the nodes their minimal
    // guaranteed memory, for further cost estimations. We assume an equal distribution of memory among consumer tasks
   
    rootNode.accept(new IdAndMemoryAndEstimatesVisitor(this.statistics,
      graphCreator.getMemoryConsumerCount() == 0 ? 0 : memoryPerInstance / graphCreator.getMemoryConsumerCount()));
   
    // Now that the previous step is done, the next step is to traverse the graph again for the two
    // steps that cannot directly be performed during plan enumeration, because we are dealing with DAGs
    // rather than trees. That requires us to deviate at some points from the classical DB optimizer algorithms.
    //
    // 1) propagate the interesting properties top-down through the graph
    // 2) Track information about nodes with multiple outputs that are later on reconnected in a node with
    // multiple inputs.
    InterestingPropertyVisitor propsVisitor = new InterestingPropertyVisitor(this.costEstimator);
    rootNode.accept(propsVisitor);
   
    BranchesVisitor branchingVisitor = new BranchesVisitor();
    rootNode.accept(branchingVisitor);
   
    // perform a sanity check: the root may not have any unclosed branches
    if (rootNode.getOpenBranches() != null && rootNode.getOpenBranches().size() > 0) {
      throw new CompilerException("Bug: Logic for branching plans (non-tree plans) has an error, and does not " +
          "track the re-joining of branches correctly.");
    }

    // the final step is now to generate the actual plan alternatives
    List<PlanNode> bestPlan = rootNode.getAlternativePlans(this.costEstimator);

    if (bestPlan.size() != 1) {
      throw new CompilerException("Error in compiler: more than one best plan was created!");
    }
View Full Code Here
