Package org.apache.flink.compiler.plan

Examples of org.apache.flink.compiler.plan.PlanNode
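
The snippets below come from the Flink plan compiler (the optimizer) and its tests. PlanNode is the base class of the candidate execution-plan operators that the compiler enumerates, costs, and finally translates into a job graph; the subclasses that appear here include SingleInputPlanNode, DualInputPlanNode, NAryUnionPlanNode, and the iteration-related nodes such as BulkIterationPlanNode and WorksetIterationPlanNode.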


        }
       
        // check whether this node is a child of a node with the same contract (aka combiner)
        boolean shouldAdd = true;
        for (Iterator<PlanNode> iter = list.iterator(); iter.hasNext();) {
          PlanNode in = iter.next();
          if (in.getOriginalOptimizerNode().getPactContract() == c) {
            // is this the child or is our node the child
            if (in instanceof SingleInputPlanNode && n instanceof SingleInputPlanNode) {
              SingleInputPlanNode thisNode = (SingleInputPlanNode) n;
              SingleInputPlanNode otherNode = (SingleInputPlanNode) in;
             
View Full Code Here


    public <T extends PlanNode> T getNode(String name, Class<? extends Function> stubClass) {
      List<PlanNode> nodes = this.map.get(name);
      if (nodes == null || nodes.isEmpty()) {
        throw new RuntimeException("No node found with the given name and stub class.");
      } else {
        PlanNode found = null;
        for (PlanNode node : nodes) {
          if (node.getClass() == stubClass) {
            if (found == null) {
              found = node;
            } else {
View Full Code Here
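
A call site for the lookup above might look as follows; the resolver variable, the node name, and the function class are hypothetical, chosen only to show the generic signature in use.

    // Hypothetical usage of getNode(name, stubClass); "resolver", the node name and
    // MyReduceFunction are illustrative and not taken from the page.
    SingleInputPlanNode reducer = resolver.getNode("My Reducer", MyReduceFunction.class);
    Assert.assertEquals(4, reducer.getDegreeOfParallelism());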

         */
        if (visitable instanceof NAryUnionPlanNode) {
          int numberInputs = 0;
          for (Iterator<Channel> inputs = visitable.getInputs().iterator(); inputs.hasNext(); numberInputs++) {
            final Channel inConn = inputs.next();
            PlanNode inNode = inConn.getSource();
            Assert.assertTrue("Input of Union should be FlatMapOperators",
                inNode.getPactContract() instanceof FlatMapOperatorBase);
            Assert.assertTrue("Shipment strategy under union should partition the data",
                inConn.getShipStrategy() == ShipStrategyType.PARTITION_HASH);
          }
         
          Assert.assertTrue("NAryUnion should have " + NUM_INPUTS + " inputs", numberInputs == NUM_INPUTS);
View Full Code Here

    oPlan.accept(new Visitor<PlanNode>() {
      @Override
      public boolean preVisit(PlanNode visitable) {
        if (visitable instanceof WorksetIterationPlanNode) {
          PlanNode deltaNode = ((WorksetIterationPlanNode) visitable).getSolutionSetDeltaPlanNode();

          //get the CoGroup
          DualInputPlanNode dpn = (DualInputPlanNode) deltaNode.getInputs().iterator().next().getSource();
          Channel in1 = dpn.getInput1();
          Channel in2 = dpn.getInput2();

          Assert.assertTrue(in1.getLocalProperties().getOrdering() == null);
          Assert.assertTrue(in2.getLocalProperties().getOrdering() != null);
View Full Code Here
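
Both test snippets above drive an OptimizedPlan with a Visitor<PlanNode> and then inspect channels through getInputs(), getSource(), and getShipStrategy(). The following is a minimal, self-contained sketch of the same traversal pattern; the class name and the counting logic are illustrative, and the imports assume the org.apache.flink.compiler / org.apache.flink.runtime packages used in these snippets.

import org.apache.flink.compiler.plan.Channel;
import org.apache.flink.compiler.plan.PlanNode;
import org.apache.flink.runtime.operators.shipping.ShipStrategyType;
import org.apache.flink.util.Visitor;

// Sketch only: counts how many incoming channels of the visited plan nodes use hash
// partitioning. It is applied exactly like the anonymous visitors above, e.g.
// optimizedPlan.accept(new HashPartitionCounter()).
public class HashPartitionCounter implements Visitor<PlanNode> {

  private int hashPartitionedChannels;

  @Override
  public boolean preVisit(PlanNode visitable) {
    for (Channel inConn : visitable.getInputs()) {
      if (inConn.getShipStrategy() == ShipStrategyType.PARTITION_HASH) {
        hashPartitionedChannels++;
      }
    }
    return true; // keep walking towards this node's inputs
  }

  @Override
  public void postVisit(PlanNode visitable) {
    // nothing to do on the way back
  }

  public int getHashPartitionedChannels() {
    return hashPartitionedChannels;
  }
}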

       
        // check that the root of the step function has the same DOP as the iteration.
        // Because the tail must have the same DOP as the head, we can only merge the last
        // operator into the tail if they have the same DOP; not merging them is currently
        // not implemented.
        PlanNode root = iterationNode.getRootOfStepFunction();
        if (root.getDegreeOfParallelism() != node.getDegreeOfParallelism())
        {
          throw new CompilerException("Error: The final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
       
        IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
        this.iterations.put(iterationNode, descr);
        vertex = null;
      }
      else if (node instanceof WorksetIterationPlanNode) {
        WorksetIterationPlanNode iterationNode = (WorksetIterationPlanNode) node;

        // we have the same constraints as for the bulk iteration
        PlanNode nextWorkSet = iterationNode.getNextWorkSetPlanNode();
        PlanNode solutionSetDelta  = iterationNode.getSolutionSetDeltaPlanNode();
       
        if (nextWorkSet.getDegreeOfParallelism() != node.getDegreeOfParallelism())
        {
          throw new CompilerException("It is currently not supported that the final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
        if (solutionSetDelta.getDegreeOfParallelism() != node.getDegreeOfParallelism())
        {
          throw new CompilerException("It is currently not supported that the final operator of the step " +
              "function has a different degree of parallelism than the iteration operator itself.");
        }
       
        IterationDescriptor descr = new IterationDescriptor(iterationNode, this.iterationIdEnumerator++);
        this.iterations.put(iterationNode, descr);
        vertex = null;
      }
      else if (node instanceof SingleInputPlanNode) {
        vertex = createSingleInputVertex((SingleInputPlanNode) node);
      }
      else if (node instanceof DualInputPlanNode) {
        vertex = createDualInputVertex((DualInputPlanNode) node);
      }
      else if (node instanceof NAryUnionPlanNode) {
        // skip the union for now
        vertex = null;
      }
      else if (node instanceof BulkPartialSolutionPlanNode) {
        // create a head node (or not, if it is merged into its successor)
        vertex = createBulkIterationHead((BulkPartialSolutionPlanNode) node);
      }
      else if (node instanceof SolutionSetPlanNode) {
        // this represents an access into the solution set index.
        // we do not create a vertex for the solution set here (we create the head at the workset placeholder)
       
        // we adjust the joins / cogroups that go into the solution set here
        for (Channel c : node.getOutgoingChannels()) {
          DualInputPlanNode target = (DualInputPlanNode) c.getTarget();
          AbstractJobVertex accessingVertex = this.vertices.get(target);
          TaskConfig conf = new TaskConfig(accessingVertex.getConfiguration());
          int inputNum = c == target.getInput1() ? 0 : c == target.getInput2() ? 1 : -1;
         
          // sanity checks
          if (inputNum == -1) {
            throw new CompilerException();
          }
         
          // adjust the driver
          if (conf.getDriver().equals(MatchDriver.class)) {
            conf.setDriver(inputNum == 0 ? JoinWithSolutionSetFirstDriver.class : JoinWithSolutionSetSecondDriver.class);
          }
          else if (conf.getDriver().equals(CoGroupDriver.class)) {
            conf.setDriver(inputNum == 0 ? CoGroupWithSolutionSetFirstDriver.class : CoGroupWithSolutionSetSecondDriver.class);
          }
          else {
            throw new CompilerException("Found join with solution set using incompatible operator (only Join/CoGroup are valid).");
          }
        }
       
        // make sure we do not visit this node again. for that, we add an 'already seen' entry into one of the sets
        this.chainedTasks.put(node, ALREADY_VISITED_PLACEHOLDER);
       
        vertex = null;
      }
      else if (node instanceof WorksetPlanNode) {
        // create the iteration head here
        vertex = createWorksetIterationHead((WorksetPlanNode) node);
      }
      else {
        throw new CompilerException("Unrecognized node type: " + node.getClass().getName());
      }
    }
    catch (Exception e) {
      throw new CompilerException("Error translating node '" + node + "': " + e.getMessage(), e);
    }
   
    // check if a vertex was created, or if it was chained or skipped
    if (vertex != null) {
      // set degree of parallelism
      int pd = node.getDegreeOfParallelism();
      vertex.setParallelism(pd);
     
      vertex.setSlotSharingGroup(sharingGroup);
     
      // check whether this vertex is part of an iteration step function
      if (this.currentIteration != null) {
        // check that the task has the same, or a lower, DOP than the iteration itself
        PlanNode iterationNode = (PlanNode) this.currentIteration;
        if (iterationNode.getDegreeOfParallelism() < pd) {
          throw new CompilerException("Error: All functions that are part of an iteration must have the same, or a lower, degree-of-parallelism than the iteration operator.");
        }

        // store the id of the iteration this step function participates in
        IterationDescriptor descr = this.iterations.get(this.currentIteration);
View Full Code Here
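
The translation loop above is one large dispatch on the concrete PlanNode subclass: it decides whether the node becomes a regular job vertex, an iteration head, only an IterationDescriptor, or no vertex at all. As a compact, illustrative summary of that dispatch (the helper itself is not part of the generator):

  // Illustrative helper summarizing the dispatch above; it uses only the PlanNode
  // subclasses that appear in the snippet.
  private static String translationPath(PlanNode node) {
    if (node instanceof SingleInputPlanNode || node instanceof DualInputPlanNode) {
      return "regular job vertex";
    }
    else if (node instanceof NAryUnionPlanNode) {
      return "no vertex (the union is merged into its successors)";
    }
    else if (node instanceof SolutionSetPlanNode) {
      return "no vertex (solution set index access, handled at the consuming join/cogroup)";
    }
    else if (node instanceof BulkPartialSolutionPlanNode || node instanceof WorksetPlanNode) {
      return "iteration head vertex (possibly merged into its successor)";
    }
    else if (node instanceof BulkIterationPlanNode || node instanceof WorksetIterationPlanNode) {
      return "iteration descriptor only, no vertex of its own";
    }
    else {
      return "unrecognized node type";
    }
  }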

          }
 
          AbstractJobVertex container = chainedTask.getContainingVertex();
         
          if (container == null) {
            final PlanNode sourceNode = inConn.getSource();
            container = this.vertices.get(sourceNode);
            if (container == null) {
              // predecessor is itself chained
              container = this.chainedTasks.get(sourceNode).getContainingVertex();
              if (container == null) {
View Full Code Here

  }
 
  private int translateChannel(Channel input, int inputIndex, AbstractJobVertex targetVertex,
      TaskConfig targetVertexConfig, boolean isBroadcast) throws Exception
  {
    final PlanNode inputPlanNode = input.getSource();
    final Iterator<Channel> allInChannels;
   
    if (inputPlanNode instanceof NAryUnionPlanNode) {
      allInChannels = ((NAryUnionPlanNode) inputPlanNode).getListOfInputs().iterator();
    }
    else if (inputPlanNode instanceof BulkPartialSolutionPlanNode) {
      if (this.vertices.get(inputPlanNode) == null) {
        // merged iteration head
        final BulkPartialSolutionPlanNode pspn = (BulkPartialSolutionPlanNode) inputPlanNode;
        final BulkIterationPlanNode iterationNode = pspn.getContainingIterationNode();
       
        // check if the iteration's input is a union
        if (iterationNode.getInput().getSource() instanceof NAryUnionPlanNode) {
          allInChannels = ((NAryUnionPlanNode) iterationNode.getInput().getSource()).getInputs().iterator();
        } else {
          allInChannels = Collections.singletonList(iterationNode.getInput()).iterator();
        }
       
        // also, set the index of the gate with the partial solution
        targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(inputIndex);
      } else {
        // standalone iteration head
        allInChannels = Collections.singletonList(input).iterator();
      }
    } else if (inputPlanNode instanceof WorksetPlanNode) {
      if (this.vertices.get(inputPlanNode) == null) {
        // merged iteration head
        final WorksetPlanNode wspn = (WorksetPlanNode) inputPlanNode;
        final WorksetIterationPlanNode iterationNode = wspn.getContainingIterationNode();
       
        // check if the iteration's input is a union
        if (iterationNode.getInput2().getSource() instanceof NAryUnionPlanNode) {
          allInChannels = ((NAryUnionPlanNode) iterationNode.getInput2().getSource()).getInputs().iterator();
        } else {
          allInChannels = Collections.singletonList(iterationNode.getInput2()).iterator();
        }
       
        // also, set the index of the gate with the initial workset
        targetVertexConfig.setIterationHeadPartialSolutionOrWorksetInputIndex(inputIndex);
      } else {
        // standalone iteration head
        allInChannels = Collections.singletonList(input).iterator();
      }
    } else if (inputPlanNode instanceof SolutionSetPlanNode) {
      // for now, skip connections with the solution set node, as this is a local index access (later to be parameterized here)
      // rather than a vertex connection
      return 0;
    } else {
      allInChannels = Collections.singletonList(input).iterator();
    }
   
    // check that the type serializer is consistent
    TypeSerializerFactory<?> typeSerFact = null;
   
    // accounting for channels on the dynamic path
    int numChannelsTotal = 0;
    int numChannelsDynamicPath = 0;
    int numDynamicSenderTasksTotal = 0;
   

    // expand the channel to all the union channels, in case there is a union operator at its source
    while (allInChannels.hasNext()) {
      final Channel inConn = allInChannels.next();
     
      // sanity check the common serializer
      if (typeSerFact == null) {
        typeSerFact = inConn.getSerializer();
      } else if (!typeSerFact.equals(inConn.getSerializer())) {
        throw new CompilerException("Conflicting types in union operator.");
      }
     
      final PlanNode sourceNode = inConn.getSource();
      AbstractJobVertex sourceVertex = this.vertices.get(sourceNode);
      TaskConfig sourceVertexConfig;

      if (sourceVertex == null) {
        // this predecessor is chained to another task or an iteration
View Full Code Here

   
    // check whether chaining is possible
    boolean chaining = false;
    {
      Channel inConn = node.getInput();
      PlanNode pred = inConn.getSource();
      chaining = ds.getPushChainDriverClass() != null &&
          !(pred instanceof NAryUnionPlanNode) &&  // first op after union is stand-alone, because union is merged
          !(pred instanceof BulkPartialSolutionPlanNode) &&  // partial solution merges anyways
          !(pred instanceof WorksetPlanNode) &&  // workset merges anyways
          !(pred instanceof IterationPlanNode) && // cannot chain with iteration heads currently
          inConn.getShipStrategy() == ShipStrategyType.FORWARD &&
          inConn.getLocalStrategy() == LocalStrategy.NONE &&
          pred.getOutgoingChannels().size() == 1 &&
          node.getDegreeOfParallelism() == pred.getDegreeOfParallelism() &&
          node.getBroadcastInputs().isEmpty();
     
      // cannot chain the nodes that produce the next workset or the next solution set,
      // if they are not the tail of the step function
      if (this.currentIteration != null && this.currentIteration instanceof WorksetIterationPlanNode &&
View Full Code Here
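
The chaining decision above is a single conjunction over the input channel and its source. Extracted into a standalone predicate it reads as follows; this is a sketch that leaves out the ds.getPushChainDriverClass() check and the extra workset-iteration condition that the snippet truncates, and the method name is ours.

  // Sketch of the chaining precondition shown above (driver and iteration-tail checks omitted).
  private static boolean chainingPossible(SingleInputPlanNode node) {
    Channel inConn = node.getInput();
    PlanNode pred = inConn.getSource();
    return !(pred instanceof NAryUnionPlanNode)           // first op after a union stays stand-alone
        && !(pred instanceof BulkPartialSolutionPlanNode) // partial solution merges anyway
        && !(pred instanceof WorksetPlanNode)             // workset merges anyway
        && !(pred instanceof IterationPlanNode)           // cannot chain with iteration heads
        && inConn.getShipStrategy() == ShipStrategyType.FORWARD
        && inConn.getLocalStrategy() == LocalStrategy.NONE
        && pred.getOutgoingChannels().size() == 1
        && node.getDegreeOfParallelism() == pred.getDegreeOfParallelism()
        && node.getBroadcastInputs().isEmpty();
  }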

    {
      List<PlanNode> newCandidates = new ArrayList<PlanNode>();
     
      for (Iterator<PlanNode> planDeleter = candidates.iterator(); planDeleter.hasNext(); ) {
        PlanNode candidate = planDeleter.next();
       
        GlobalProperties atEndGlobal = candidate.getGlobalProperties();
        LocalProperties atEndLocal = candidate.getLocalProperties();
       
        FeedbackPropertiesMeetRequirementsReport report = candidate.checkPartialSolutionPropertiesMet(pspn, atEndGlobal, atEndLocal);
        if (report == FeedbackPropertiesMeetRequirementsReport.NO_PARTIAL_SOLUTION) {
          // the candidate depends on the partial solution only through a broadcast variable; nothing to do
        }
        else if (report == FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
          // attach a no-op node through which we create the properties of the original input
          Channel toNoOp = new Channel(candidate);
          globPropsReq.parameterizeChannel(toNoOp, false);
          locPropsReq.parameterizeChannel(toNoOp);
         
          UnaryOperatorNode rebuildPropertiesNode = new UnaryOperatorNode("Rebuild Partial Solution Properties", FieldList.EMPTY_LIST);
          rebuildPropertiesNode.setDegreeOfParallelism(candidate.getDegreeOfParallelism());
         
          SingleInputPlanNode rebuildPropertiesPlanNode = new SingleInputPlanNode(rebuildPropertiesNode, "Rebuild Partial Solution Properties", toNoOp, DriverStrategy.UNARY_NO_OP);
          rebuildPropertiesPlanNode.initProperties(toNoOp.getGlobalProperties(), toNoOp.getLocalProperties());
          estimator.costOperator(rebuildPropertiesPlanNode);
           
          GlobalProperties atEndGlobalModified = rebuildPropertiesPlanNode.getGlobalProperties();
          LocalProperties atEndLocalModified = rebuildPropertiesPlanNode.getLocalProperties();
           
          if (!(atEndGlobalModified.equals(atEndGlobal) && atEndLocalModified.equals(atEndLocal))) {
            FeedbackPropertiesMeetRequirementsReport report2 = candidate.checkPartialSolutionPropertiesMet(pspn, atEndGlobalModified, atEndLocalModified);
           
            if (report2 != FeedbackPropertiesMeetRequirementsReport.NOT_MET) {
              newCandidates.add(rebuildPropertiesPlanNode);
            }
          }
         
          planDeleter.remove();
        }
      }
    }
   
    if (candidates.isEmpty()) {
      return;
    }
   
    // 5) Create a candidate for the Iteration Node for every remaining plan of the step function.
    if (terminationCriterion == null) {
      for (PlanNode candidate : candidates) {
        BulkIterationPlanNode node = new BulkIterationPlanNode(this, "BulkIteration ("+this.getPactContract().getName()+")", in, pspn, candidate);
        GlobalProperties gProps = candidate.getGlobalProperties().clone();
        LocalProperties lProps = candidate.getLocalProperties().clone();
        node.initProperties(gProps, lProps);
        target.add(node);
      }
    }
    else if (candidates.size() > 0) {
      List<PlanNode> terminationCriterionCandidates = this.terminationCriterion.getAlternativePlans(estimator);

      SingleRootJoiner singleRoot = (SingleRootJoiner) this.singleRoot;
     
      for (PlanNode candidate : candidates) {
        for (PlanNode terminationCandidate : terminationCriterionCandidates) {
          if (singleRoot.areBranchCompatible(candidate, terminationCandidate)) {
            BulkIterationPlanNode node = new BulkIterationPlanNode(this, "BulkIteration ("+this.getPactContract().getName()+")", in, pspn, candidate, terminationCandidate);
            GlobalProperties gProps = candidate.getGlobalProperties().clone();
            LocalProperties lProps = candidate.getLocalProperties().clone();
            node.initProperties(gProps, lProps);
            target.add(node);
           
          }
        }
View Full Code Here

    //    this translates to a local strategy that would only be executed in the first iteration
   
    final boolean merge;
    if (mergeIterationAuxTasks && pspn.getOutgoingChannels().size() == 1) {
      final Channel c = pspn.getOutgoingChannels().get(0);
      final PlanNode successor = c.getTarget();
      merge = c.getShipStrategy() == ShipStrategyType.FORWARD &&
          c.getLocalStrategy() == LocalStrategy.NONE &&
          c.getTempMode() == TempMode.NONE &&
          successor.getDegreeOfParallelism() == pspn.getDegreeOfParallelism() &&
          !(successor instanceof NAryUnionPlanNode) &&
          successor != iteration.getRootOfStepFunction() &&
          iteration.getInput().getLocalStrategy() == LocalStrategy.NONE;
    } else {
      merge = false;
    }
   
    // create or adopt the head vertex
    final AbstractJobVertex toReturn;
    final AbstractJobVertex headVertex;
    final TaskConfig headConfig;
    if (merge) {
      final PlanNode successor = pspn.getOutgoingChannels().get(0).getTarget();
      headVertex = (AbstractJobVertex) this.vertices.get(successor);
     
      if (headVertex == null) {
        throw new CompilerException(
          "Bug: Trying to merge solution set with its sucessor, but successor has not been created.");
View Full Code Here
