Package org.apache.flink.compiler.plan

Examples of org.apache.flink.compiler.plan.OptimizedPlan
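The snippets below come from Flink's compiler tests. Most of them obtain an OptimizedPlan through a compileNoStats(plan) helper of the test base, which runs the optimizer over a Plan without data statistics. As a rough orientation only, here is a minimal sketch of how an OptimizedPlan is produced directly, assuming the Flink 0.x compiler API (PactCompiler and DataStatistics from org.apache.flink.compiler, DefaultCostEstimator from org.apache.flink.compiler.costs; the exact constructor used below is an assumption):

      // sketch, assuming the Flink 0.x compiler API:
      // the PactCompiler turns an (unoptimized) Plan into an OptimizedPlan
      Plan plan = env.createProgramPlan();
      PactCompiler compiler = new PactCompiler(new DataStatistics(), new DefaultCostEstimator());
      OptimizedPlan optimizedPlan = compiler.compile(plan);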


      sinks.add(sinkB);
     
      // construct the PACT plan
      Plan plan = new Plan(sinks, "Plans With Multiple Data Sinks");
     
      OptimizedPlan oPlan = compileNoStats(plan);
     
      // ---------- check the optimizer plan ----------
     
      // number of sinks
      Assert.assertEquals("Wrong number of data sinks.", 2, oPlan.getDataSinks().size());
     
      // sinks contain all sink paths
      Set<String> allSinks = new HashSet<String>();
      allSinks.add(out1Path);
      allSinks.add(out2Path);
     
      for (SinkPlanNode n : oPlan.getDataSinks()) {
        String path = ((FileDataSink) n.getSinkNode().getPactContract()).getFilePath();
        Assert.assertTrue("Invalid data sink.", allSinks.remove(path));
      }
     
      // ---------- compile plan to Nephele job graph to verify that no error is thrown ----------
View Full Code Here
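The example above stops right before the job graph translation mentioned in its last comment. A hedged sketch of that step, assuming the pre-0.9 plan translator (NepheleJobGraphGenerator from org.apache.flink.compiler.plantranslate):

      // assumption: translate the OptimizedPlan into a Nephele JobGraph;
      // the test only verifies that this step does not throw
      NepheleJobGraphGenerator jobGen = new NepheleJobGraphGenerator();
      JobGraph jobGraph = jobGen.compileJobGraph(oPlan);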


        .groupBy(0, 1).sum(2)
        .groupBy(0).sum(1)
        .print();
     
      Plan p = env.createProgramPlan();
      OptimizedPlan op = compileNoStats(p);
     
      SinkPlanNode sink = op.getDataSinks().iterator().next();
     
      SingleInputPlanNode agg2Reducer = (SingleInputPlanNode) sink.getInput().getSource();
      SingleInputPlanNode agg2Combiner = (SingleInputPlanNode) agg2Reducer.getInput().getSource();
      SingleInputPlanNode agg1Reducer = (SingleInputPlanNode) agg2Combiner.getInput().getSource();
     
View Full Code Here
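The example above is cut off before its assertions. Because the first aggregation groups on fields (0, 1) and the second groups on field 0 alone, a partitioning on (0, 1) does not co-locate records by field 0, so a hash re-partitioning (fed by the combiner) is expected between the two reducers. The continuation below is an assumption for illustration, not taken from the truncated test:

      // assumed continuation: the second reducer needs a hash re-partition on field 0,
      // while its combiner runs locally on the output of the first aggregation
      assertEquals(ShipStrategyType.PARTITION_HASH, agg2Reducer.getInput().getShipStrategy());
      assertEquals(ShipStrategyType.FORWARD, agg2Combiner.getInput().getShipStrategy());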

        .groupBy(0).sum(1)
        .groupBy(0, 1).sum(2)
        .print();
     
      Plan p = env.createProgramPlan();
      OptimizedPlan op = compileNoStats(p);
     
      SinkPlanNode sink = op.getDataSinks().iterator().next();
     
      SingleInputPlanNode agg2Reducer = (SingleInputPlanNode) sink.getInput().getSource();
      SingleInputPlanNode agg1Reducer = (SingleInputPlanNode) agg2Reducer.getInput().getSource();
     
      assertEquals(ShipStrategyType.FORWARD, agg2Reducer.getInput().getShipStrategy());
View Full Code Here

  @Test
  public void testJavaApiWithDeferredSolutionSetUpdateWithMapper() {
    try {
      Plan plan = getJavaTestPlan(false, true);
     
      OptimizedPlan oPlan = compileNoStats(plan);
 
      OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
      DualInputPlanNode joinWithInvariantNode = resolver.getNode(JOIN_WITH_INVARIANT_NAME);
      DualInputPlanNode joinWithSolutionSetNode = resolver.getNode(JOIN_WITH_SOLUTION_SET);
      SingleInputPlanNode worksetReducer = resolver.getNode(NEXT_WORKSET_REDUCER_NAME);
View Full Code Here

  @Test
  public void testJavaApiWithDeferredSolutionSetUpdateWithNonPreservingJoin() {
    try {
      Plan plan = getJavaTestPlan(false, false);
     
      OptimizedPlan oPlan = compileNoStats(plan);
     
      OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
      DualInputPlanNode joinWithInvariantNode = resolver.getNode(JOIN_WITH_INVARIANT_NAME);
      DualInputPlanNode joinWithSolutionSetNode = resolver.getNode(JOIN_WITH_SOLUTION_SET);
      SingleInputPlanNode worksetReducer = resolver.getNode(NEXT_WORKSET_REDUCER_NAME);
View Full Code Here

  @Test
  public void testJavaApiWithDirectSolutionSetUpdate() {
    try {
      Plan plan = getJavaTestPlan(true, false);
     
      OptimizedPlan oPlan = compileNoStats(plan);
 
     
      OptimizerPlanNodeResolver resolver = getOptimizerPlanNodeResolver(oPlan);
      DualInputPlanNode joinWithInvariantNode = resolver.getNode(JOIN_WITH_INVARIANT_NAME);
      DualInputPlanNode joinWithSolutionSetNode = resolver.getNode(JOIN_WITH_SOLUTION_SET);
View Full Code Here
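All three workset-iteration examples above are truncated right after resolving the named plan nodes (joinWithInvariantNode, joinWithSolutionSetNode and, in the first two examples, worksetReducer). Such tests typically go on to inspect the ship strategies of these nodes' inputs; the concrete strategies below are assumptions for illustration only, and getInput1() on DualInputPlanNode is assumed to return the first input channel:

      // assumed illustration: verify hash partitioning on the join and reducer inputs
      assertEquals(ShipStrategyType.PARTITION_HASH, joinWithInvariantNode.getInput1().getShipStrategy());
      assertEquals(ShipStrategyType.PARTITION_HASH, joinWithSolutionSetNode.getInput1().getShipStrategy());
      assertEquals(ShipStrategyType.PARTITION_HASH, worksetReducer.getInput().getShipStrategy());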

    sink.setInput(reduce2);
   
    Plan plan = new Plan(sink, "Test Increasing Degree Of Parallelism");
   
    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);
   
    // check the optimized Plan
    // when reducer 1 distributes its data across the instances of map2, it needs to employ hash partitioning,
    // because map2 has twice as many instances and all key/value pairs with the same key must still be
    // processed by the same map and, downstream, the same reduce instance
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SingleInputPlanNode red2Node = (SingleInputPlanNode) sinkNode.getPredecessor();
    SingleInputPlanNode map2Node = (SingleInputPlanNode) red2Node.getPredecessor();
   
    ShipStrategyType mapIn = map2Node.getInput().getShipStrategy();
    ShipStrategyType redIn = red2Node.getInput().getShipStrategy();
View Full Code Here
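This example (and the variant below it) is truncated just before the assertions on mapIn and redIn. Following the reasoning in the comment, at least one of the two connections has to hash-partition the data so that records with the same key still meet in the same reducer instance; the concrete assertion below is an assumption for illustration:

    // assumed continuation: one of the two connections must re-partition by hash
    Assert.assertTrue("Invalid ship strategy for an operator.",
        mapIn == ShipStrategyType.PARTITION_HASH || redIn == ShipStrategyType.PARTITION_HASH);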

    sink.setInput(reduce2);
   
    Plan plan = new Plan(sink, "Test Increasing Degree Of Parallelism");
   
    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);
   
    // check the optimized Plan
    // when reducer 1 distributes its data across the instances of map2, it needs to employ hash partitioning,
    // because map2 has twice as many instances and all key/value pairs with the same key must still be
    // processed by the same map and, downstream, the same reduce instance
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SingleInputPlanNode red2Node = (SingleInputPlanNode) sinkNode.getPredecessor();
    SingleInputPlanNode map2Node = (SingleInputPlanNode) red2Node.getPredecessor();
   
    ShipStrategyType mapIn = map2Node.getInput().getShipStrategy();
    ShipStrategyType reduceIn = red2Node.getInput().getShipStrategy();
View Full Code Here


          .map(new IdentityMapper<Tuple2<Long, Long>>()).withBroadcastSet(iter.getWorkset(), "bc data")
          .join(iter.getSolutionSet()).where(0).equalTo(1).projectFirst(1).projectSecond(1).types(Long.class, Long.class);
     
      iter.closeWith(result.map(new IdentityMapper<Tuple2<Long,Long>>()), result).print();
     
      OptimizedPlan p = compileNoStats(env.createProgramPlan());
     
      new PlanJSONDumpGenerator().getOptimizerPlanAsJSON(p);
    }
    catch (Exception e) {
      e.printStackTrace();
View Full Code Here
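A small hedged extension of the example above, showing how the JSON dump of the OptimizedPlan can be sanity-checked in the same JUnit Assert style used in the first example:

      // check that the dump generator produced a non-empty JSON result
      String json = new PlanJSONDumpGenerator().getOptimizerPlanAsJSON(p);
      Assert.assertNotNull("JSON dump must not be null.", json);
      Assert.assertFalse("JSON dump must not be empty.", json.isEmpty());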
