Package org.apache.flink.api.java.record.operators

Examples of org.apache.flink.api.java.record.operators.BulkIteration
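
All the snippets below follow the same wiring: a BulkIteration receives an initial input, its partial solution feeds the step function, the step function's result is registered as the next partial solution, and the loop ends after a maximum number of supersteps or when a termination/convergence criterion fires. The following is a minimal sketch of that pattern, not taken from any one of the examples; the class name, the IdentityMap step function, the paths, and the import locations are assumptions based on the record API used below.

    import org.apache.flink.api.common.Plan;
    import org.apache.flink.api.java.record.functions.MapFunction;
    import org.apache.flink.api.java.record.io.CsvOutputFormat;
    import org.apache.flink.api.java.record.io.TextInputFormat;
    import org.apache.flink.api.java.record.operators.BulkIteration;
    import org.apache.flink.api.java.record.operators.FileDataSink;
    import org.apache.flink.api.java.record.operators.FileDataSource;
    import org.apache.flink.api.java.record.operators.MapOperator;
    import org.apache.flink.types.Record;
    import org.apache.flink.types.StringValue;
    import org.apache.flink.util.Collector;

    public class BulkIterationSkeleton {

      // trivial step function: forwards every record unchanged
      public static class IdentityMap extends MapFunction {
        @Override
        public void map(Record record, Collector<Record> out) {
          out.collect(record);
        }
      }

      public static Plan getPlan(String inputPath, String outputPath, int numIterations) {
        // data set that seeds the first superstep
        FileDataSource initialInput = new FileDataSource(TextInputFormat.class, inputPath, "Input");

        BulkIteration iteration = new BulkIteration("Bulk Iteration");
        iteration.setInput(initialInput);
        iteration.setMaximumNumberOfIterations(numIterations);

        // the step function consumes the partial solution of the current superstep
        MapOperator step = MapOperator.builder(IdentityMap.class)
            .input(iteration.getPartialSolution())
            .name("Step Function")
            .build();

        // the step function's output becomes the partial solution of the next superstep
        iteration.setNextPartialSolution(step);

        // the iteration result is consumed like the output of any other operator
        FileDataSink result = new FileDataSink(new CsvOutputFormat("\n", " ", StringValue.class),
            outputPath, iteration, "Output");
        return new Plan(result, "BulkIteration Skeleton");
      }
    }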


   
    FileDataSource pageWithRankInput = new FileDataSource(new DanglingPageRankInputFormat(),
      pageWithRankInputPath, "DanglingPageWithRankInput");
    pageWithRankInput.getParameters().setLong(DanglingPageRankInputFormat.NUM_VERTICES_PARAMETER, numVertices);
   
    BulkIteration iteration = new BulkIteration("Page Rank Loop");
    iteration.setInput(pageWithRankInput);
   
    FileDataSource adjacencyListInput = new FileDataSource(new ImprovedAdjacencyListInputFormat(),
      adjacencyListInputPath, "AdjacencyListInput");
   
    JoinOperator join = JoinOperator.builder(new DotProductMatch(), LongValue.class, 0, 0)
        .input1(iteration.getPartialSolution())
        .input2(adjacencyListInput)
        .name("Join with Edges")
        .build();
   
    CoGroupOperator rankAggregation = CoGroupOperator.builder(new DotProductCoGroup(), LongValue.class, 0, 0)
        .input1(iteration.getPartialSolution())
        .input2(join)
        .name("Rank Aggregation")
        .build();
    rankAggregation.getParameters().setLong(DotProductCoGroup.NUM_VERTICES_PARAMETER, numVertices);
    rankAggregation.getParameters().setLong(DotProductCoGroup.NUM_DANGLING_VERTICES_PARAMETER, numDanglingVertices);
   
    iteration.setNextPartialSolution(rankAggregation);
    iteration.setMaximumNumberOfIterations(numIterations);
    iteration.getAggregators().registerAggregationConvergenceCriterion(DotProductCoGroup.AGGREGATOR_NAME, new PageRankStatsAggregator(),
        new DiffL1NormConvergenceCriterion());
   
    FileDataSink out = new FileDataSink(new PageWithRankOutFormat(), outputPath, iteration, "Final Ranks");

    Plan p = new Plan(out, "Dangling PageRank");
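
The registerAggregationConvergenceCriterion call above couples an aggregator with a convergence check: after every superstep the aggregated value is handed to the criterion, and the loop stops early once it reports convergence. A hedged sketch of such a criterion follows; the real DiffL1NormConvergenceCriterion works on the PageRankStats aggregate, whereas the value type and threshold below are placeholder assumptions.

    import org.apache.flink.api.common.aggregators.ConvergenceCriterion;
    import org.apache.flink.types.DoubleValue;

    // hypothetical criterion: stop once the aggregated L1 difference drops below epsilon
    public class DiffConvergenceCriterionSketch implements ConvergenceCriterion<DoubleValue> {

      private static final double EPSILON = 1e-6;   // placeholder threshold

      @Override
      public boolean isConverged(int iteration, DoubleValue aggregatedDiff) {
        return aggregatedDiff.getValue() < EPSILON;
      }
    }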


   
    FileDataSource pageWithRankInput = new FileDataSource(new DanglingPageRankInputFormat(),
      pageWithRankInputPath, "PageWithRank Input");
    pageWithRankInput.getParameters().setLong(NUM_VERTICES_CONFIG_PARAM, numVertices);
   
    BulkIteration iteration = new BulkIteration("Page Rank Loop");
    iteration.setInput(pageWithRankInput);
   
    FileDataSource adjacencyListInput = new FileDataSource(new ImprovedAdjacencyListInputFormat(),
      adjacencyListInputPath, "AdjacencyListInput");
   
    JoinOperator join = JoinOperator.builder(new JoinVerexWithEdgesMatch(), LongValue.class, 0, 0)
        .input1(iteration.getPartialSolution())
        .input2(adjacencyListInput)
        .name("Join with Edges")
        .build();
   
    ReduceOperator rankAggregation = ReduceOperator.builder(new AggregatingReduce(), LongValue.class, 0)
        .input(join)
        .name("Rank Aggregation")
        .build();
   
    iteration.setNextPartialSolution(rankAggregation);
    iteration.setMaximumNumberOfIterations(numIterations);
   
    JoinOperator termination = JoinOperator.builder(new JoinOldAndNew(), LongValue.class, 0, 0)
        .input1(iteration.getPartialSolution())
        .input2(rankAggregation)
        .name("Join Old and New")
        .build();
   
    iteration.setTerminationCriterion(termination);
   
    FileDataSink out = new FileDataSink(new PageWithRankOutFormat(), outputPath, iteration, "Final Ranks");

    Plan p = new Plan(out, "Simple PageRank");
    p.setDefaultParallelism(dop);
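
A plan assembled this way can be run, for instance, with the embedded local executor. A minimal driver sketch, assuming org.apache.flink.client.LocalExecutor from the same release is on the classpath:

    // hedged driver sketch: runs the plan on an embedded local mini-cluster
    LocalExecutor.execute(p);   // declared to throw Exception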

 
  static Plan getTestPlanPlan(int numSubTasks, String input, String output) {

    FileDataSource initialInput = new FileDataSource(TextInputFormat.class, input, "input");
   
    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(initialInput);
    iteration.setMaximumNumberOfIterations(NUM_ITERATIONS);

    ReduceOperator sumReduce = ReduceOperator.builder(new SumReducer())
        .input(iteration.getPartialSolution())
        .name("Compute sum (Reduce)")
        .build();
   
    iteration.setNextPartialSolution(sumReduce);

    @SuppressWarnings("unchecked")
    FileDataSink finalResult = new FileDataSink(new CsvOutputFormat("\n", " ", StringValue.class), output, iteration, "Output");

    Plan plan = new Plan(finalResult, "Iteration with AllReducer (keyless Reducer)");

 
  private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {

    FileDataSource initialInput = new FileDataSource(TextInputFormat.class, input, "input");
   
    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(initialInput);
    iteration.setMaximumNumberOfIterations(5);
    Assert.assertTrue(iteration.getMaximumNumberOfIterations() > 1);

    ReduceOperator sumReduce = ReduceOperator.builder(new SumReducer())
        .input(iteration.getPartialSolution())
        .name("Compute sum (Reduce)")
        .build();
   
    iteration.setNextPartialSolution(sumReduce);
   
    MapOperator terminationMapper = MapOperator.builder(new TerminationMapper())
        .input(iteration.getPartialSolution())
        .name("Compute termination criterion (Map)")
        .build();
   
    iteration.setTerminationCriterion(terminationMapper);

    FileDataSink finalResult = new FileDataSink(CsvOutputFormat.class, output, iteration, "Output");
    CsvOutputFormat.configureRecordFormat(finalResult)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
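
The termination criterion set above is itself a data set: the bulk iteration keeps running while the criterion operator produces records and stops once its output is empty. A hedged sketch of what such a mapper could look like (not the TerminationMapper used in this test; the field index and threshold are placeholder assumptions):

    import org.apache.flink.api.java.record.functions.MapFunction;
    import org.apache.flink.types.DoubleValue;
    import org.apache.flink.types.Record;
    import org.apache.flink.util.Collector;

    public class TerminationMapperSketch extends MapFunction {

      private static final double THRESHOLD = 0.01;   // placeholder convergence threshold

      @Override
      public void map(Record record, Collector<Record> out) {
        // forward the record only while the change is still above the threshold,
        // so the criterion data set becomes empty once the values have converged
        double delta = record.getField(0, DoubleValue.class).getValue();
        if (delta > THRESHOLD) {
          out.collect(record);
        }
      }
    }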

  static Plan getTestPlan(int numSubTasks, String input, String output) {

    FileDataSource initialInput = new FileDataSource(new PointInFormat(), input, "Input");
    initialInput.setDegreeOfParallelism(1);

    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(initialInput);
    iteration.setMaximumNumberOfIterations(2);

    ReduceOperator dummyReduce = ReduceOperator.builder(new DummyReducer(), IntValue.class, 0).input(iteration.getPartialSolution())
        .name("Reduce something").build();

    MapOperator dummyMap = MapOperator.builder(new IdentityMapper()).input(dummyReduce).build();
    iteration.setNextPartialSolution(dummyMap);

    FileDataSink finalResult = new FileDataSink(new PointOutFormat(), output, iteration, "Output");

    Plan plan = new Plan(finalResult, "Iteration with chained map test");
    plan.setDefaultParallelism(numSubTasks);

 
  private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {

    FileDataSource initialInput = new FileDataSource(TextInputFormat.class, input, "input");
   
    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(initialInput);
    iteration.setMaximumNumberOfIterations(5);
    Assert.assertTrue(iteration.getMaximumNumberOfIterations() > 1);

    ReduceOperator sumReduce = ReduceOperator.builder(new SumReducer())
        .input(iteration.getPartialSolution())
        .name("Compute sum (Reduce)")
        .build();
   
    iteration.setNextPartialSolution(sumReduce);
   
    MapOperator terminationMapper = MapOperator.builder(new TerminationMapper())
        .input(sumReduce)
        .name("Compute termination criterion (Map)")
        .build();
   
    iteration.setTerminationCriterion(terminationMapper);

    FileDataSink finalResult = new FileDataSink(CsvOutputFormat.class, output, iteration, "Output");
    CsvOutputFormat.configureRecordFormat(finalResult)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')

 
  private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {

    FileDataSource initialInput = new FileDataSource(TextInputFormat.class, input, "input");
   
    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(initialInput);
    iteration.setMaximumNumberOfIterations(5);
   
    Assert.assertTrue(iteration.getMaximumNumberOfIterations() > 1);

    ReduceOperator sumReduce = ReduceOperator.builder(new PickOneReducer())
        .input(iteration.getPartialSolution())
        .name("Compute sum (Reduce)")
        .build();
   
    iteration.setNextPartialSolution(sumReduce);

    FileDataSink finalResult = new FileDataSink(CsvOutputFormat.class, output, iteration, "Output");
    CsvOutputFormat.configureRecordFormat(finalResult)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')

    // create DataSourceContract for cluster center input
    FileDataSource initialClusterPoints = new FileDataSource(new PointInFormat(), clusterInput, "Centers");
    initialClusterPoints.setDegreeOfParallelism(1);
   
    BulkIteration iteration = new BulkIteration("K-Means Loop");
    iteration.setInput(initialClusterPoints);
    iteration.setMaximumNumberOfIterations(numIterations);
   
    // create DataSourceContract for data point input
    FileDataSource dataPoints = new FileDataSource(new PointInFormat(), dataPointInput, "Data Points");

    // create CrossOperator for distance computation
    CrossOperator computeDistance = CrossOperator.builder(new ComputeDistance())
        .input1(dataPoints)
        .input2(iteration.getPartialSolution())
        .name("Compute Distances")
        .build();

    // create ReduceOperator for finding the nearest cluster centers
    ReduceOperator findNearestClusterCenters = ReduceOperator.builder(new FindNearestCenter(), IntValue.class, 0)
        .input(computeDistance)
        .name("Find Nearest Centers")
        .build();

    // create ReduceOperator for computing new cluster positions
    ReduceOperator recomputeClusterCenter = ReduceOperator.builder(new RecomputeClusterCenter(), IntValue.class, 0)
        .input(findNearestClusterCenters)
        .name("Recompute Center Positions")
        .build();
    iteration.setNextPartialSolution(recomputeClusterCenter);
   
    // create DataSourceContract for data point input
    FileDataSource dataPoints2 = new FileDataSource(new PointInFormat(), dataPointInput, "Data Points 2");
   
    // compute distance of points to final clusters

   
    MapOperator clusterPoints = MapOperator.builder(new PointBuilder()).name("Build cluster points").input(clustersSource).build();
   
    // ---------------------- Begin K-Means Loop ---------------------
   
    BulkIteration iter = new BulkIteration("k-means loop");
    iter.setInput(clusterPoints);
    iter.setMaximumNumberOfIterations(numIterations);

    // compute the distances and select the closest center
    MapOperator findNearestClusterCenters = MapOperator.builder(new SelectNearestCenter())
      .setBroadcastVariable("centers", iter.getPartialSolution())
      .input(dataPoints)
      .name("Find Nearest Centers")
      .build();

    // computing the new cluster positions
    ReduceOperator recomputeClusterCenter = ReduceOperator.builder(new RecomputeClusterCenter(), IntValue.class, 0)
      .input(findNearestClusterCenters)
      .name("Recompute Center Positions")
      .build();
   
    iter.setNextPartialSolution(recomputeClusterCenter);

    // ---------------------- End K-Means Loop ---------------------
   
    // create DataSinkContract for writing the new cluster positions
    FileDataSink newClusterPoints = new FileDataSink(new PointOutFormat(), output, iter, "New Center Positions");
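
The setBroadcastVariable call above makes the current partial solution (the cluster centers) available to every parallel instance of the step function. A hedged sketch of how such a function could read the broadcast variable; the method names follow the record API's rich-function interface, and the actual distance computation is omitted:

    import java.util.List;

    import org.apache.flink.api.java.record.functions.MapFunction;
    import org.apache.flink.configuration.Configuration;
    import org.apache.flink.types.Record;
    import org.apache.flink.util.Collector;

    public class SelectNearestCenterSketch extends MapFunction {

      private List<Record> centers;

      @Override
      public void open(Configuration parameters) throws Exception {
        // the data set broadcast under the name "centers" is materialized per parallel task
        this.centers = getRuntimeContext().getBroadcastVariable("centers");
      }

      @Override
      public void map(Record point, Collector<Record> out) {
        // compare 'point' against every record in 'centers', pick the closest one,
        // and emit the point tagged with that center's id (computation omitted)
        out.collect(point);
      }
    }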

 
  @Test
  public void testBranchAfterIteration() {
    FileDataSource sourceA = new FileDataSource(DummyInputFormat.class, IN_FILE, "Source 2");
   
    BulkIteration iteration = new BulkIteration("Loop");
    iteration.setInput(sourceA);
    iteration.setMaximumNumberOfIterations(10);
   
    MapOperator mapper = MapOperator.builder(IdentityMap.class).name("Mapper").input(iteration.getPartialSolution()).build();
    iteration.setNextPartialSolution(mapper);
   
    FileDataSink sink1 = new FileDataSink(DummyOutputFormat.class, OUT_FILE, iteration, "Sink 1");
   
    MapOperator postMap = MapOperator.builder(IdentityMap.class).name("Post Iteration Mapper")
        .input(iteration).build();
