Package org.apache.flink.api.java

Examples of org.apache.flink.api.java.ExecutionEnvironment.fromElements()
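ExecutionEnvironment.fromElements() builds a DataSet directly from a handful of in-memory values, which is why it appears throughout the test and example code below. The following is a minimal, self-contained sketch of the pattern; the class name and word values are illustrative and not taken from any of the snippets, and on older Flink releases an explicit env.execute() call is needed instead of relying on print() to run the job.

  import org.apache.flink.api.common.functions.MapFunction;
  import org.apache.flink.api.java.DataSet;
  import org.apache.flink.api.java.ExecutionEnvironment;
  import org.apache.flink.api.java.tuple.Tuple2;

  public class FromElementsSketch {

    public static void main(String[] args) throws Exception {
      final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

      // fromElements() turns a fixed, in-memory set of values into a DataSet
      DataSet<String> words = env.fromElements("to", "be", "or", "not", "to", "be");

      // map each word to a (word, 1) pair, then sum the counts per word
      DataSet<Tuple2<String, Integer>> counts = words
          .map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) {
              return new Tuple2<String, Integer>(word, 1);
            }
          })
          .groupBy(0)
          .sum(1);

      // on recent Flink versions print() triggers execution; older ones need env.execute()
      counts.print();
    }
  }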


  @Override
  protected void testProgram() throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // create a DataSet from the in-memory example text
    DataSet<String> text = env.fromElements(WordCountData.TEXT);
    // tokenize into (word, 1) pairs and sum the counts per word
    DataSet<Tuple2<String, Integer>> words = text.flatMap(new WordCount.Tokenizer());
    DataSet<Tuple2<String, Integer>> result = words.groupBy(0).aggregate(Aggregations.SUM, 1);

    // write the result into a local collection and run the job
    result.output(new LocalCollectionOutputFormat<Tuple2<String, Integer>>(resultsCollected));
    env.execute("Word Count Collection");
  }
View Full Code Here


     
    // set up execution environment
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    // read vertex and edge data
    DataSet<Long> vertices = env.fromElements(ConnectedComponentsData.getEnumeratingVertices(NUM_VERTICES).split("\n"))
        .map(new VertexParser());

    DataSet<Tuple2<Long, Long>> edges = env.fromElements(ConnectedComponentsData.getRandomOddEvenEdges(NUM_EDGES, NUM_VERTICES, SEED).split("\n"))
        .flatMap(new EdgeParser());

    // assign the initial components (equal to the vertex id)
    DataSet<Tuple2<Long, Long>> verticesWithInitialId = vertices.map(new DuplicateValue<Long>());

View Full Code Here

  @Test
  public void testNoBreakerForIndependentVariable() {
    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

      DataSet<String> source1 = env.fromElements("test");
      DataSet<String> source2 = env.fromElements("test");

      // broadcast an independent source into the mapper
      source1.map(new IdentityMapper<String>()).withBroadcastSet(source2, "some name").print();

      Plan p = env.createProgramPlan();
      OptimizedPlan op = compileNoStats(p);
View Full Code Here

  @Test
  public void testBreakerForDependentVariable() {
    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

      DataSet<String> source1 = env.fromElements("test");

      source1.map(new IdentityMapper<String>()).map(new IdentityMapper<String>()).withBroadcastSet(source1, "some name").print();

      Plan p = env.createProgramPlan();
      OptimizedPlan op = compileNoStats(p);
View Full Code Here

  public void testPartitioningNotPushedDown() {
    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
     
      @SuppressWarnings("unchecked")
      DataSet<Tuple3<Long, Long, Long>> input = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));
     
      input
        .groupBy(0, 1).sum(2)
        .groupBy(0).sum(1)
        .print();
View Full Code Here

  public void testPartitioningReused() {
    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
     
      @SuppressWarnings("unchecked")
      DataSet<Tuple3<Long, Long, Long>> input = env.fromElements(new Tuple3<Long, Long, Long>(0L, 0L, 0L));
     
      input
        .groupBy(0).sum(1)
        .groupBy(0, 1).sum(2)
        .print();
View Full Code Here

    try {
      ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
      env.setDegreeOfParallelism(DEFAULT_PARALLELISM);

      @SuppressWarnings("unchecked")
      DataSet<Tuple3<Long, Long, Long>> solutionSetInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Solution Set");
      @SuppressWarnings("unchecked")
      DataSet<Tuple3<Long, Long, Long>> worksetInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Workset");
      @SuppressWarnings("unchecked")
      DataSet<Tuple3<Long, Long, Long>> invariantInput = env.fromElements(new Tuple3<Long, Long, Long>(1L, 2L, 3L)).name("Invariant Input");

      DeltaIteration<Tuple3<Long, Long, Long>, Tuple3<Long, Long, Long>> iter = solutionSetInput.iterateDelta(worksetInput, 100, 1, 2);

View Full Code Here
