Package org.apache.flink.api.java.record.io

Examples of org.apache.flink.api.java.record.io.CsvOutputFormat
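The excerpts below show the two ways the legacy Record API's CsvOutputFormat is typically configured: either constructed directly with a record delimiter, a field delimiter, and the Value types to write, or attached to a FileDataSink and set up through the CsvOutputFormat.configureRecordFormat(...) builder. Each excerpt is truncated where the original listing was cut off. The first one wires a directly constructed format into a TaskConfig via a UserCodeObjectWrapper.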


    {
      // register the (single) input and its serializer on the output task's configuration
      TaskConfig taskConfig = new TaskConfig(output.getConfiguration());
      taskConfig.addInputToGroup(0);
      taskConfig.setInputSerializer(serializer, 0);

      // record delimiter "\n", field delimiter " ", three LongValue fields per record
      @SuppressWarnings("unchecked")
      CsvOutputFormat outFormat = new CsvOutputFormat("\n", " ", LongValue.class, LongValue.class, LongValue.class);
      outFormat.setOutputFilePath(new Path(resultPath));

      // ship the configured format to the task as a user-code object
      taskConfig.setStubWrapper(new UserCodeObjectWrapper<CsvOutputFormat>(outFormat));
    }

    return output;
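
For comparison, the same constructor-based configuration can be attached to a FileDataSink instead of a TaskConfig. A minimal sketch, assuming a preceding record operator `previous` and a result path `resultPath` (both placeholders, not taken from the excerpt above):

    // three LongValue fields per record, space-delimited, one record per line
    @SuppressWarnings("unchecked")
    CsvOutputFormat outFormat = new CsvOutputFormat("\n", " ", LongValue.class, LongValue.class, LongValue.class);

    // the sink supplies the output path, so no setOutputFilePath(...) call is needed here
    FileDataSink sink = new FileDataSink(outFormat, resultPath, previous, "CSV Sink");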
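The next excerpt comes from a triangle-enumeration plan: after ship and local strategies are set on the closeTriads match, the sink writes two space-separated StringValue fields per line.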


      .build();
    closeTriads.setParameter("INPUT_LEFT_SHIP_STRATEGY", "SHIP_REPARTITION_HASH");
    closeTriads.setParameter("INPUT_RIGHT_SHIP_STRATEGY", "SHIP_REPARTITION_HASH");
    closeTriads.setParameter("LOCAL_STRATEGY", "LOCAL_STRATEGY_HASH_BUILD_SECOND");

    FileDataSink triangles = new FileDataSink(new CsvOutputFormat(), output, "Output");
    CsvOutputFormat.configureRecordFormat(triangles)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(StringValue.class, 0)
      .field(StringValue.class, 1)
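In the next excerpt, a map operator tokenizes the input lines and the sink writes one StringValue and one IntValue per record, separated by single spaces.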

    MapOperator mapper = MapOperator.builder(new TokenizeLine())
      .input(source)
      .name("Tokenize Lines")
      .build();

    FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, mapper, "Selection");
    CsvOutputFormat.configureRecordFormat(out)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(StringValue.class, 0)
      .field(IntValue.class, 1);
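The next excerpt writes the result of a two-input join (JoinLiO) with '|' as the field delimiter and lenient mode enabled, starting with an IntValue field.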

      .input2(aggInput2)
      .name("JoinLiO")
      .build();

    // create DataSinkContract for writing the result
    FileDataSink result = new FileDataSink(new CsvOutputFormat(), output, joinLiO, "Output");
    CsvOutputFormat.configureRecordFormat(result)
      .recordDelimiter('\n')
      .fieldDelimiter('|')
      .lenient(true)
      .field(IntValue.class, 0)
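Next, an aggregation over the joined data (AggLio) is written in the same '|'-delimited, lenient format, starting with a LongValue field.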

      .input(joinLiO, partJoin2, partJoin1)
      .name("AggLio")
      .build();

    // create DataSinkContract for writing the result
    FileDataSink result = new FileDataSink(new CsvOutputFormat(), output, aggLiO, "Output");
    CsvOutputFormat.configureRecordFormat(result)
      .recordDelimiter('\n')
      .fieldDelimiter('|')
      .lenient(true)
      .field(LongValue.class, 0)
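The next excerpt configures a grouped sort: the group order is set on the second field (IntValue, ascending) and the sorted result is written as two space-separated IntValue fields.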

      .build();
    // sets the group sorting to the second field
    sorter.setGroupOrder(new Ordering(1, IntValue.class, Order.ASCENDING));

    // create and configure the output format
    FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, sorter, "Sorted Output");
    CsvOutputFormat.configureRecordFormat(out)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(IntValue.class, 0)
      .field(IntValue.class, 1);
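Next is a WordCount-style job with a CountWords reducer; the sink writes space-delimited records in lenient mode, beginning with the StringValue word field.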

        .build();
      ReduceOperator reduceNode = ReduceOperator.builder(new CountWords(), StringValue.class, 0)
        .input(mapNode)
        .name("Count Words")
        .build();
      FileDataSink out = new FileDataSink(new CsvOutputFormat(), OUT_FILE, reduceNode, "Word Counts");
      CsvOutputFormat.configureRecordFormat(out)
        .recordDelimiter('\n')
        .fieldDelimiter(' ')
        .lenient(true)
        .field(StringValue.class, 0)
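The next excerpt comes from a delta iteration (the operator names suggest connected components); the iteration result is written as two space-separated LongValue fields.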

   
    iteration.setNextWorkset(updateComponentId);
    iteration.setSolutionSetDelta(updateComponentId);

    // create DataSinkContract for writing the new cluster positions
    FileDataSink result = new FileDataSink(new CsvOutputFormat(), output, iteration, "Result");
    CsvOutputFormat.configureRecordFormat(result)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(LongValue.class, 0)
      .field(LongValue.class, 1);
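Next, rows read from HBase through a TableInputFormat source are dumped as space-separated StringValue fields.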

    String output    = (args.length > 1 ? args[1] : "");

    GenericDataSource<TableInputFormat> source = new GenericDataSource<TableInputFormat>(new MyTableInputFormat(), "HBase Input");
    source.setParameter(TableInputFormat.INPUT_TABLE, "twitter");
    source.setParameter(TableInputFormat.CONFIG_LOCATION, "/etc/hbase/conf/hbase-site.xml");
    FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, source, "HBase String dump");
    CsvOutputFormat.configureRecordFormat(out)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(StringValue.class, 0)
      .field(StringValue.class, 1)
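The final excerpt is another WordCount variant, map followed by reduce, with the sink writing the StringValue word and its IntValue count separated by a space.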

      .build();
    ReduceOperator reducer = ReduceOperator.builder(CountWords.class, StringValue.class, 0)
      .input(mapper)
      .name("Count Words")
      .build();
    FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, reducer, "Word Counts");
    CsvOutputFormat.configureRecordFormat(out)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(StringValue.class, 0)
      .field(IntValue.class, 1);
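
The recurring pattern in these excerpts (a FileDataSink wrapping a default-constructed CsvOutputFormat, followed by configureRecordFormat(...) to declare delimiters and fields) can be assembled into a complete, if trivial, program. The following is a hedged sketch rather than one of the original examples: the class name, the default paths, and the use of TextInputFormat to pass text lines straight through to the sink are illustrative assumptions.

    import org.apache.flink.api.common.Plan;
    import org.apache.flink.api.common.Program;
    import org.apache.flink.api.java.record.io.CsvOutputFormat;
    import org.apache.flink.api.java.record.io.TextInputFormat;
    import org.apache.flink.api.java.record.operators.FileDataSink;
    import org.apache.flink.api.java.record.operators.FileDataSource;
    import org.apache.flink.types.StringValue;

    public class LineEcho implements Program {

      @Override
      public Plan getPlan(String... args) {
        // hypothetical default paths
        String input  = (args.length > 0 ? args[0] : "file:///tmp/in.txt");
        String output = (args.length > 1 ? args[1] : "file:///tmp/out.csv");

        // TextInputFormat emits one Record per line with a StringValue at field position 0
        FileDataSource source = new FileDataSource(new TextInputFormat(), input, "Input Lines");

        // write each record back out as a single StringValue field, one record per line
        FileDataSink sink = new FileDataSink(new CsvOutputFormat(), output, source, "Echoed Lines");
        CsvOutputFormat.configureRecordFormat(sink)
          .recordDelimiter('\n')
          .fieldDelimiter(' ')
          .field(StringValue.class, 0);

        return new Plan(sink, "Line Echo");
      }
    }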
