/* Kinesis checkpoint interval. Same as batchInterval for this example. */
Duration checkpointInterval = batchInterval;
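/*
 * Note: this interval governs how often the underlying Kinesis Client Library
 * checkpoints its position in the stream to DynamoDB; it is separate from
 * Spark Streaming's own checkpointing of DStream metadata.
 */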
/* Set up the StreamingContext */
JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
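/* Spark Streaming groups incoming records into micro-batches of batchInterval duration. */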
/* Create the same number of Kinesis DStreams/Receivers as the Kinesis stream's shards */
List<JavaDStream<byte[]>> streamsList = new ArrayList<JavaDStream<byte[]>>(numStreams);
for (int i = 0; i < numStreams; i++) {
    streamsList.add(
        KinesisUtils.createStream(jssc, streamName, endpointUrl, checkpointInterval,
            InitialPositionInStream.LATEST, StorageLevel.MEMORY_AND_DISK_2())
    );
}
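/*
 * Each createStream call registers one receiver, and each receiver occupies one
 * core on an executor. One receiver per shard maximizes read parallelism, provided
 * the cluster still has enough cores left over to process the received data.
 */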
/* Union all the streams if there is more than 1 stream */
JavaDStream<byte[]> unionStreams;
if (streamsList.size() > 1) {
    unionStreams = jssc.union(streamsList.get(0), streamsList.subList(1, streamsList.size()));
} else {
    /* Otherwise, just use the 1 stream */
    unionStreams = streamsList.get(0);
}
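/*
 * Unioning the per-shard DStreams lets the word-count transformations below be
 * declared once and applied to all shards' data in each batch.
 */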
/*
 * Split each line of the unioned DStreams into words with flatMap:
 * decode each byte[] record into a String, then split it on WORD_SEPARATOR.
 */
JavaDStream<String> words = unionStreams.flatMap(new FlatMapFunction<byte[], String>() {
    @Override
    public Iterable<String> call(byte[] line) {
        /* Decode explicitly as UTF-8 rather than the platform default charset
         * (assumes a java.nio.charset.StandardCharsets import). */
        return Lists.newArrayList(WORD_SEPARATOR.split(new String(line, StandardCharsets.UTF_8)));
    }
});
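/*
 * Note: this Iterable-returning call signature is the Spark 1.x Java API;
 * from Spark 2.0 onward, FlatMapFunction#call returns an Iterator instead.
 */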
/* Map each word to a (word, 1) tuple, then reduce/aggregate by word. */
JavaPairDStream<String, Integer> wordCounts = words.mapToPair(
    new PairFunction<String, String, Integer>() {
        @Override
        public Tuple2<String, Integer> call(String s) {
            return new Tuple2<String, Integer>(s, 1);
        }
    }).reduceByKey(new Function2<Integer, Integer, Integer>() {
        @Override
        public Integer call(Integer i1, Integer i2) {
            return i1 + i2;
        }
    });
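/*
 * Note: reduceByKey aggregates within each batch only; a running count across
 * batches would require a stateful operator such as updateStateByKey.
 */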
/* Print the first 10 elements of each batch's wordCounts RDD */
wordCounts.print();
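/*
 * Nothing runs until start() is called; awaitTermination() then blocks the driver
 * until the context is stopped or the job terminates with an error.
 */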
/* Start the streaming context and await termination */
jssc.start();
jssc.awaitTermination();
}