// Test-body fragment: wires a MemoryChannel to an HDFSEventSink, pushes two
// events through, and verifies exactly one file lands in HDFS.
// NOTE(review): relies on members defined outside this view — `cluster`,
// `logger`, `outputDir`, `fs`, `outputDirPath` — presumably set up by the
// enclosing test fixture; confirm against the full class.
String nnURL = getNameNodeURL(cluster);
logger.info("Namenode address: {}", nnURL);

// --- Channel setup: in-memory channel with an empty (all-defaults) config.
Context chanCtx = new Context();
MemoryChannel channel = new MemoryChannel();
channel.setName("simpleHDFSTest-mem-chan");
channel.configure(chanCtx);
channel.start();

// --- Sink setup: write gzip-compressed stream output under the test dir.
// batchSize of 1 means the sink commits per event rather than buffering.
Context sinkCtx = new Context();
sinkCtx.put("hdfs.path", nnURL + outputDir);
sinkCtx.put("hdfs.fileType", HDFSWriterFactory.CompStreamType);
sinkCtx.put("hdfs.batchSize", Integer.toString(1));
sinkCtx.put("hdfs.codeC", "gzip");
HDFSEventSink sink = new HDFSEventSink();
sink.setName("simpleHDFSTest-hdfs-sink");
sink.configure(sinkCtx);
sink.setChannel(channel);
sink.start();

// create an event
// NOTE(review): UPPER_SNAKE_CASE is conventionally reserved for
// static final constants; as locals these would idiomatically be
// lowerCamelCase (e.g. eventBody1) — consider renaming in a follow-up.
String EVENT_BODY_1 = "yarg1";
String EVENT_BODY_2 = "yarg2";

// Put both events inside a single channel transaction.
// NOTE(review): each channel.getTransaction() call here is assumed to
// return the same thread-attached Transaction instance (Flume binds the
// open transaction to the calling thread) — otherwise begin/commit/close
// would act on different handles. Capturing it once in a local
// `Transaction txn` would make this explicit; verify against the Flume
// Channel contract.
channel.getTransaction().begin();
try {
channel.put(EventBuilder.withBody(EVENT_BODY_1, Charsets.UTF_8));
channel.put(EventBuilder.withBody(EVENT_BODY_2, Charsets.UTF_8));
channel.getTransaction().commit();
} finally {
// Close runs even if commit throws, releasing the transaction.
channel.getTransaction().close();
}

// store event to HDFS
// NOTE(review): a single process() call is assumed to drain the channel
// and flush the output despite batchSize=1 — confirm whether one call
// handles both events or the sink loops internally.
sink.process();

// shut down flume — stopping the sink closes/renames any open HDFS file,
// which must happen before listing the directory below.
sink.stop();
channel.stop();

// verify that it's in HDFS and that its content is what we say it should be
FileStatus[] statuses = fs.listStatus(outputDirPath);
Assert.assertNotNull("No files found written to HDFS", statuses);
Assert.assertEquals("Only one file expected", 1, statuses.length);