// flood Kafka with messages faster than the ElasticSearch sink drains them, then verify that the
// pending count levels off once it crosses the pause threshold and that the sink drains fully on close
final KafkaSink kafkaSink = createKafkaProducer(jsonMapper, kafkaServer.getBrokerListStr());
Client client = createMockedESClient();
final ElasticSearchSink sink = new ElasticSearchSink(
        null,
        10,
        1000,
        null,
        true,
        "1s",
        "1s",
        null,
        null,
        0, 0, 0, 0, true, null,
        null,
        jsonMapper,
        client);
sink.open();
// route messages with the routing key TOPIC_NAME to the sink registered as "es"
RoutingMap map = new RoutingMap();
map.set(new ImmutableMap.Builder<String, RoutingMap.RoutingInfo>()
        .put(TOPIC_NAME, new RoutingMap.RoutingInfo(Lists.newArrayList(new RoutingMap.Route("es", null, null)), null))
        .build());
// register the ElasticSearch sink under the name "es"
SinkManager sinks = new SinkManager();
sinks.initialSet(new ImmutableMap.Builder<String, Sink>()
        .put("es", sink)
        .build());
MessageRouter router = new MessageRouter(map, sinks, jsonMapper);
// consume TOPIC_NAME from the earliest offset and hand each message to the router
Properties properties = new Properties();
properties.setProperty("group.id", "testkafkaconsumer");
properties.setProperty("zookeeper.connect", zk.getConnectionString());
properties.setProperty("auto.offset.reset", "smallest");
properties.setProperty("consumer.timeout.ms", "1000");
KafkaConsumer consumer = new KafkaConsumer(properties, TOPIC_NAME, router, jsonMapper);
consumer.start();
// set the pause threshold to 100
QueuedSink.MAX_PENDING_MESSAGES_TO_PAUSE = 100;
Thread t = createProducerThread(jsonMapper, kafkaSink, TOPIC_NAME);
// wait until the number of pending messages has exceeded the pause threshold three times
int count = 0;
while (count < 3) {
    if (sink.getNumOfPendingMessages() >= QueuedSink.MAX_PENDING_MESSAGES_TO_PAUSE) {
        ++count;
    }
    Thread.sleep(1000);
}
// sample the number of pending messages once per second for 10 seconds
ArrayList<Integer> countList = new ArrayList<Integer>();
for (int i = 0; i < 10; ++i) {
    countList.add((int) sink.getNumOfPendingMessages());
    Thread.sleep(1000);
}
// once the consumer is paused, the pending count should stay roughly flat (within 5) across the later samples
for (int i = 6; i < 9; ++i) {
    assertEquals(countList.get(i), countList.get(i + 1), 5);
}
// lift the rate limit, stop the producer thread, and shut everything down
rateLimiter.setRate(Double.MAX_VALUE);
run.set(false);
t.join();
consumer.shutdown();
sink.close();
// after close() the sink should have flushed every pending message
assertEquals(0, sink.getNumOfPendingMessages());
}
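
// The helpers used above (createKafkaProducer, createMockedESClient, createProducerThread) and the
// shared fields `run` and `rateLimiter` are not part of this excerpt. Below is a minimal sketch of
// what createProducerThread might look like, assuming `run` is a shared AtomicBoolean flag and that
// messages are wrapped in Suro's DefaultMessageContainer; the real helper may differ.
private Thread createProducerThread(final ObjectMapper jsonMapper, final KafkaSink kafkaSink, final String topic) {
    Thread producer = new Thread(new Runnable() {
        @Override
        public void run() {
            int i = 0;
            // keep producing until the test flips the shared flag with run.set(false)
            while (run.get()) {
                try {
                    byte[] payload = jsonMapper.writeValueAsBytes(
                            ImmutableMap.of("field", "value" + i, "ts", System.currentTimeMillis()));
                    kafkaSink.writeTo(new DefaultMessageContainer(new Message(topic, payload), jsonMapper));
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                ++i;
            }
        }
    });
    producer.start();
    return producer;
}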