package com.tts.jtcollector;
import com.tts.util.AvroSchemaSystem;
import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.log4j.Logger;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.*;
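/**
 * Kafka consumer that reads metric messages (optionally Avro-encoded) from a
 * configured topic and queues them for a {@link TSDClient} to forward to a
 * time-series database.
 *
 * Configuration comes from a properties file named on the command line. A
 * minimal illustrative example, using the keys looked up below (the host
 * names and values shown here are hypothetical):
 *
 * <pre>
 * number.consumer.threads=4
 * kafka.metrics.topic.name=metrics
 * avro.schema.repo.host=schemarepo.internal
 * avro.schema.repo.port=2876
 * avro.schema.repo.path=/schemas/
 * avro.schema.id=1
 * tsd.host=tsd.internal
 * tsd.port=4242
 * group.id=metrics-consumers
 * zookeeper.connect=zk1:2181,zk2:2181
 * </pre>
 */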
public class MetricsConsumer extends Thread {
    static Logger log = Logger.getLogger(MetricsConsumer.class.getName());
    private static Properties server_props = new Properties();
    private static String properties_file = "";
    private static Properties consumer_props = null;
    private static TSDClient tsd = null;
    // volatile: set from the shutdown path, read in the consume loop
    private volatile boolean time_to_shutdown = false;
    public static ArrayList<MetricsConsumer> threads = new ArrayList<MetricsConsumer>();
    private final String topic;
    private static List<String> listOfMetricsForTSD = null;
    private static String metrics_topic;
    private static String metrics_schema_id;
    private static String prefixDelimiter;
    private static String schemaRepoPath;
    private static AvroSchemaSystem avro;
    private static Schema reader_schema;
    private static boolean useTSD;
    private static ConsumerConnector consumer;
    private static boolean useAvro;
    // Each consumer thread owns its own Kafka stream, so this is an
    // instance field rather than a static one
    private final KafkaStream<byte[], byte[]> stream;

    public MetricsConsumer(KafkaStream<byte[], byte[]> topic_stream) {
        this.stream = topic_stream;
        this.topic = metrics_topic;
    }
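    /** Asks this thread's consume loop to exit after the current message. */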
    public void freeResources() {
        time_to_shutdown = true;
    }
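    /**
     * Stops all consumer threads and, if one was started, the TSD client
     * thread. The unused String[] parameter matches the stop-method
     * signature expected by common service wrappers (an assumption; the
     * argument is not used here).
     */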
    public static void shutdown(String[] args) {
        for (MetricsConsumer worker : threads) {
            worker.freeResources();
            worker.interrupt();
            log.info("Shutting down: " + worker.getName());
        }
        if (useTSD) {
            // Set the shutdown flag before interrupting, as with the workers
            tsd.freeResources();
            tsd.interrupt();
            log.info("Shutting down: " + tsd.getName());
        }
    }
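    /**
     * Consume loop: reads messages from this thread's stream, decodes them
     * (Avro if a schema repo is configured, otherwise raw UTF-8 text), and,
     * when TSD forwarding is enabled, queues the reassembled metric line for
     * the TSD client thread.
     */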
    public void run() {
        while (!time_to_shutdown) {
            try {
                ConsumerIterator<byte[], byte[]> it = stream.iterator();
                while (it.hasNext() && !time_to_shutdown) {
                    // Get the next message to be processed
                    MessageAndMetadata<byte[], byte[]> message = it.next();
                    try {
                        // Get message body
                        byte[] msg = message.message();
                        String metric = "";
                        if (useAvro) {
                            GenericRecord decoded = MetricsConsumer.avro.decodeKafkaMessage(msg, topic, metrics_schema_id, true);
                            if (decoded == null) {
                                log.error("Can't decode message");
                                continue;
                            }
                            // Recreate the original space-separated metric line
                            StringBuilder sb = new StringBuilder();
                            sb.append(decoded.get("metric_id")).append(' ')
                              .append(decoded.get("timestamp")).append(' ')
                              .append(decoded.get("value")).append(' ');
                            // Now append the array of tag key/value pairs
                            GenericData.Array<?> tags = (GenericData.Array<?>) decoded.get("tags");
                            Iterator<?> tags_it = tags.iterator();
                            while (tags_it.hasNext()) {
                                sb.append(tags_it.next());
                                if (tags_it.hasNext())
                                    sb.append(' ');
                            }
                            metric = sb.toString();
                        } else {
                            metric = new String(msg, StandardCharsets.UTF_8);
                        }
                        // Queue the metric for the TSD client thread; the
                        // useTSD guard avoids an NPE when the list was never
                        // created because TSD forwarding is disabled
                        if (useTSD && !metric.isEmpty()) {
                            synchronized (listOfMetricsForTSD) {
                                listOfMetricsForTSD.add(metric);
                            }
                        }
                    } catch (Exception e) {
                        log.error("Can't decode message", e);
                    }
                }
            } catch (Exception e) {
                log.error("Unexpected error in consumer loop: ", e);
            }
        }
    }
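    /**
     * Entry point: loads the properties file named on the command line,
     * optionally wires up the Avro schema repo and TSD client, then spawns
     * one Kafka consumer thread per configured stream.
     */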
    public static void main(String[] args) throws IOException {
        int num_threads = 0;
        // Get properties file from args
        for (String arg : args) {
            if (arg.contains("properties")) {
                properties_file = arg;
            }
        }
        if (properties_file.isEmpty()) {
            log.error("No properties file given; pass a *.properties path as an argument.");
            System.exit(1);
        }
        // Load properties file
        server_props.load(new FileInputStream(properties_file));
        num_threads = Integer.parseInt(server_props.getProperty("number.consumer.threads", "1"));
        metrics_topic = server_props.getProperty("kafka.metrics.topic.name", "metrics");
        // Use Avro only when a schema repo host is configured and is not the
        // placeholder value from the template config
        String schemaRepoHost = server_props.getProperty("avro.schema.repo.host", "");
        if (schemaRepoHost.length() > 0 && (!schemaRepoHost.equals("avro.example.com")))
            useAvro = true;
        if (useAvro) {
            String schemaRepoPort = server_props.getProperty("avro.schema.repo.port", "2876");
            metrics_schema_id = server_props.getProperty("avro.schema.id", "0");
            prefixDelimiter = server_props.getProperty("prefix.delimiter", "");
            schemaRepoPath = server_props.getProperty("avro.schema.repo.path", "");
            // Normalize the schema repo path to end with a slash
            if (!schemaRepoPath.endsWith("/"))
                schemaRepoPath = schemaRepoPath + "/";
            avro = new AvroSchemaSystem(schemaRepoHost, schemaRepoPort, prefixDelimiter, schemaRepoPath);
            reader_schema = MetricsConsumer.avro.getSchema(MetricsConsumer.metrics_topic, MetricsConsumer.metrics_schema_id);
        }
        // Connect to TSD only when a real host is set; same placeholder check
        String tsdHost = server_props.getProperty("tsd.host", "");
        String tsdPort = null;
        if (tsdHost.length() > 0 && (!tsdHost.equals("tsd.example.com")))
            useTSD = true;
        if (useTSD) {
            tsdPort = server_props.getProperty("tsd.port", "4242");
            listOfMetricsForTSD = Collections.synchronizedList(new ArrayList<String>());
            // Start collector loader
            log.info("Spawning tsd thread...");
            tsd = new TSDClient(tsdHost, tsdPort, 5, listOfMetricsForTSD);
            tsd.setName("TSD-Thread");
            tsd.start();
        } else {
            log.info("MetricsConsumer not using TSD...");
        }
        try {
            consumer_props = new Properties();
            // Build the high-level (ZooKeeper-based) consumer configuration,
            // falling back to defaults where no override is set
            consumer_props.put("group.id", server_props.getProperty("group.id", ""));
            String str = server_props.getProperty("client.id");
            if (str != null)
                consumer_props.put("client.id", str);
            consumer_props.put("zookeeper.connect", server_props.getProperty("zookeeper.connect", ""));
            str = server_props.getProperty("consumer.id");
            if (str != null)
                consumer_props.put("consumer.id", str);
            consumer_props.put("socket.timeout.ms", "200");
            consumer_props.put("socket.receive.buffer.bytes", server_props.getProperty("socket.receive.buffer.bytes", "65536"));
            consumer_props.put("fetch.message.max.bytes", server_props.getProperty("fetch.message.max.bytes", "1048576"));
            consumer_props.put("auto.commit.enable", server_props.getProperty("auto.commit.enable", "true"));
            consumer_props.put("auto.commit.interval.ms", server_props.getProperty("auto.commit.interval.ms", "60000"));
            consumer_props.put("queued.max.message.chunks", server_props.getProperty("queued.max.message.chunks", "10"));
            consumer_props.put("rebalance.max.retries", server_props.getProperty("rebalance.max.retries", "4"));
            consumer_props.put("fetch.min.bytes", server_props.getProperty("fetch.min.bytes", "1"));
            consumer_props.put("fetch.wait.max.ms", server_props.getProperty("fetch.wait.max.ms", "100"));
            consumer_props.put("rebalance.backoff.ms", server_props.getProperty("rebalance.backoff.ms", "2000"));
            consumer_props.put("refresh.leader.backoff.ms", server_props.getProperty("refresh.leader.backoff.ms", "200"));
            consumer_props.put("auto.offset.reset", server_props.getProperty("auto.offset.reset", "largest"));
            consumer_props.put("consumer.timeout.ms", server_props.getProperty("consumer.timeout.ms", "-1"));
            consumer_props.put("zookeeper.session.timeout.ms", server_props.getProperty("zookeeper.session.timeout.ms", "6000"));
            consumer_props.put("zookeeper.connection.timeout.ms", server_props.getProperty("zookeeper.connection.timeout.ms", "6000"));
            consumer_props.put("zookeeper.sync.time.ms", server_props.getProperty("zookeeper.sync.time.ms", "2000"));
            // Setup consumer
            consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumer_props));
            // Request one stream per consumer thread so each thread reads
            // its own messages
            Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
            topicCountMap.put(metrics_topic, num_threads);
            Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
            // Start consumer threads, one per stream
            log.info("Spawning consumer threads...");
            for (int x = 0; x < num_threads; x++) {
                MetricsConsumer thread = new MetricsConsumer(consumerMap.get(metrics_topic).get(x));
                thread.setName("Metrics-Consumer-Thread-" + x);
                threads.add(thread);
                thread.start();
            }
        } catch (Exception e) {
            log.error("Error setting up Kafka consumer.", e);
            System.exit(1);
        }
    }
}