Package org.apache.hadoop.chukwa.extraction.engine

Examples of org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord

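A minimal sketch of the ChukwaRecord calls the excerpts below rely on: add(), getFields(), getValue() and toString(). The field names and values here are made up for illustration; only the API calls are taken from the excerpts.

    import org.apache.hadoop.chukwa.extraction.engine.ChukwaRecord;

    public class ChukwaRecordSketch {
      public static void main(String[] args) {
        ChukwaRecord record = new ChukwaRecord();
        // Each metric is stored as a named String field on the record.
        record.add("user", "12.5");
        record.add("sys", "3.1");

        // getFields() lists the field names; getValue() reads one field back.
        for (String field : record.getFields()) {
          System.out.println(field + " = " + record.getValue(field));
        }

        // toString() renders the whole record, as the gold/test comparison below does.
        System.out.println(record.toString());
      }
    }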

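      // Excerpt: compares a "gold" SequenceFile of ChukwaRecordKey/ChukwaRecord pairs
      // with a test file, key by key and value by value.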
      // +"][" + testFile.getName() +"]");
      goldReader = new SequenceFile.Reader(fs, inputFile, conf);
      testReader = new SequenceFile.Reader(fs, testFile, conf);

      ChukwaRecordKey goldKey = new ChukwaRecordKey();
      ChukwaRecord goldRecord = new ChukwaRecord();

      ChukwaRecordKey testKey = new ChukwaRecordKey();
      ChukwaRecord testRecord = new ChukwaRecord();

      // log.info(">>>>>>>>>>>>>> Start reading");
      while (goldReader.next(goldKey, goldRecord)) {
        testReader.next(testKey, testRecord);

        if (goldKey.compareTo(testKey) != 0) {
          log.info(">>>>>>>>>>>>>> Not the same Key");
          log.info(">>>>>>>>>>>>>> Record [" + goldKey.getKey() + "] ["
              + goldKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Record [" + testKey.getKey() + "] ["
              + testKey.getReduceType() + "]");
          return false;
        }

        if (goldRecord.compareTo(testRecord) != 0) {
          log.info(">>>>>>>>>>>>>> Not the same Value");
          log.info(">>>>>>>>>>>>>> Record [" + goldKey.getKey() + "] ["
              + goldKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Record [" + testKey.getKey() + "] ["
              + testKey.getReduceType() + "]");
          log.info(">>>>>>>>>>>>>> Gold Value [" + goldRecord.toString() + "]");
          log.info(">>>>>>>>>>>>>> Test value [" + testRecord.toString() + "]");

          return false;
        }
      }
      // log.info(">>>>>>>>>>>>>> Same File");


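    // Excerpt: reads ChukwaRecords from a SequenceFile and maps each record's fields
    // onto database tables selected via the report.db.* configuration.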
    long currentTimeMillis = System.currentTimeMillis();
    boolean isSuccessful = true;
    String recordType = null;

    ChukwaRecordKey key = new ChukwaRecordKey();
    ChukwaRecord record = new ChukwaRecord();
    String cluster = null;
    int numOfRecords = 0;
    try {
      Pattern p = Pattern.compile("(.*)\\-(\\d+)$");
      int batch = 0;
      while (reader.next(key, record)) {
        numOfRecords++;
        if(first) {
          try {
            cluster = RecordUtil.getClusterName(record);
            initEnv(cluster);
            first=false;
          } catch(Exception ex) {
            log.error("Initialization failed for: "+cluster+".  Please check jdbc configuration.");
            return false;
          }
        }
        String sqlTime = DatabaseWriter.formatTimeStamp(record.getTime());
        log.debug("Timestamp: " + record.getTime());
        log.debug("DataType: " + key.getReduceType());

        String[] fields = record.getFields();
        String table = null;
        String[] priKeys = null;
        HashMap<String, HashMap<String, String>> hashReport = new HashMap<String, HashMap<String, String>>();
        StringBuilder normKey = new StringBuilder();
        String node = record.getValue("csource");
        recordType = key.getReduceType().toLowerCase();
        String dbKey = "report.db.name." + recordType;
        Matcher m = p.matcher(recordType);
        if (dbTables.containsKey(dbKey)) {
          String tableName = mdlConfig.get(dbKey);
          if (!RegexUtil.isRegex(tableName)) {
            log.error("Error parsing 'tableName' as a regex: "
                + RegexUtil.regexError(tableName));
            return false;
          }
          String[] tmp = mdlConfig.findTableName(tableName, record
              .getTime(), record.getTime());
          table = tmp[0];
        } else if(m.matches()) {
          String timePartition = "_week";
          int timeSize = Integer.parseInt(m.group(2));
          if(timeSize == 5) {
            timePartition = "_month";
          } else if(timeSize == 30) {
            timePartition = "_quarter";
          } else if(timeSize == 180) {
            timePartition = "_year";
          } else if(timeSize == 720) {
            timePartition = "_decade";
          }
          int partition = (int) (record.getTime() / timeSize);
          StringBuilder tmpDbKey = new StringBuilder();
          tmpDbKey.append("report.db.name.");
          tmpDbKey.append(m.group(1));
          if(dbTables.containsKey(tmpDbKey.toString())) {
            StringBuilder tmpTable = new StringBuilder();
            tmpTable.append(dbTables.get(tmpDbKey.toString()));
            tmpTable.append("_");
            tmpTable.append(partition);
            tmpTable.append("_");
            tmpTable.append(timePartition);
            table = tmpTable.toString();
          } else {
            log.debug(tmpDbKey.toString() + " does not exist.");
            continue;           
          }
        } else {
          log.debug(dbKey + " does not exist.");
          continue;
        }
        log.debug("table name:" + table);
        try {
          priKeys = mdlConfig.get("report.db.primary.key." + recordType).split(
              ",");
        } catch (Exception nullException) {
          log.debug(ExceptionUtil.getStackTrace(nullException));
        }
        for (String field : fields) {
          String keyName = escape(field.toLowerCase(), newSpace);
          String keyValue = escape(record.getValue(field).toLowerCase(),
              newSpace);
          StringBuilder buildKey = new StringBuilder();
          buildKey.append("normalize.");
          buildKey.append(recordType);
          buildKey.append(".");
          buildKey.append(keyName);
          if (normalize.containsKey(buildKey.toString())) {
            if (normKey.toString().equals("")) {
              normKey.append(keyName);
              normKey.append(".");
              normKey.append(keyValue);
            } else {
              normKey.append(".");
              normKey.append(keyName);
              normKey.append(".");
              normKey.append(keyValue);
            }
          }
          StringBuilder normalizedKey = new StringBuilder();
          normalizedKey.append("metric.");
          normalizedKey.append(recordType);
          normalizedKey.append(".");
          normalizedKey.append(normKey);
          if (hashReport.containsKey(node)) {
            HashMap<String, String> tmpHash = hashReport.get(node);
            tmpHash.put(normalizedKey.toString(), keyValue);
            hashReport.put(node, tmpHash);
          } else {
            HashMap<String, String> tmpHash = new HashMap<String, String>();
            tmpHash.put(normalizedKey.toString(), keyValue);
            hashReport.put(node, tmpHash);
          }
        }
        for (String field : fields) {
          String valueName = escape(field.toLowerCase(), newSpace);
          String valueValue = escape(record.getValue(field).toLowerCase(),
              newSpace);
          StringBuilder buildKey = new StringBuilder();
          buildKey.append("metric.");
          buildKey.append(recordType);
          buildKey.append(".");

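  // Excerpt from NamenodeProcessor: routes namenode JMX metrics into the
  // hdfs_overview, hdfs_namenode, namenode_jvm and namenode_rpc ChukwaRecords.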
      Reporter reporter) throws Throwable {
    try{
      Logger log = Logger.getLogger(NamenodeProcessor.class);
      long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
   
      final ChukwaRecord hdfs_overview = new ChukwaRecord();
      final ChukwaRecord hdfs_namenode = new ChukwaRecord();
      final ChukwaRecord namenode_jvm = new ChukwaRecord();
      final ChukwaRecord namenode_rpc = new ChukwaRecord();
   
      Map<String, ChukwaRecord> metricsMap = new HashMap<String,ChukwaRecord>(){
        private static final long serialVersionUID = 1L;
        {         
          put("BlockCapacity", hdfs_overview);
          put("BlocksTotal", hdfs_overview);
          put("CapacityTotalGB", hdfs_overview);
          put("CapacityUsedGB", hdfs_overview);
          put("CapacityRemainingGB", hdfs_overview);
          put("CorruptBlocks", hdfs_overview);
          put("ExcessBlocks", hdfs_overview);
          put("FilesTotal", hdfs_overview);
          put("MissingBlocks", hdfs_overview);
          put("PendingDeletionBlocks", hdfs_overview);
          put("PendingReplicationBlocks", hdfs_overview);
          put("ScheduledReplicationBlocks", hdfs_overview);
          put("TotalLoad", hdfs_overview);
          put("UnderReplicatedBlocks", hdfs_overview);
         
          put("gcCount", namenode_jvm);
          put("gcTimeMillis", namenode_jvm);
          put("logError", namenode_jvm);
          put("logFatal", namenode_jvm);
          put("logInfo", namenode_jvm);
          put("logWarn", namenode_jvm);
          put("memHeapCommittedM", namenode_jvm);
          put("memHeapUsedM", namenode_jvm);
          put("threadsBlocked", namenode_jvm);
          put("threadsNew", namenode_jvm);
          put("threadsRunnable", namenode_jvm);
          put("threadsTerminated", namenode_jvm);
          put("threadsTimedWaiting", namenode_jvm);
          put("threadsWaiting", namenode_jvm);
 
          put("ReceivedBytes", namenode_rpc);       
          put("RpcProcessingTime_avg_time", namenode_rpc)
          put("RpcProcessingTime_num_ops", namenode_rpc)
          put("RpcQueueTime_avg_time", namenode_rpc)
          put("RpcQueueTime_num_ops", namenode_rpc)
          put("SentBytes", namenode_rpc)
          put("rpcAuthorizationSuccesses", namenode_rpc)
          put("rpcAuthenticationFailures", namenode_rpc)
          put("rpcAuthenticationSuccesses", namenode_rpc)
        }
      }
   
   
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if(ttTag == null){
        log.warn("timeStamp tag not set in JMX adaptor for namenode");
      }
      else{
        timeStamp = Long.parseLong(ttTag);
      }
      Iterator<JSONObject> iter = obj.entrySet().iterator();
     
     
      while(iter.hasNext()){
        Map.Entry entry = (Map.Entry)iter.next();
        String key = (String) entry.getKey();
        Object value = entry.getValue();
        String valueString = (value == null)?"":value.toString();
       
        //These metrics are string types with JSON structure. So we parse them and get the count
        if(key.equals("LiveNodes") || key.equals("DeadNodes") || key.equals("DecomNodes") || key.equals("NameDirStatuses")){         
          JSONObject jobj = (JSONObject) JSONValue.parse(valueString);
          valueString = Integer.toString(jobj.size());
        }

        //Calculate rate for some of the metrics
        if(rateMap.containsKey(key)){
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if(newValue < 0){
            log.error("NamenodeProcessor's rateMap might be reset or corrupted for metric "+key);
            newValue = 0L;
          }         
          valueString = Long.toString(newValue);
        }
       
        //Check if metric belongs to one of the categories in metricsMap. If not just write it in group Hadoop.HDFS.NameNode
        if(metricsMap.containsKey(key)){
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        }
        else{
          hdfs_namenode.add(key, valueString);
        }
      }     

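  // Excerpt from ZookeeperProcessor: collects selected ZooKeeper JMX metrics
  // into a single ChukwaRecord.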
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter) throws Throwable {
    Logger log = Logger.getLogger(ZookeeperProcessor.class);
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
    final ChukwaRecord record = new ChukwaRecord();
   
    Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>(){
      private static final long serialVersionUID = 1L;

      {
        put("MinRequestLatency", record);
        put("AvgRequestLatency", record);
        put("MaxRequestLatency", record);
        put("PacketsReceived", record);
        put("PacketsSent", record);     
        put("OutstandingRequests", record);
        put("NodeCount", record);
        put("WatchCount", record);
      }
    };
    try{
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if(ttTag == null){
        log.warn("timeStamp tag not set in JMX adaptor for zookeeper");
      }
      else{
        timeStamp = Long.parseLong(ttTag);
      }
      Iterator<JSONObject> iter = obj.entrySet().iterator();
     
      while(iter.hasNext()){
        Map.Entry entry = (Map.Entry)iter.next();
        String key = (String) entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
       
        if(metricsMap.containsKey(key)){
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        }       
      }     
     
      buildGenericRecord(record, null, timeStamp, "zk");
      output.collect(key, record);

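  // Excerpt: parses a system-metrics JSON entry into per-category ChukwaRecords
  // (cpu, system, memory, swap, network, disk, tags).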
  protected void parse(String recordEntry,
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output, Reporter reporter)
      throws Throwable {
    JSONObject json = (JSONObject) JSONValue.parse(recordEntry);
    long timestamp = ((Long)json.get("timestamp")).longValue();
    ChukwaRecord record = new ChukwaRecord();
    Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC"));
    cal.setTimeInMillis(timestamp);
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);
    JSONArray cpuList = (JSONArray) json.get("cpu");
    double combined = 0.0;
    double user = 0.0;
    double sys = 0.0;
    double idle = 0.0;
    for(int i = 0; i< cpuList.size(); i++) {
      JSONObject cpu = (JSONObject) cpuList.get(i);
      Iterator<String> keys = cpu.keySet().iterator();
      combined = combined + Double.parseDouble(cpu.get("combined").toString());
      user = user + Double.parseDouble(cpu.get("user").toString());
      sys = sys + Double.parseDouble(cpu.get("sys").toString());
      idle = idle + Double.parseDouble(cpu.get("idle").toString());
      while(keys.hasNext()) {
        String key = keys.next();
        record.add(key + "." + i, cpu.get(key).toString());
      }
    }
    combined = combined / cpuList.size();
    user = user / cpuList.size();
    sys = sys / cpuList.size();
    idle = idle / cpuList.size();
    record.add("combined", Double.toString(combined));
    record.add("user", Double.toString(user));
    record.add("idle", Double.toString(idle));   
    record.add("sys", Double.toString(sys));
    buildGenericRecord(record, null, cal.getTimeInMillis(), "cpu");
    output.collect(key, record);   

    record = new ChukwaRecord();
    record.add("Uptime", json.get("uptime").toString());
    JSONArray loadavg = (JSONArray) json.get("loadavg");
    record.add("LoadAverage.1", loadavg.get(0).toString());
    record.add("LoadAverage.5", loadavg.get(1).toString());
    record.add("LoadAverage.15", loadavg.get(2).toString());
    buildGenericRecord(record, null, cal.getTimeInMillis(), "system");
    output.collect(key, record);   

    record = new ChukwaRecord();
    JSONObject memory = (JSONObject) json.get("memory");
    Iterator<String> memKeys = memory.keySet().iterator();
    while(memKeys.hasNext()) {
      String key = memKeys.next();
      record.add(key, memory.get(key).toString());
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "memory");
    output.collect(key, record);   

    record = new ChukwaRecord();
    JSONObject swap = (JSONObject) json.get("swap");
    Iterator<String> swapKeys = swap.keySet().iterator();
    while(swapKeys.hasNext()) {
      String key = swapKeys.next();
      record.add(key, swap.get(key).toString());
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "swap");
    output.collect(key, record);
   
    double rxBytes = 0;
    double rxDropped = 0;
    double rxErrors = 0;
    double rxPackets = 0;
    double txBytes = 0;
    double txCollisions = 0;
    double txErrors = 0;
    double txPackets = 0;
    record = new ChukwaRecord();
    JSONArray netList = (JSONArray) json.get("network");
    for(int i = 0;i < netList.size(); i++) {
      JSONObject netIf = (JSONObject) netList.get(i);
      Iterator<String> keys = netIf.keySet().iterator();
      while(keys.hasNext()) {
        String key = keys.next();
        record.add(key + "." + i, netIf.get(key).toString());
        if(i!=0) {
          if(key.equals("RxBytes")) {
            rxBytes = rxBytes + (Long) netIf.get(key);
          } else if(key.equals("RxDropped")) {
            rxDropped = rxDropped + (Long) netIf.get(key);
          } else if(key.equals("RxErrors")) {         
            rxErrors = rxErrors + (Long) netIf.get(key);
          } else if(key.equals("RxPackets")) {
            rxPackets = rxPackets + (Long) netIf.get(key);
          } else if(key.equals("TxBytes")) {
            txBytes = txBytes + (Long) netIf.get(key);
          } else if(key.equals("TxCollisions")) {
            txCollisions = txCollisions + (Long) netIf.get(key);
          } else if(key.equals("TxErrors")) {
            txErrors = txErrors + (Long) netIf.get(key);
          } else if(key.equals("TxPackets")) {
            txPackets = txPackets + (Long) netIf.get(key);
          }
        }
      }
    }
    buildGenericRecord(record, null, cal.getTimeInMillis(), "network");
    record.add("RxBytes", Double.toString(rxBytes));
    record.add("RxDropped", Double.toString(rxDropped));
    record.add("RxErrors", Double.toString(rxErrors));
    record.add("RxPackets", Double.toString(rxPackets));
    record.add("TxBytes", Double.toString(txBytes));
    record.add("TxCollisions", Double.toString(txCollisions));
    record.add("TxErrors", Double.toString(txErrors));
    record.add("TxPackets", Double.toString(txPackets));
    output.collect(key, record);   
   
    double readBytes = 0;
    double reads = 0;
    double writeBytes = 0;
    double writes = 0;
    record = new ChukwaRecord();
    JSONArray diskList = (JSONArray) json.get("disk");
    for(int i = 0;i < diskList.size(); i++) {
      JSONObject disk = (JSONObject) diskList.get(i);
      Iterator<String> keys = disk.keySet().iterator();
      while(keys.hasNext()) {
        String key = keys.next();
        record.add(key + "." + i, disk.get(key).toString());
        if(key.equals("ReadBytes")) {
          readBytes = readBytes + (Long) disk.get("ReadBytes");
        } else if(key.equals("Reads")) {
          reads = reads + (Long) disk.get("Reads");
        } else if(key.equals("WriteBytes")) {
          writeBytes = writeBytes + (Long) disk.get("WriteBytes");
        } else if(key.equals("Writes")) {
          writes = writes + (Long) disk.get("Writes");
        }
      }
    }
    record.add("ReadBytes", Double.toString(readBytes));
    record.add("Reads", Double.toString(reads));
    record.add("WriteBytes", Double.toString(writeBytes));
    record.add("Writes", Double.toString(writes));   
    buildGenericRecord(record, null, cal.getTimeInMillis(), "disk");
    output.collect(key, record);
   
    record = new ChukwaRecord();
    record.add("cluster", chunk.getTag("cluster"));
    buildGenericRecord(record, null, cal.getTimeInMillis(), "tags");
    output.collect(key, record);
  }

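    // Excerpt from a test: compares each emitted ChukwaRecord field against the
    // expected input values, ignoring the ctags field.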
    Iterator<Entry<ChukwaRecordKey, ChukwaRecord>> recordIter = outData
        .entrySet().iterator();
    while (recordIter.hasNext()) {
      Entry<ChukwaRecordKey, ChukwaRecord> recordEntry = recordIter
          .next();
      ChukwaRecord value = recordEntry.getValue();
      String[] fields = value.getFields();
      for (String field : fields) {
        //ignore ctags
        if(field.equals("ctags")) {
          continue;
        }
        String data = value.getValue(field);
        String expected = String.valueOf(inData.get(field));
        /*System.out.println("Metric, expected data, received data- " +
         field + ", " + expected + ", " +data);
        */
        if (!expected.equals(data)) {

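  // Excerpt from HBaseRegionServerProcessor: stores all region server JMX metrics
  // as map fields on one ChukwaRecord.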
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter) throws Throwable {
   
    Logger log = Logger.getLogger(HBaseRegionServerProcessor.class);
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
    ChukwaRecord record = new ChukwaRecord();
   
    Map<String, Buffer> metricsMap = new HashMap<String,Buffer>();

    try{
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if(ttTag == null){
        log.warn("timeStamp tag not set in JMX adaptor for hbase region server");
      }
      else{
        timeStamp = Long.parseLong(ttTag);
      }
      Iterator<JSONObject> iter = obj.entrySet().iterator();
     
      while(iter.hasNext()){
        Map.Entry entry = (Map.Entry)iter.next();
        String key = (String) entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
        Buffer b = new Buffer(valueString.getBytes());
        metricsMap.put(key,b);           
      }     
     
      TreeMap<String, Buffer> t = new TreeMap<String, Buffer>(metricsMap);
      record.setMapFields(t);
      buildGenericRecord(record, null, timeStamp, "regionserver");     
      output.collect(key, record);
    }
    catch(Exception e){
      log.error(ExceptionUtil.getStackTrace(e));

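      // Excerpt from DefaultProcessor: builds a generic ChukwaRecord from a parsed
      // date and the chunk's data type.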
      else {
        dStr = recordEntry;
      }

      Date d = sdf.parse(dStr);
      ChukwaRecord record = new ChukwaRecord();
      this.buildGenericRecord(record, recordEntry, d.getTime(), chunk
          .getDataType());
      output.collect(key, record);
    } catch (ParseException e) {
      log.warn("Unable to parse the date in DefaultProcessor [" + recordEntry

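  // Excerpt from JobTrackerProcessor: routes JobTracker JMX metrics into the
  // mapred_jt, jt_jvm and jt_rpc ChukwaRecords, computing rates for counters tracked in rateMap.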
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter) throws Throwable {
    Logger log = Logger.getLogger(JobTrackerProcessor.class);
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();
   
    final ChukwaRecord mapred_jt = new ChukwaRecord();
    final ChukwaRecord jt_jvm = new ChukwaRecord();
    final ChukwaRecord jt_rpc = new ChukwaRecord();
   
    Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>(){
      private static final long serialVersionUID = 1L;
      {
        put("gcCount", jt_jvm);
        put("gcTimeMillis", jt_jvm);
        put("logError", jt_jvm);
        put("logFatal", jt_jvm);
        put("logInfo", jt_jvm);
        put("logWarn", jt_jvm);
        put("memHeapCommittedM", jt_jvm);
        put("memHeapUsedM", jt_jvm);
        put("threadsBlocked", jt_jvm);
        put("threadsNew", jt_jvm);
        put("threadsRunnable", jt_jvm);
        put("threadsTerminated", jt_jvm);
        put("threadsTimedWaiting", jt_jvm);
        put("threadsWaiting", jt_jvm);

        put("ReceivedBytes", jt_rpc);       
        put("RpcProcessingTime_avg_time", jt_rpc)
        put("RpcProcessingTime_num_ops", jt_rpc)
        put("RpcQueueTime_avg_time", jt_rpc)
        put("RpcQueueTime_num_ops", jt_rpc)
        put("SentBytes", jt_rpc)
        put("rpcAuthorizationSuccesses", jt_rpc)
        put("rpcAuthorizationnFailures", jt_rpc)
      }
    };   
    try{
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if(ttTag == null){
        log.warn("timeStamp tag not set in JMX adaptor for jobtracker");
      }
      else{
        timeStamp = Long.parseLong(ttTag);
      }
      Iterator<JSONObject> iter = obj.entrySet().iterator();
     
      while(iter.hasNext()){
        Map.Entry entry = (Map.Entry)iter.next();
        String key = (String) entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
       
        //Calculate rate for some of the metrics
        if(rateMap.containsKey(key)){
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if(newValue < 0){
            log.warn("JobTrackerProcessor's rateMap might be reset or corrupted for metric "+key);           
            newValue = 0L;
          }         
          valueString = Long.toString(newValue);
        }
       
        //These metrics are string types with JSON structure. So we parse them and get the count
        if(key.indexOf("Json") >= 0){ 
          //ignore these for now. Parsing of JSON array is throwing class cast exception.
       
        else if(metricsMap.containsKey(key)){
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        }
        else {
          mapred_jt.add(key, valueString);
        }
      }     

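  // Excerpt from DatanodeProcessor: routes datanode JMX metrics into the
  // hdfs_datanode, datanode_jvm and datanode_rpc ChukwaRecords.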
      OutputCollector<ChukwaRecordKey, ChukwaRecord> output,
      Reporter reporter) throws Throwable {
    Logger log = Logger.getLogger(DatanodeProcessor.class);
    long timeStamp = Calendar.getInstance(TimeZone.getTimeZone("UTC")).getTimeInMillis();   
   
    final ChukwaRecord hdfs_datanode = new ChukwaRecord();
    final ChukwaRecord datanode_jvm = new ChukwaRecord();
    final ChukwaRecord datanode_rpc = new ChukwaRecord();
   
    Map<String, ChukwaRecord> metricsMap = new HashMap<String, ChukwaRecord>(){
      private static final long serialVersionUID = 1L;

      {
        put("blocks_verified", hdfs_datanode);
        put("blocks_written", hdfs_datanode);
        put("blocks_read", hdfs_datanode);
        put("blocks_replicated", hdfs_datanode);
        put("blocks_removed", hdfs_datanode);
        put("bytes_written", hdfs_datanode);
        put("bytes_read", hdfs_datanode);
        put("heartBeats_avg_time", hdfs_datanode);
        put("heartBeats_num_ops", hdfs_datanode);
       
        put("gcCount", datanode_jvm);
        put("gcTimeMillis", datanode_jvm);
        put("logError", datanode_jvm);
        put("logFatal", datanode_jvm);
        put("logInfo", datanode_jvm);
        put("logWarn", datanode_jvm);
        put("memHeapCommittedM", datanode_jvm);
        put("memHeapUsedM", datanode_jvm);
        put("threadsBlocked", datanode_jvm);
        put("threadsNew", datanode_jvm);
        put("threadsRunnable", datanode_jvm);
        put("threadsTerminated", datanode_jvm);
        put("threadsTimedWaiting", datanode_jvm);
        put("threadsWaiting", datanode_jvm);

        put("ReceivedBytes", datanode_rpc);       
        put("RpcProcessingTime_avg_time", datanode_rpc)
        put("RpcProcessingTime_num_ops", datanode_rpc)
        put("RpcQueueTime_avg_time", datanode_rpc)
        put("RpcQueueTime_num_ops", datanode_rpc)
        put("SentBytes", datanode_rpc)
        put("rpcAuthorizationSuccesses", datanode_rpc);
      }
    };
    try{
      JSONObject obj = (JSONObject) JSONValue.parse(recordEntry);
      String ttTag = chunk.getTag("timeStamp");
      if(ttTag == null){
        log.warn("timeStamp tag not set in JMX adaptor for datanode");
      }
      else{
        timeStamp = Long.parseLong(ttTag);
      }
      Iterator<JSONObject> iter = obj.entrySet().iterator();
     
      while(iter.hasNext()){
        Map.Entry entry = (Map.Entry)iter.next();
        String key = (String) entry.getKey();
        Object value = entry.getValue();
        String valueString = value == null ? "" : value.toString();
       
        //Calculate rate for some of the metrics
        if(rateMap.containsKey(key)){
          long oldValue = rateMap.get(key);
          long curValue = Long.parseLong(valueString);
          rateMap.put(key, curValue);
          long newValue = curValue - oldValue;
          if(newValue < 0){
            log.error("DatanodeProcessor's rateMap might be reset or corrupted for metric "+key);
            newValue = 0L;
          }         
          valueString = Long.toString(newValue);
        }
       
        if(metricsMap.containsKey(key)){
          ChukwaRecord rec = metricsMap.get(key);
          rec.add(key, valueString);
        }       
      }
      buildGenericRecord(hdfs_datanode, null, timeStamp, "dn");
      output.collect(key, hdfs_datanode);
      buildGenericRecord(datanode_jvm, null, timeStamp, "jvm");
