private void publishStats() throws HiveException {
  boolean isStatsReliable = conf.isStatsReliable();

  // Initialize the stats publisher.
  StatsPublisher statsPublisher = Utilities.getStatsPublisher(jc);
  if (statsPublisher == null) {
    // Just return; stats gathering should not block the main query.
    LOG.error("StatsPublishing error: StatsPublisher is not initialized.");
    if (isStatsReliable) {
      throw new HiveException(ErrorMsg.STATSPUBLISHER_NOT_OBTAINED.getErrorCodedMsg());
    }
    return;
  }

  if (!statsPublisher.connect(hconf)) {
    // Just return; stats gathering should not block the main query.
    LOG.error("StatsPublishing error: cannot connect to database");
    if (isStatsReliable) {
      throw new HiveException(ErrorMsg.STATSPUBLISHER_CONNECTION_ERROR.getErrorCodedMsg());
    }
    return;
  }
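  // Context used to build each stats key: the current task ID, the static partition
  // spec, the maximum key prefix length before hashing kicks in, and whether this
  // publisher keys stats independently of the task that produced them.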
  String taskID = Utilities.getTaskIdFromFilename(Utilities.getTaskId(hconf));
  String spSpec = conf.getStaticSpec();
  int maxKeyLength = conf.getMaxStatsKeyPrefixLength();
  boolean taskIndependent = statsPublisher instanceof StatsCollectionTaskIndependent;
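  // Publish one stats entry per dynamic-partition / list-bucketing (DP/LB) output
  // directory tracked by this operator.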
  for (Map.Entry<String, FSPaths> entry : valToPaths.entrySet()) {
    String fspKey = entry.getKey(); // DP/LB
    FSPaths fspValue = entry.getValue();

    // For bucketed tables, the hive.optimize.sort.dynamic.partition optimization
    // appends the taskId to the fspKey.
    if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
      taskID = Utilities.getTaskIdFromFilename(fspKey);
      // If (prefix/ds=__HIVE_DEFAULT_PARTITION__/000000_0) is longer than the max key
      // prefix length while (prefix/ds=10/000000_0) is not, the former gets hashed to a
      // shorter prefix (MD5hash/000000_0) while the latter is stored as-is in the staging
      // stats table. When stats are aggregated in StatsTask, only keys that start with
      // "prefix" are fetched, so the hashed (prefix/ds=__HIVE_DEFAULT_PARTITION__) entry
      // would never be retrieved and hence never aggregated. To avoid this, strip the
      // taskId from the key; it is redundant here anyway.
      fspKey = fspKey.split(taskID)[0];
    }

    // split[0] = DP, split[1] = LB
    String[] split = splitKey(fspKey);
    String dpSpec = split[0];
    String lbSpec = split[1];
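    // For example (hypothetical values): an fspKey of "ds=2024-01-01/key=0" might yield
    // dpSpec "ds=2024-01-01" and lbSpec "key=0", depending on how splitKey() tokenizes.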
    String prefix;
    String postfix = null;
    if (taskIndependent) {
      // key = "database.table/SP/DP/LB/"
      prefix = conf.getTableInfo().getTableName();
    } else {
      // key = "prefix/SP/DP/LB/taskID/"
      prefix = conf.getStatsAggPrefix();
      postfix = Utilities.join(lbSpec, taskID);
    }
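    // Assemble the final key: join the prefix with the static and dynamic partition
    // specs, hash it down if it exceeds maxKeyLength, then append the postfix (the LB
    // spec plus taskID; null for task-independent publishers).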
    prefix = Utilities.join(prefix, spSpec, dpSpec);
    prefix = Utilities.getHashedStatsPrefix(prefix, maxKeyLength);
    String key = Utilities.join(prefix, postfix);
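    // Copy every statistic this FSPaths has collected (e.g. row count, raw data size)
    // into the map handed to the publisher.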
    Map<String, String> statsToPublish = new HashMap<String, String>();
    for (String statType : fspValue.stat.getStoredStats()) {
      statsToPublish.put(statType, Long.toString(fspValue.stat.getStat(statType)));
    }

    if (!statsPublisher.publishStat(key, statsToPublish)) {
      // The original exception is lost; the boolean-returning interface is kept
      // for backward compatibility.
      if (isStatsReliable) {
        throw new HiveException(ErrorMsg.STATSPUBLISHER_PUBLISHING_ERROR.getErrorCodedMsg());
      }
    }
  }
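  // Close the publisher connection; as with publishing, a failure here only aborts
  // the query when stats are marked reliable.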
  if (!statsPublisher.closeConnection()) {
    // The original exception is lost; the boolean-returning interface is kept
    // for backward compatibility.
    if (isStatsReliable) {
      throw new HiveException(ErrorMsg.STATSPUBLISHER_CLOSING_ERROR.getErrorCodedMsg());
    }
  }
}