StatsAggregator statsAggregator = null;

try {
  // Stats setup:
  Warehouse wh = new Warehouse(conf);
  FileSystem fileSys;
  FileStatus[] fileStatus;

  if (!this.getWork().getNoStatsAggregator()) {
    String statsImplementationClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
    StatsFactory.setImplementation(statsImplementationClass, conf);
    // Manufacture a StatsAggregator for the configured stats implementation
    // and verify that it can connect before relying on it.
    statsAggregator = StatsFactory.getStatsAggregator();
    if (!statsAggregator.connect(conf)) {
      throw new HiveException("StatsAggregator connect failed " + statsImplementationClass);
    }
  }
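
  // Load any statistics already recorded in the metastore table parameters
  // into the in-memory TableStatistics object.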
  TableStatistics tblStats = new TableStatistics();

  org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
  Map<String, String> parameters = tTable.getParameters();

  boolean tableStatsExist = this.existStats(parameters);

  for (String statType : supportedStats) {
    if (parameters.containsKey(statType)) {
      tblStats.setStat(statType, Long.parseLong(parameters.get(statType)));
    }
  }

  if (parameters.containsKey(StatsSetupConst.NUM_PARTITIONS)) {
    tblStats.setNumPartitions(Integer.parseInt(parameters.get(StatsSetupConst.NUM_PARTITIONS)));
  }
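
  // getPartitionsList() returns null for a non-partitioned table; in that case
  // the stats are computed directly from the table's warehouse directory.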
  List<Partition> partitions = getPartitionsList();
  boolean atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);

  if (partitions == null) {
    // non-partitioned tables:
    if (!tableStatsExist && atomic) {
      // In atomic mode, do not publish anything when no stats exist yet.
      return 0;
    }
    Path tablePath = wh.getTablePath(db.getDatabase(table.getDbName()), table.getTableName());
    fileSys = tablePath.getFileSystem(conf);
    fileStatus = Utilities.getFileStatusRecurse(tablePath, 1, fileSys);

    tblStats.setStat(StatsSetupConst.NUM_FILES, fileStatus.length);
    long tableSize = 0L;