moveTaskOutputs(fs, src, src, tblPath, false);
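// task outputs are now under the table path; remove the scratch directory and finish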
fs.delete(src, true);
return;
}
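// Register the new partition(s) with the metastore; partitions that were
// already added are rolled back in the catch block below if anything fails.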
HiveMetaStoreClient client = null;
HCatTableInfo tableInfo = jobInfo.getTableInfo();
List<Partition> partitionsAdded = new ArrayList<Partition>();
try {
HiveConf hiveConf = HCatUtil.getHiveConf(conf);
client = HCatUtil.getHiveClient(hiveConf);
StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(), table.getParameters());
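// New partition directories use the group and permissions of the table directory.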
FileStatus tblStat = fs.getFileStatus(tblPath);
String grpName = tblStat.getGroup();
FsPermission perms = tblStat.getPermission();
List<Partition> partitionsToAdd = new ArrayList<Partition>();
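// Static partitioning publishes a single partition rooted at the table path;
// dynamic partitioning publishes one partition per discovered path.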
if (!dynamicPartitioningUsed) {
partitionsToAdd.add(
constructPartition(
context, jobInfo,
tblPath.toString(), jobInfo.getPartitionValues(),
jobInfo.getOutputSchema(), getStorerParameterMap(storer),
table, fs,
grpName, perms));
} else {
for (Entry<String, Map<String, String>> entry : partitionsDiscoveredByPath.entrySet()) {
partitionsToAdd.add(
constructPartition(
context, jobInfo,
getPartitionRootLocation(entry.getKey(), entry.getValue().size()), entry.getValue(),
jobInfo.getOutputSchema(), getStorerParameterMap(storer),
table, fs,
grpName, perms));
}
}
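// Collect the partition key/value maps so they can be logged below.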
ArrayList<Map<String, String>> ptnInfos = new ArrayList<Map<String, String>>();
for (Partition ptn : partitionsToAdd) {
ptnInfos.add(InternalUtil.createPtnKeyValueMap(new Table(tableInfo.getTable()), ptn));
}
// Publish the new partition(s)
if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())) {
Path src = new Path(ptnRootLocation);
// First pass (last argument true) only checks, for each dir we're copying out,
// whether it already exists under the table path, and errors out if it does.
moveTaskOutputs(fs, src, src, tblPath, true);
// Second pass (last argument false) performs the actual move.
moveTaskOutputs(fs, src, src, tblPath, false);
fs.delete(src, true);
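// Update the table schema and register the new partitions with the metastore.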
try {
updateTableSchema(client, table, jobInfo.getOutputSchema());
LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
client.add_partitions(partitionsToAdd);
partitionsAdded = partitionsToAdd;
} catch (Exception e){
// Adding the partitions failed: roll back the filesystem copy and rethrow
for (Partition p : partitionsToAdd){
Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
if (fs.exists(ptnPath)){
fs.delete(ptnPath,true);
}
}
throw e;
}
} else {
// HAR not in use (static partitioning, HAR disabled, or nothing to add): regular operation
updateTableSchema(client, table, jobInfo.getOutputSchema());
LOG.info("HAR is not being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
partitionsAdded = partitionsToAdd;
if (dynamicPartitioningUsed && (partitionsAdded.size() > 0)) {
Path src = new Path(ptnRootLocation);
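// Same two-pass move as in the HAR branch: check for conflicts first,
// then move the outputs and remove the scratch directory.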
moveTaskOutputs(fs, src, src, tblPath, true);
moveTaskOutputs(fs, src, src, tblPath, false);
fs.delete(src, true);
}
client.add_partitions(partitionsToAdd);
}
} catch (Exception e) {
if (partitionsAdded.size() > 0) {
try {
// A later step (e.g. baseCommitter.cleanupJob) failed after partitions
// were added; try to clean up the metastore by dropping them.
for (Partition p : partitionsAdded) {
client.dropPartition(tableInfo.getDatabaseName(),
tableInfo.getTableName(), p.getValues());
}
} catch (Exception te) {
// Keep cause as the original exception
throw new HCatException(