}
}
}
// Next we do this for tables and partitions
// Handle the table/partition load portion of this move task; tbd is null
// when the task carries no table-load work.
loadTableDesc tbd = work.getLoadTableWork();
if (tbd != null) {
// Build a user-facing progress message; the partition spec is appended
// only when one is present (size > 0 means a partition-level load).
String mesg = "Loading data to table " + tbd.getTable().getTableName() +
((tbd.getPartitionSpec().size() > 0) ?
" partition " + tbd.getPartitionSpec().toString() : "");
String mesg_detail = " from " + tbd.getSourceDir();
console.printInfo(mesg, mesg_detail);
// Resolve the target table from the metastore. NOTE(review): the database
// is hard-coded to DEFAULT_DATABASE_NAME here — confirm this predates
// multi-database support rather than being a latent bug.
Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd.getTable().getTableName());
if (work.getCheckFileFormat()) {
// Get all files from the src directory
FileStatus [] dirs;
ArrayList<FileStatus> files;
FileSystem fs;
try {
// Use the table's data location to pick the right FileSystem, then
// expand the (possibly glob) source dir into concrete directories.
fs = FileSystem.get(table.getDataLocation(),conf);
dirs = fs.globStatus(new Path(tbd.getSourceDir()));
files = new ArrayList<FileStatus>();
for (int i=0; (dirs != null && i<dirs.length); i++) {
files.addAll(Arrays.asList(fs.listStatus(dirs[i].getPath())));
// We only check one file, so exit the loop when we have at least one.
if (files.size()>0) break;
}
} catch (IOException e) {
// Wrap filesystem failures; cause is preserved for diagnosis.
throw new HiveException("addFiles: filesystem error in check phase", e);
}
// NOTE(review): the file listing above runs even when
// HIVECHECKFILEFORMAT is false, in which case the result is unused —
// the listing could be moved inside this guard. Left as-is here.
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
// Check if the file format of the file matches that of the table.
boolean flag = HiveFileFormatUtils.checkInputFormat(fs, conf, tbd.getTable().getInputFileFormatClass(), files);
if (!flag) {
throw new HiveException(
"Wrong file format. Please check the file's format.");
}
}
}
// An empty partition spec means a whole-table load; otherwise load into
// the specific partition. Either way, register the written entity in the
// work's output set — presumably consumed by post-execution hooks or
// authorization; verify against callers.
if(tbd.getPartitionSpec().size() == 0) {
db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()));
if (work.getOutputs() != null)
work.getOutputs().add(new WriteEntity(table));
} else {
LOG.info("Partition is: " + tbd.getPartitionSpec().toString());
db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
tbd.getPartitionSpec(), tbd.getReplace(), new Path(tbd.getTmpDir()));
// Re-fetch the partition object (forceCreate=false) so the output
// entity reflects the metastore's view after the load.
Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
if (work.getOutputs() != null)
work.getOutputs().add(new WriteEntity(partn));
}
}