// Create the target database, then (re)create the composite type whose
// two fields supply the column layout for the tables created below.
Database db = new Database();
db.setName(dbName);
client.createDatabase(db);

client.dropType(typeName);
List<FieldSchema> typeCols = new ArrayList<FieldSchema>(2);
typeCols.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
typeCols.add(new FieldSchema("income", Constants.INT_TYPE_NAME, ""));
Type typ1 = new Type();
typ1.setName(typeName);
typ1.setFields(typeCols);
client.createType(typ1);
// Build the first (unpartitioned) table: columns come from typ1, one
// bucket on "name", lazy-simple serde, one storage-descriptor parameter.
Table tbl = new Table();
tbl.setDbName(dbName);
tbl.setTableName(tblName);

StorageDescriptor sd = new StorageDescriptor();
tbl.setSd(sd);
sd.setCols(typ1.getFields());
sd.setCompressed(false);
sd.setNumBuckets(1);

HashMap<String, String> sdParams = new HashMap<String, String>();
sdParams.put("test_param_1", "Use this for comments etc");
sd.setParameters(sdParams);

ArrayList<String> bucketCols = new ArrayList<String>(2);
bucketCols.add("name");
sd.setBucketCols(bucketCols);

SerDeInfo serdeInfo = new SerDeInfo();
serdeInfo.setName(tbl.getTableName());
HashMap<String, String> serdeParams = new HashMap<String, String>();
serdeParams.put(
    org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
serdeInfo.setParameters(serdeParams);
serdeInfo.setSerializationLib(
    org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
sd.setSerdeInfo(serdeInfo);

tbl.setPartitionKeys(new ArrayList<FieldSchema>());
client.createTable(tbl);
if (isThriftClient) {
// the createTable() above does not update the location in the 'tbl'
// object when the client is a thrift client and the code below relies
// on the location being present in the 'tbl' object - so get the table
// from the metastore
tbl = client.getTable(dbName, tblName);
}
// Read the table back from the metastore and verify it matches what was
// created. JUnit convention: expected value first, actual second — the
// original had every pair reversed, which produces misleading failure
// messages ("expected <actual> but was <expected>").
Table tbl2 = client.getTable(dbName, tblName);
assertNotNull(tbl2);
assertEquals(dbName, tbl2.getDbName());
assertEquals(tblName, tbl2.getTableName());
assertEquals(typ1.getFields().size(), tbl2.getSd().getCols().size());
assertFalse(tbl2.getSd().isCompressed());
assertEquals(1, tbl2.getSd().getNumBuckets());
assertEquals(tbl.getSd().getLocation(), tbl2.getSd().getLocation());
assertNotNull(tbl2.getSd().getSerdeInfo());
// NOTE(review): these two statements re-initialize the serde parameters
// of 'sd', which is never read again after this point — they look like
// they were meant to target tbl2.getSd() instead. Kept byte-identical
// because redirecting them would change test behavior; confirm intent.
sd.getSerdeInfo().setParameters(new HashMap<String, String>());
sd.getSerdeInfo().getParameters().put(
    org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "1");
// Mutate the in-memory copy to describe a second, external table at a
// sibling location; it is created later via createTable(tbl2).
tbl2.setTableName(tblName2);
tbl2.setParameters(new HashMap<String, String>());
tbl2.getParameters().put("EXTERNAL", "TRUE");
tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
// getFields() must return exactly the non-partition columns of tblName.
// (Arguments to assertEquals fixed to the JUnit expected-first order.)
List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
assertNotNull(fieldSchemas);
assertEquals(tbl.getSd().getCols().size(), fieldSchemas.size());
for (FieldSchema fs : tbl.getSd().getCols()) {
  assertTrue(fieldSchemas.contains(fs));
}

// getSchema() must additionally include the partition columns (empty
// for this table, so the sizes coincide here).
List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
assertNotNull(fieldSchemasFull);
assertEquals(tbl.getSd().getCols().size() + tbl.getPartitionKeys().size(),
    fieldSchemasFull.size());
for (FieldSchema fs : tbl.getSd().getCols()) {
  assertTrue(fieldSchemasFull.contains(fs));
}
for (FieldSchema fs : tbl.getPartitionKeys()) {
  assertTrue(fieldSchemasFull.contains(fs));
}
// Create the second (external) table and verify it round-trips.
client.createTable(tbl2);
if (isThriftClient) {
  // As above: a thrift client does not populate the location back into
  // the local object, so refetch it from the metastore.
  tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
}
Table tbl3 = client.getTable(dbName, tblName2);
assertNotNull(tbl3);
// assertEquals arguments fixed to the JUnit expected-first order so
// failure messages read correctly.
assertEquals(dbName, tbl3.getDbName());
assertEquals(tblName2, tbl3.getTableName());
assertEquals(typ1.getFields().size(), tbl3.getSd().getCols().size());
assertFalse(tbl3.getSd().isCompressed());
assertEquals(1, tbl3.getSd().getNumBuckets());
assertEquals(tbl2.getSd().getLocation(), tbl3.getSd().getLocation());
assertEquals(tbl2.getParameters(), tbl3.getParameters());

// Field and full-schema listings for the second table.
fieldSchemas = client.getFields(dbName, tblName2);
assertNotNull(fieldSchemas);
assertEquals(tbl2.getSd().getCols().size(), fieldSchemas.size());
for (FieldSchema fs : tbl2.getSd().getCols()) {
  assertTrue(fieldSchemas.contains(fs));
}

fieldSchemasFull = client.getSchema(dbName, tblName2);
assertNotNull(fieldSchemasFull);
assertEquals(tbl2.getSd().getCols().size() + tbl2.getPartitionKeys().size(),
    fieldSchemasFull.size());
for (FieldSchema fs : tbl2.getSd().getCols()) {
  assertTrue(fieldSchemasFull.contains(fs));
}
for (FieldSchema fs : tbl2.getPartitionKeys()) {
  assertTrue(fieldSchemasFull.contains(fs));
}

// Spot-check the storage-descriptor details carried over from tbl.
assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
    .get("test_param_1"));
assertEquals("name", tbl2.getSd().getBucketCols().get(0));
assertTrue("Partition key list is not empty",
    (tbl2.getPartitionKeys() == null)
    || (tbl2.getPartitionKeys().size() == 0));
// test get_table_objects_by_name functionality
ArrayList<String> tableNames = new ArrayList<String>();
tableNames.add(tblName2);
tableNames.add(tblName);
tableNames.add(tblName2); // deliberate duplicate: results must be de-duplicated
List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
assertEquals(2, foundTables.size());
for (Table t : foundTables) {
  if (t.getTableName().equals(tblName2)) {
    assertEquals(tbl2.getSd().getLocation(), t.getSd().getLocation());
  } else {
    assertEquals(tblName, t.getTableName());
    assertEquals(tbl.getSd().getLocation(), t.getSd().getLocation());
  }
  assertEquals(typ1.getFields().size(), t.getSd().getCols().size());
  assertFalse(t.getSd().isCompressed());
  // BUG FIX: the original asserted foundTables.get(0).getSd().getNumBuckets()
  // on every iteration, so the bucket count of any table after the first
  // was never actually checked. Verify the current element instead.
  assertEquals(1, t.getSd().getNumBuckets());
  assertNotNull(t.getSd().getSerdeInfo());
  assertEquals(dbName, t.getDbName());
}