Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Table$Isset

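The snippets below exercise the Thrift-generated Table class from the Hive metastore API. Because the class is Thrift-generated, every field carries an isSet flag (the Isset machinery this page is named after) alongside its getter and setter. A minimal sketch of that tracking, using only accessors that appear in the examples below plus the standard Thrift-generated isSet methods; the database and table names are placeholders:

    Table tbl = new Table();
    tbl.setDbName("default");
    tbl.setTableName("testtbl");
    // Thrift records which fields have been assigned, so an unset field can be
    // told apart from one explicitly set to a default value.
    boolean hasDb = tbl.isSetDbName();   // true: assigned above
    boolean hasSd = tbl.isSetSd();       // false: no StorageDescriptor yet
    tbl.setLastAccessTimeIsSet(false);   // clears the flag for a primitive field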

              }
            }
          } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {

            Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject.getObjectName());
            String partName = null;
            if (hiveObject.getPartValues() != null) {
              partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues());
            }
            List<MPartitionPrivilege> partitionGrants = this
                .listPrincipalPartitionGrants(userName, principalType,
                    hiveObject.getDbName(), hiveObject.getObjectName(), partName);
            for (String privilege : privs) {
              // Reset per privilege so a match for an earlier privilege cannot
              // mask a missing grant for a later one.
              boolean found = false;
              for (MPartitionPrivilege partGrant : partitionGrants) {
                String partPriv = partGrant.getPrivilege();
                if (partPriv.equalsIgnoreCase(privilege)) {
                  found = true;
                  persistentObjs.add(partGrant);
                  break;
                }
              }
              if (!found) {
                throw new InvalidObjectException("No grant (" + privilege
                    + ") found on table " + tabObj.getTableName()
                    + ", partition is " + partName + ", database is " + tabObj.getDbName());
              }
            }
          } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {

            Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject
                .getObjectName());
            String partName = null;
            if (hiveObject.getPartValues() != null) {
              partName = Warehouse.makePartName(tabObj.getPartitionKeys(),
                  hiveObject.getPartValues());
            }

            if (partName != null) {
              List<MPartitionColumnPrivilege> mSecCol = listPrincipalPartitionColumnGrants(
                  userName, principalType, hiveObject.getDbName(), hiveObject
                      .getObjectName(), partName, hiveObject.getColumnName());
              if (mSecCol != null) {
                for (String privilege : privs) {
                  // Reset per privilege; see the partition branch above.
                  boolean found = false;
                  for (MPartitionColumnPrivilege col : mSecCol) {
                    String colPriv = col.getPrivilege();
                    if (colPriv.equalsIgnoreCase(privilege)) {
                      found = true;
                      persistentObjs.add(col);
                      break;
                    }
                  }
                  if (!found) {
                    throw new InvalidObjectException("No grant (" + privilege
                        + ") found on table " + tabObj.getTableName()
                        + ", partition is " + partName + ", column name = "
                        + hiveObject.getColumnName() + ", database is "
                        + tabObj.getDbName());
                  }
                }
              }
            } else {
              List<MTableColumnPrivilege> mSecCol = listPrincipalTableColumnGrants(
                  userName, principalType, hiveObject.getDbName(), hiveObject
                      .getObjectName(), hiveObject.getColumnName());
              if (mSecCol != null) {
                for (String privilege : privs) {
                  // Reset per privilege; see the partition branch above.
                  boolean found = false;
                  for (MTableColumnPrivilege col : mSecCol) {
                    String colPriv = col.getPrivilege();
                    if (colPriv.equalsIgnoreCase(privilege)) {
                      found = true;
                      persistentObjs.add(col);
                      break;
                    }
                  }
                  if (!found) {
                    throw new InvalidObjectException("No grant (" + privilege
                        + ") found on table " + tabObj.getTableName()
                        + ", column name = "
                        + hiveObject.getColumnName() + ", database is "
                        + tabObj.getDbName());
                  }
                }
              }
            }


    LOG.debug("Begin Executing isPartitionMarkedForEvent");
    try{
    openTransaction();
    Query query = pm.newQuery(MPartitionEvent.class, "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4");
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4");
    Table tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid.
    if(null == tbl) {
      throw new UnknownTableException("Table: "+ tblName + " is not found.");
    }
    partEvents = (Collection<MPartitionEvent>) query.executeWithArray(dbName, tblName, getPartitionStr(tbl, partName), evtType.getValue());
    pm.retrieveAll(partEvents);

  public Table markPartitionForEvent(String dbName, String tblName, Map<String, String> partName,
      PartitionEventType evtType) throws MetaException, UnknownTableException,
      InvalidPartitionException, UnknownPartitionException {

    LOG.debug("Begin executing markPartitionForEvent");
    boolean success = false;
    Table tbl = null;
    try {
      openTransaction();
      tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid.
      if (null == tbl) {
        throw new UnknownTableException("Table: " + tblName + " is not found.");
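The two ObjectStore methods above back the metastore's partition-event API. A hedged client-side sketch, assuming HiveMetaStoreClient exposes markPartitionForEvent and isPartitionMarkedForEvent with the partition given as a key/value map (signatures mirrored from the server-side methods shown here; database and table names are placeholders):

    Map<String, String> partKVs = new HashMap<String, String>();
    partKVs.put("ds", "2008-07-01");
    partKVs.put("hr", "14");
    // Record that a load completed for this partition ...
    client.markPartitionForEvent("default", "testtbl", partKVs, PartitionEventType.LOAD_DONE);
    // ... then ask whether the event was recorded.
    boolean marked = client.isPartitionMarkedForEvent("default", "testtbl", partKVs,
        PartitionEventType.LOAD_DONE);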

  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
    // Subscribers can learn about the addition of a table in HCat by
    // listening on the topic named "HCAT" with the message selector
    // "HCAT_EVENT = HCAT_ADD_TABLE". (A consumer sketch follows this method.)
    if (tableEvent.getStatus()) {
      Table tbl = tableEvent.getTable();
      HMSHandler handler = tableEvent.getHandler();
      HiveConf conf = handler.getHiveConf();
      Table newTbl;
      try {
        newTbl = handler.get_table(tbl.getDbName(), tbl.getTableName())
          .deepCopy();
        newTbl.getParameters().put(
          HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
          getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
            + newTbl.getTableName().toLowerCase());
        handler.alter_table(newTbl.getDbName(), newTbl.getTableName(), newTbl);
      } catch (InvalidOperationException e) {
        MetaException me = new MetaException(e.toString());
        me.initCause(e);
        throw me;
      } catch (NoSuchObjectException e) {
        MetaException me = new MetaException(e.toString());
        me.initCause(e);
        throw me;
      }
      String topicName = getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase();
      send(messageFactory.buildCreateTableMessage(newTbl), topicName);
    }
  }
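The comment in onCreateTable describes a plain JMS contract: events are published to a topic and clients filter them with a message selector. A minimal consumer sketch using the standard javax.jms API; obtaining the ConnectionFactory (e.g. from JNDI) is left out, and only the topic name and selector string come from the code above:

    Connection conn = connectionFactory.createConnection();
    Session session = conn.createSession(false, Session.AUTO_ACKNOWLEDGE);
    Topic topic = session.createTopic("HCAT");
    // JMS selector syntax: string literals are single-quoted.
    MessageConsumer consumer = session.createConsumer(topic, "HCAT_EVENT = 'HCAT_ADD_TABLE'");
    conn.start();
    Message msg = consumer.receive(); // blocks until a matching event arrives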

    // DataNucleus throws an NPE when we try to serialize a table object
    // retrieved from the metastore. To work around that, we reset the following objects

    if (tableEvent.getStatus()) {
      Table table = tableEvent.getTable();
      String topicName = getTopicPrefix(tableEvent.getHandler().getHiveConf()) + "." + table.getDbName().toLowerCase();
      send(messageFactory.buildDropTableMessage(table), topicName);
    }
  }

          new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
      typ1.getFields().add(
          new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));
      client.createType(typ1);

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(typ1.getFields());
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters()
          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
      sd.setSortCols(new ArrayList<Order>());
      sd.setStoredAsSubDirectories(false);

      //skewed information
      SkewedInfo skewInfor = new SkewedInfo();
      skewInfor.setSkewedColNames(Arrays.asList("name"));
      List<String> skv = Arrays.asList("1");
      skewInfor.setSkewedColValues(Arrays.asList(skv));
      Map<List<String>, String> scvlm = new HashMap<List<String>, String>();
      scvlm.put(skv, "location1");
      skewInfor.setSkewedColValueLocationMaps(scvlm);
      sd.setSkewedInfo(skewInfor);

      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
      tbl.getPartitionKeys().add(
          new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema("hr", serdeConstants.STRING_TYPE_NAME, ""));

      client.createTable(tbl);

      if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      assertEquals(dbPermission, fs.getFileStatus(new Path(tbl.getSd().getLocation()))
          .getPermission());

      Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
      Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
      Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
      Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");

      // check if the partition exists (it shouldn't)
      boolean exceptionThrown = false;
      try {
        Partition p = client.getPartition(dbName, tblName, vals);
      } catch(Exception e) {
        assertEquals("partition should not have existed",
            NoSuchObjectException.class, e.getClass());
        exceptionThrown = true;
      }
      assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
      Partition retp = client.add_partition(part);
      assertNotNull("Unable to create partition " + part, retp);
      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
          .getPermission());
      Partition retp2 = client.add_partition(part2);
      assertNotNull("Unable to create partition " + part2, retp2);
      assertEquals(dbPermission, fs.getFileStatus(new Path(retp2.getSd().getLocation()))
          .getPermission());
      Partition retp3 = client.add_partition(part3);
      assertNotNull("Unable to create partition " + part3, retp3);
      assertEquals(dbPermission, fs.getFileStatus(new Path(retp3.getSd().getLocation()))
          .getPermission());
      Partition retp4 = client.add_partition(part4);
      assertNotNull("Unable to create partition " + part4, retp4);
      assertEquals(dbPermission, fs.getFileStatus(new Path(retp4.getSd().getLocation()))
          .getPermission());

      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
      if(isThriftClient) {
        // since we are using thrift, 'part' will not have the create time and
        // last DDL time set since it does not get updated in the add_partition()
        // call - likewise part2 and part3 - set it correctly so that equals check
        // doesn't fail
        adjust(client, part, dbName, tblName);
        adjust(client, part2, dbName, tblName);
        adjust(client, part3, dbName, tblName);
      }
      assertTrue("Partitions are not same", part.equals(part_get));

      String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
      String part2Name = "ds=2008-07-01 14%3A13%3A12/hr=15";
      String part3Name ="ds=2008-07-02 14%3A13%3A12/hr=15";
      String part4Name ="ds=2008-07-03 14%3A13%3A12/hr=151";

      part_get = client.getPartition(dbName, tblName, partName);
      assertTrue("Partitions are not the same", part.equals(part_get));

      // Test partition listing with a partial spec - ds is specified but hr is not
      List<String> partialVals = new ArrayList<String>();
      partialVals.add(vals.get(0));
      Set<Partition> parts = new HashSet<Partition>();
      parts.add(part);
      parts.add(part2);

      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partitions", partial.size() == 2);
      assertTrue("Not all parts returned", partial.containsAll(parts));

      Set<String> partNames = new HashSet<String>();
      partNames.add(partName);
      partNames.add(part2Name);
      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      partNames.add(part3Name);
      partNames.add(part4Name);
      partialVals.clear();
      partialVals.add("");
      partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
      assertTrue("Should have returned 4 partition names", partialNames.size() == 4);
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Test partition listing with a partial spec - hr is specified but ds is not
      parts.clear();
      parts.add(part2);
      parts.add(part3);

      partialVals.clear();
      partialVals.add("");
      partialVals.add(vals2.get(1));

      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
      assertEquals("Should have returned 2 partitions", 2, partial.size());
      assertTrue("Not all parts returned", partial.containsAll(parts));

      partNames.clear();
      partNames.add(part2Name);
      partNames.add(part3Name);
      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
          (short) -1);
      assertEquals("Should have returned 2 partition names", 2, partialNames.size());
      assertTrue("Not all part names returned", partialNames.containsAll(partNames));

      // Verify escaped partition names don't return partitions
      exceptionThrown = false;
      try {
        String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
        client.getPartition(dbName, tblName, badPartName);
      } catch(NoSuchObjectException e) {
        exceptionThrown = true;
      }
      assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);

      Path partPath = new Path(part.getSd().getLocation());

      assertTrue(fs.exists(partPath));
      client.dropPartition(dbName, tblName, part.getValues(), true);
      assertFalse(fs.exists(partPath));

      // Test append_partition_by_name
      client.appendPartition(dbName, tblName, partName);
      Partition part5 = client.getPartition(dbName, tblName, part.getValues());
      assertTrue("Append partition by name failed", part5.getValues().equals(vals));;
      Path part5Path = new Path(part5.getSd().getLocation());
      assertTrue(fs.exists(part5Path));

      // Test drop_partition_by_name
      assertTrue("Drop partition by name failed",
          client.dropPartition(dbName, tblName, partName, true));
      assertFalse(fs.exists(part5Path));

      // add the partition again so that drop table with a partition can be
      // tested
      retp = client.add_partition(part);
      assertNotNull("Unable to create partition " + part, retp);
      assertEquals(dbPermission, fs.getFileStatus(new Path(retp.getSd().getLocation()))
          .getPermission());

      // test add_partitions

      List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
      List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
      List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
      List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
      List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");

      Exception savedException;

      // add_partitions(empty list) : ok, normal operation
      client.add_partitions(new ArrayList<Partition>());

      // add_partitions(1,2,3) : ok, normal operation
      Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
      Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
      Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
      client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));

      if(isThriftClient) {
        // do DDL time munging if thrift mode
        adjust(client, mpart1, dbName, tblName);
        adjust(client, mpart2, dbName, tblName);
        adjust(client, mpart3, dbName, tblName);
      }
      verifyPartitionsPublished(client, dbName, tblName,
          Arrays.asList(mvals1.get(0)),
          Arrays.asList(mpart1,mpart2,mpart3));

      Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
      Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");

      // create dir for /mpart5
      Path mp5Path = new Path(mpart5.getSd().getLocation());
      warehouse.mkdirs(mp5Path);
      assertTrue(fs.exists(mp5Path));
      assertEquals(dbPermission, fs.getFileStatus(mp5Path).getPermission());

      // add_partitions(5,4) : err = duplicate keyvals on mpart4
      savedException = null;
      try {
        client.add_partitions(Arrays.asList(mpart5,mpart4));
      } catch (Exception e) {
        savedException = e;
      } finally {
        assertNotNull(savedException);
      }

      // check that /mpart4 does not exist, but /mpart5 still does.
      assertTrue(fs.exists(mp5Path));
      assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));

      // add_partitions(5) : ok
      client.add_partitions(Arrays.asList(mpart5));

      if(isThriftClient) {
        // do DDL time munging if thrift mode
        adjust(client, mpart5, dbName, tblName);
      }

      verifyPartitionsPublished(client, dbName, tblName,
          Arrays.asList(mvals1.get(0)),
          Arrays.asList(mpart1,mpart2,mpart3,mpart5));

      //// end add_partitions tests

      client.dropTable(dbName, tblName);

      client.dropType(typeName);

      // recreate table as external, drop partition and it should
      // still exist
      tbl.setParameters(new HashMap<String, String>());
      tbl.getParameters().put("EXTERNAL", "TRUE");
      client.createTable(tbl);
      retp = client.add_partition(part);
      assertTrue(fs.exists(partPath));
      client.dropPartition(dbName, tblName, part.getValues(), true);
      assertTrue(fs.exists(partPath));

    ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
    cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
    cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    tbl.setSd(sd);
    sd.setCols(cols);
    sd.setCompressed(false);
    sd.setParameters(new HashMap<String, String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters()
        .put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.setSortCols(new ArrayList<Order>());

    client.createTable(tbl);

    if (isThriftClient) {
      // the createTable() above does not update the location in the 'tbl'
      // object when the client is a thrift client and the code below relies
      // on the location being present in the 'tbl' object - so get the table
      // from the metastore
      tbl = client.getTable(dbName, tblName);
    }

    ArrayList<FieldSchema> viewCols = new ArrayList<FieldSchema>(1);
    viewCols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));

    ArrayList<FieldSchema> viewPartitionCols = new ArrayList<FieldSchema>(1);
    viewPartitionCols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));

    Table view = new Table();
    view.setDbName(dbName);
    view.setTableName(viewName);
    view.setTableType(TableType.VIRTUAL_VIEW.name());
    view.setPartitionKeys(viewPartitionCols);
    view.setViewOriginalText("SELECT income, name FROM " + tblName);
    view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName +
        "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
    StorageDescriptor viewSd = new StorageDescriptor();
    view.setSd(viewSd);
    viewSd.setCols(viewCols);
    viewSd.setCompressed(false);
    viewSd.setParameters(new HashMap<String, String>());
    viewSd.setSerdeInfo(new SerDeInfo());
    viewSd.getSerdeInfo().setParameters(new HashMap<String, String>());

      ArrayList<FieldSchema> cols = new ArrayList<FieldSchema>(2);
      cols.add(new FieldSchema("name", serdeConstants.STRING_TYPE_NAME, ""));
      cols.add(new FieldSchema("income", serdeConstants.INT_TYPE_NAME, ""));

      Table tbl = new Table();
      tbl.setDbName(dbName);
      tbl.setTableName(tblName);
      StorageDescriptor sd = new StorageDescriptor();
      tbl.setSd(sd);
      sd.setCols(cols);
      sd.setCompressed(false);
      sd.setNumBuckets(1);
      sd.setParameters(new HashMap<String, String>());
      sd.getParameters().put("test_param_1", "Use this for comments etc");
      sd.setBucketCols(new ArrayList<String>(2));
      sd.getBucketCols().add("name");
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setParameters(new HashMap<String, String>());
      sd.getSerdeInfo().getParameters()
          .put(serdeConstants.SERIALIZATION_FORMAT, "1");
      sd.setSortCols(new ArrayList<Order>());

      tbl.setPartitionKeys(new ArrayList<FieldSchema>(2));
      tbl.getPartitionKeys().add(
          new FieldSchema("ds", serdeConstants.STRING_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema("hr", serdeConstants.INT_TYPE_NAME, ""));

      client.createTable(tbl);

      if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(dbName, tblName);
      }

      Partition part = new Partition();
      part.setDbName(dbName);
      part.setTableName(tblName);
      part.setValues(vals);
      part.setParameters(new HashMap<String, String>());
      part.setSd(tbl.getSd());
      part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
      part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");

      client.add_partition(part);

      Partition part2 = client.getPartition(dbName, tblName, part.getValues());

    return new Builder(dbName, tableName, columns);
  }

  Table toHiveTable(HiveConf conf) throws HCatException {

    Table newTable = new Table();
    newTable.setDbName(dbName);
    newTable.setTableName(tableName);
    if (tblProps != null) {
      newTable.setParameters(tblProps);
    }

    if (isExternal) {
      newTable.putToParameters("EXTERNAL", "TRUE");
      newTable.setTableType(TableType.EXTERNAL_TABLE.toString());
    } else {
      newTable.setTableType(TableType.MANAGED_TABLE.toString());
    }

    StorageDescriptor sd = new StorageDescriptor();
    sd.setSerdeInfo(new SerDeInfo());
    if (location != null) {
      sd.setLocation(location);
    }
    if (this.comment != null) {
      newTable.putToParameters("comment", comment);
    }
    if (!StringUtils.isEmpty(fileFormat)) {
      sd.setInputFormat(inputformat);
      sd.setOutputFormat(outputformat);
      if (serde != null) {
        sd.getSerdeInfo().setSerializationLib(serde);
      } else {
        LOG.info("Using LazySimpleSerDe for table " + tableName);
        sd.getSerdeInfo()
          .setSerializationLib(
            org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class
              .getName());
      }
    } else {
      try {
        LOG.info("Creating instance of storage handler to get input/output, serder info.");
        HiveStorageHandler sh = HiveUtils.getStorageHandler(conf,
          storageHandler);
        sd.setInputFormat(sh.getInputFormatClass().getName());
        sd.setOutputFormat(sh.getOutputFormatClass().getName());
        sd.getSerdeInfo().setSerializationLib(
          sh.getSerDeClass().getName());
        newTable.putToParameters(
          org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
          storageHandler);
      } catch (HiveException e) {
        throw new HCatException(
          "Exception while creating instance of storage handler",
          e);
      }
    }
    newTable.setSd(sd);
    if (this.partCols != null) {
      ArrayList<FieldSchema> hivePtnCols = new ArrayList<FieldSchema>();
      for (HCatFieldSchema fs : this.partCols) {
        hivePtnCols.add(HCatSchemaUtils.getFieldSchema(fs));
      }
      newTable.setPartitionKeys(hivePtnCols);
    }

    if (this.cols != null) {
      ArrayList<FieldSchema> hiveTblCols = new ArrayList<FieldSchema>();
      for (HCatFieldSchema fs : this.cols) {
        hiveTblCols.add(HCatSchemaUtils.getFieldSchema(fs));
      }
      newTable.getSd().setCols(hiveTblCols);
    }

    if (this.bucketCols != null) {
      newTable.getSd().setBucketCols(bucketCols);
      newTable.getSd().setNumBuckets(numBuckets);
    }

    if (this.sortCols != null) {
      newTable.getSd().setSortCols(sortCols);
    }

    newTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
    newTable.setLastAccessTimeIsSet(false);
    return newTable;
  }
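toHiveTable is the bridge from HCat's builder-style table description to the metastore Table object. A hedged sketch of driving it end to end, assuming HCatalog's java-client API (HCatCreateTableDesc.create(...) returning the Builder seen above, Builder.fileFormat and build(), and an HCatClient in scope); the column and table names are placeholders:

    List<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
    cols.add(new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, ""));
    HCatCreateTableDesc desc = HCatCreateTableDesc
        .create("default", "testtbl", cols) // returns the Builder shown above
        .fileFormat("rcfile")               // selects input/output formats and serde
        .build();
    // The client implementation converts the descriptor to a metastore Table
    // via toHiveTable(conf) before issuing the create call.
    client.createTable(desc);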

    assertNotNull(client.getDatabase(dbName).getLocationUri());

    List<FieldSchema> fields = new ArrayList<FieldSchema>();
    fields.add(new FieldSchema("colname", serdeConstants.STRING_TYPE_NAME, ""));

    Table tbl = new Table();
    tbl.setDbName(dbName);
    tbl.setTableName(tblName);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(fields);
    tbl.setSd(sd);

    //sd.setLocation("hdfs://tmp");
    sd.setInputFormat(RCFileInputFormat.class.getName());
    sd.setOutputFormat(RCFileOutputFormat.class.getName());
    sd.setParameters(new HashMap<String, String>());
    sd.getParameters().put("test_param_1", "Use this for comments etc");
    //sd.setBucketCols(new ArrayList<String>(2));
    //sd.getBucketCols().add("name");
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(
        org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
    tbl.setPartitionKeys(fields);

    Map<String, String> tableParams = new HashMap<String, String>();
    tableParams.put("hcat.testarg", "testArgValue");

    tbl.setParameters(tableParams);

    client.createTable(tbl);
    Path tblPath = new Path(client.getTable(dbName, tblName).getSd().getLocation());
    assertTrue(tblPath.getFileSystem(hiveConf).mkdirs(new Path(tblPath, "colname=p1")));
