Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Table
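
Before the excerpts, a minimal, self-contained sketch of the lookup pattern they all rely on. The class name TableLookupExample, the table name "src", and the use of a main method are illustrative assumptions; only the Hive.get(), getTable(), getPath(), getCols(), isPartitioned(), and getName() calls are taken from the excerpts themselves.

  import java.util.List;

  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;
  import org.apache.hadoop.hive.ql.metadata.Hive;
  import org.apache.hadoop.hive.ql.metadata.HiveException;
  import org.apache.hadoop.hive.ql.metadata.Table;

  public class TableLookupExample {
    public static void main(String[] args) throws HiveException {
      // Obtain the Hive client, as the excerpts below do.
      Hive db = Hive.get();

      // Resolve a table by name; "src" is a placeholder table name.
      Table tbl = db.getTable("src");

      // Metadata accessors used throughout the excerpts.
      Path location = tbl.getPath();              // storage path of the table
      List<FieldSchema> cols = tbl.getCols();     // column schema
      boolean partitioned = tbl.isPartitioned();  // whether the table has partition columns

      System.out.println(tbl.getName() + " at " + location + " with "
          + cols.size() + " columns, partitioned=" + partitioned);
    }
  }

Falling back from partition-level to table-level authorization when the PARTITION_LEVEL_PRIVILEGE table parameter is unset or false: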


  @Override
  public void authorize(Partition part, Privilege[] inputRequiredPriv,
      Privilege[] outputRequiredPriv) throws HiveException {

    // If the partition does not carry partition-level privileges, fall back to table-level authorization.
    Table table = part.getTable();
    if (table.getParameters().get("PARTITION_LEVEL_PRIVILEGE") == null || ("FALSE"
        .equalsIgnoreCase(table.getParameters().get(
            "PARTITION_LEVEL_PRIVILEGE")))) {
      this.authorize(part.getTable(), inputRequiredPriv, outputRequiredPriv);
      return;
    }
   
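Looking up a table with Hive.getTable(), skipping tables that do not exist, and authorizing access to the table's storage path: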



    Warehouse wh = new Warehouse(cntxt.getConf());
    if(!isDBOp){
      // Do validations for table path.
      Table tbl;
      try{
        tbl = cntxt.getHive().getTable(name);
      }
      catch(InvalidTableException ite){
        // Table itself doesn't exist in metastore, nothing to validate.
        return;
      }
      Path path = tbl.getPath();
      if(path != null){
        AuthUtils.authorize(wh.getDnsPath(path), action, cntxt.getConf());
      } else{
        // This can happen if the table exists in the metastore for the given
        // table name but has no path associated with it, so there is nothing to check.
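Recording storage driver classes as table parameters through getTTable().getParameters() and persisting them with Hive.alterTable(); partition-level parameters are handled through Partition.getParameters():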

    howlProps.put(HowlConstants.HOWL_ISD_CLASS, inDriver);
    howlProps.put(HowlConstants.HOWL_OSD_CLASS, outDriver);

    try {
      Hive db = context.getHive();
      Table tbl = db.getTable(tableName);
      if(partSpec == null){
        // File format settings apply to the table itself, not to a partition.
        tbl.getTTable().getParameters().putAll(howlProps);
        db.alterTable(tableName, tbl);
      }else{
        Partition part = db.getPartition(tbl,partSpec,false);
        Map<String,String> partParams = part.getParameters();
        if(partParams == null){
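Resolving the Table behind each TableScanOperator, collecting the needed column names from getCols(), and pruning partitions when the table is partitioned: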

            .getParseContext().getTopOps().entrySet()) {
          Operator<? extends Serializable> topOp = topOpMap.getValue();
          if (topOp instanceof TableScanOperator
              && tsoTopMap.containsKey(topOp)) {
            TableScanOperator tableScanOp = (TableScanOperator) topOp;
            Table tbl = tsoTopMap.get(tableScanOp);
            List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
            List<FieldSchema> columns = tbl.getCols();
            List<String> cols = new ArrayList<String>();
            if (neededColumnIds != null && neededColumnIds.size() > 0) {
              for (int i = 0; i < neededColumnIds.size(); i++) {
                cols.add(columns.get(neededColumnIds.get(i)).getName());
              }
            } else {
              for (int i = 0; i < columns.size(); i++) {
                cols.add(columns.get(i).getName());
              }
            }
            if (tbl.isPartitioned()) {
              String alias_id = topOpMap.getKey();
              PrunedPartitionList partsList = PartitionPruner.prune(parseCtx
                  .getTopToTable().get(topOp), parseCtx.getOpToPartPruner()
                  .get(topOp), parseCtx.getConf(), alias_id, parseCtx
                  .getPrunedPartitions());
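Applying filesystem permissions to a table's storage path obtained from getPath():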

    FsPermission perms = FsPermission.valueOf(permsStr);
    if(!tblName.isEmpty()){
      Hive db = null;
      try{
        db = Hive.get();
        Table tbl =  db.getTable(tblName);
        Path tblPath = tbl.getPath();

        FileSystem fs = tblPath.getFileSystem(conf);
        if(null != perms){
          fs.setPermission(tblPath, perms);
        }
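Creating tables, including a Thrift-backed table configured with setInputFormatClass(), setOutputFormatClass(), setSerializationLib(), and setSerdeParam():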

          IgnoreKeyTextOutputFormat.class);
    }
    db.createTable("src_sequencefile", cols, null,
        SequenceFileInputFormat.class, SequenceFileOutputFormat.class);

    Table srcThrift = new Table(db.getCurrentDatabase(), "src_thrift");
    srcThrift.setInputFormatClass(SequenceFileInputFormat.class.getName());
    srcThrift.setOutputFormatClass(SequenceFileOutputFormat.class.getName());
    srcThrift.setSerializationLib(ThriftDeserializer.class.getName());
    srcThrift.setSerdeParam(Constants.SERIALIZATION_CLASS, Complex.class
        .getName());
    srcThrift.setSerdeParam(Constants.SERIALIZATION_FORMAT,
        TBinaryProtocol.class.getName());
    db.createTable(srcThrift);

    LinkedList<String> json_cols = new LinkedList<String>();
    json_cols.add("json");
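Creating unpartitioned and partitioned tables and adding a partition with Hive.createPartition():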

    db.createTable("dest2", cols, null, TextInputFormat.class,
        IgnoreKeyTextOutputFormat.class);

    db.createTable("dest3", cols, part_cols, TextInputFormat.class,
        IgnoreKeyTextOutputFormat.class);
    Table dest3 = db.getTable("dest3");

    HashMap<String, String> part_spec = new HashMap<String, String>();
    part_spec.put("ds", "2008-04-08");
    part_spec.put("hr", "12");
    db.createPartition(dest3, part_spec);
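Adding a partition to an existing table, resolving an explicit partition location relative to the table's path: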

   * @throws HiveException
   */
  private int addPartition(Hive db, AddPartitionDesc addPartitionDesc)
    throws HiveException {
   
    Table tbl = db.getTable(addPartitionDesc.getDbName(),
        addPartitionDesc.getTableName());
   
    if(addPartitionDesc.getLocation() == null) {
      db.createPartition(tbl, addPartitionDesc.getPartSpec());
    } else {
      //set partition path relative to table
      db.createPartition(tbl, addPartitionDesc.getPartSpec(),
          new Path(tbl.getPath(), addPartitionDesc.getLocation()));
    }
   
    return 0;
  }
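Listing a table's partitions after verifying isPartitioned():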

   */
  private int showPartitions(Hive db, FileSystem fs,
      showPartitionsDesc showParts) throws HiveException {
    // get the partitions for the table and populate the output
    String tabName = showParts.getTabName();
    Table tbl = null;
    List<String> parts = null;

    tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tabName);

    if (!tbl.isPartitioned()) {
      console.printError("Table " + tabName + " is not a partitioned table");
      return 1;
    }

    parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
        .getName(), Short.MAX_VALUE);

    // write the results in the file
    try {
      DataOutput outStream = (DataOutput) fs.create(showParts.getResFile());
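Describing a table or one of its partitions: the columns, partition columns, and (for extended output) the full Thrift metadata are written to the result file: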

    String colPath = descTbl.getTableName();
    String tableName = colPath.substring(0,
        colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.'));

    // describe the table - populate the output stream
    Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, false);
    Partition part = null;
    try {
      if (tbl == null) {
        DataOutput outStream = (DataOutput) fs.create(descTbl.getResFile());
        String errMsg = "Table " + tableName + " does not exist";
        outStream.write(errMsg.getBytes("UTF-8"));
        ((FSDataOutputStream) outStream).close();
        return 0;
      }
      if (descTbl.getPartSpec() != null) {
        part = db.getPartition(tbl, descTbl.getPartSpec(), false);
        if (part == null) {
          DataOutput outStream = (DataOutput) fs.create(descTbl.getResFile());
          String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
              + tableName + " does not exist";
          outStream.write(errMsg.getBytes("UTF-8"));
          ((FSDataOutputStream) outStream).close();
          return 0;
        }
        tbl = part.getTable();
      }
    } catch (FileNotFoundException e) {
      LOG.info("describe table: " + StringUtils.stringifyException(e));
      return 1;
    } catch (IOException e) {
      LOG.info("describe table: " + StringUtils.stringifyException(e));
      return 1;
    }

    try {

      LOG.info("DDLTask: got data for " + tbl.getName());

      List<FieldSchema> cols = null;
      if (colPath.equals(tableName)) {
        cols = tbl.getCols();
        if (part != null) {
          cols = part.getTPartition().getSd().getCols();
        }
      } else {
        cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
      }
      DataOutput outStream = (DataOutput)fs.create(descTbl.getResFile());
      Iterator<FieldSchema> iterCols = cols.iterator();
      while (iterCols.hasNext()) {
        // create a row per column
        FieldSchema col = iterCols.next();
        outStream.writeBytes(col.getName());
        outStream.write(separator);
        outStream.writeBytes(col.getType());
        outStream.write(separator);
        outStream.writeBytes(col.getComment() == null ? "" : col.getComment());
        outStream.write(terminator);
      }

      if (tableName.equals(colPath)) {
        // also return the partitioning columns
        List<FieldSchema> partCols = tbl.getPartCols();
        Iterator<FieldSchema> iterPartCols = partCols.iterator();
        while (iterPartCols.hasNext()) {
          FieldSchema col = iterPartCols.next();
          outStream.writeBytes(col.getName());
          outStream.write(separator);
          outStream.writeBytes(col.getType());
          outStream.write(separator);
          outStream.writeBytes(col.getComment() == null ? "" : col.getComment());
          outStream.write(terminator);
        }

        // if extended desc table then show the complete details of the table
        if (descTbl.isExt()) {
          // add empty line
          outStream.write(terminator);
          if (part != null) {
            // show partition information
            outStream.writeBytes("Detailed Partition Information");
            outStream.write(separator);
            outStream.writeBytes(part.getTPartition().toString());
            outStream.write(separator);
            // comment column is empty
            outStream.write(terminator);
          } else {
            // show table information
            outStream.writeBytes("Detailed Table Information");
            outStream.write(separator);
            outStream.writeBytes(tbl.getTTable().toString());
            outStream.write(separator);
            // comment column is empty
            outStream.write(terminator);
          }
        }
      }

      LOG.info("DDLTask: written data for " + tbl.getName());
      ((FSDataOutputStream) outStream).close();

    } catch (FileNotFoundException e) {
      LOG.info("describe table: " + StringUtils.stringifyException(e));
      return 1;