Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Partition
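
The excerpts below share one lookup pattern: fetch the Table, build a partition spec, and call Hive.getPartition with creation disabled, null-checking the result. Here is a minimal, self-contained sketch of that pattern, assuming a working HiveConf; the class name, the "page_views" table, and the "ds" key/value are illustrative placeholders rather than anything from the excerpts.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionLookupExample {
  public static Partition lookup(HiveConf conf) throws HiveException {
    Hive db = Hive.get(conf);
    // Table name and partition key/value are hypothetical.
    Table tbl = db.getTable("default", "page_views");
    Map<String, String> partSpec = new LinkedHashMap<String, String>();
    partSpec.put("ds", "2011-01-01");

    // Passing false tells getPartition not to create a missing partition;
    // the excerpts on this page always pass false and null-check instead.
    Partition part = db.getPartition(tbl, partSpec, false);
    if (part == null) {
      throw new HiveException("Partition " + partSpec + " does not exist");
    }
    return part;
  }
}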


    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ADDPARTITION);

    // If the partition is being added with IF NOT EXISTS, we should not
    // throw an error when the specified partition already exists.
    Partition checkPart = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
    if (checkPart != null && addPartitionDesc.getIfNotExists()) {
      return 0;
    }

    if (addPartitionDesc.getLocation() == null) {
      db.createPartition(tbl, addPartitionDesc.getPartSpec());
    } else {
      // set partition path relative to table
      db.createPartition(tbl, addPartitionDesc.getPartSpec(), new Path(tbl
          .getPath(), addPartitionDesc.getLocation()));
    }

    Partition part = db
        .getPartition(tbl, addPartitionDesc.getPartSpec(), false);
    work.getOutputs().add(new WriteEntity(part));

    return 0;
  }
View Full Code Here
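
Condensed into a reusable form, the create-or-skip logic above looks roughly like this. The wrapper class and parameter names are illustrative; only the getPartition and createPartition calls are taken from the excerpt.

import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

class AddPartitionSketch {
  // Returns the existing partition when ifNotExists is set; otherwise creates
  // it, optionally at a location relative to the table directory.
  static Partition addIfMissing(Hive db, Table tbl, Map<String, String> spec,
      String relativeLocation, boolean ifNotExists) throws HiveException {
    Partition existing = db.getPartition(tbl, spec, false);
    if (existing != null && ifNotExists) {
      return existing;
    }
    if (relativeLocation == null) {
      db.createPartition(tbl, spec);
    } else {
      // partition path is resolved relative to the table path
      db.createPartition(tbl, spec, new Path(tbl.getPath(), relativeLocation));
    }
    return db.getPartition(tbl, spec, false);
  }
}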


        throw new HiveException("Uable to update table");
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
View Full Code Here

    Table tbl = db.getTable(dbName, tblName);
    validateAlterTableType(tbl, AlterTableDesc.AlterTableTypes.ARCHIVE);

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("ARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (isArchived(p)) {
      // If there were a failure right after the metadata was updated in an
      // archiving operation, it's possible that the original, unarchived files
      // weren't deleted.
      Path originalDir = new Path(getOriginalLocation(p));
      Path leftOverIntermediateOriginal = new Path(originalDir.getParent(),
          originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);

      if (pathExists(leftOverIntermediateOriginal)) {
        console.printInfo("Deleting " + leftOverIntermediateOriginal +
        " left over from a previous archiving operation");
        deleteDir(leftOverIntermediateOriginal);
      }

      throw new HiveException("Specified partition is already archived");
    }

    Path originalDir = p.getPartitionPath();
    Path intermediateArchivedDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateOriginalDir = new Path(originalDir.getParent(),
        originalDir.getName() + INTERMEDIATE_ORIGINAL_DIR_SUFFIX);
    String archiveName = "data.har";
View Full Code Here

    if (simpleDesc.getPartSpec() == null) {
      throw new HiveException("ARCHIVE is for partitions only");
    }

    Map<String, String> partSpec = simpleDesc.getPartSpec();
    Partition p = db.getPartition(tbl, partSpec, false);

    if (tbl.getTableType() != TableType.MANAGED_TABLE) {
      throw new HiveException("UNARCHIVE can only be performed on managed tables");
    }

    if (p == null) {
      throw new HiveException("Specified partition does not exist");
    }

    if (!isArchived(p)) {
      Path location = new Path(p.getLocation());
      Path leftOverArchiveDir = new Path(location.getParent(),
          location.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);

      if (pathExists(leftOverArchiveDir)) {
        console.printInfo("Deleting " + leftOverArchiveDir + " left over " +
        "from a previous unarchiving operation");
        deleteDir(leftOverArchiveDir);
      }

      throw new HiveException("Specified partition is not archived");
    }

    Path originalLocation = new Path(getOriginalLocation(p));
    Path sourceDir = new Path(p.getLocation());
    Path intermediateArchiveDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_ARCHIVED_DIR_SUFFIX);
    Path intermediateExtractedDir = new Path(originalLocation.getParent(),
        originalLocation.getName() + INTERMEDIATE_EXTRACTED_DIR_SUFFIX);
View Full Code Here

        return 1;
      }
      return 0;
    }

    Partition par = db.getPartition(tbl, partSpec, false);
    if (par == null) {
      throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
    }
    HiveLock lck = lockMgr.lock(new HiveLockObject(par, lockData), mode, true, 0, 0);
    if (lck == null) {
View Full Code Here

    if (partSpec == null) {
      obj = new HiveLockObject(tbl, null);
    } else {
      Partition par = db.getPartition(tbl, partSpec, false);
      if (par == null) {
        throw new HiveException("Partition " + partSpec + " for table " + tabName + " does not exist");
      }
      obj = new HiveLockObject(par, null);
    }
View Full Code Here

   */
  private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException {
    // get the tables for the desired pattern - populate the output stream
    List<Table> tbls = new ArrayList<Table>();
    Map<String, String> part = showTblStatus.getPartSpec();
    Partition par = null;
    if (part != null) {
      Table tbl = db.getTable(showTblStatus.getDbName(), showTblStatus.getPattern());
      par = db.getPartition(tbl, part, false);
      if (par == null) {
        throw new HiveException("Partition " + part + " for table "
            + showTblStatus.getPattern() + " does not exist.");
      }
      tbls.add(tbl);
    } else {
      LOG.info("pattern: " + showTblStatus.getPattern());
      List<String> tblStr = db.getTablesForDb(showTblStatus.getDbName(),
          showTblStatus.getPattern());
      SortedSet<String> sortedTbls = new TreeSet<String>(tblStr);
      Iterator<String> iterTbls = sortedTbls.iterator();
      while (iterTbls.hasNext()) {
        // create a row per table name
        String tblName = iterTbls.next();
        Table tbl = db.getTable(showTblStatus.getDbName(), tblName);
        tbls.add(tbl);
      }
      LOG.info("results : " + tblStr.size());
    }

    // write the results in the file
    try {
      Path resFile = new Path(showTblStatus.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
      DataOutput outStream = fs.create(resFile);

      Iterator<Table> iterTables = tbls.iterator();
      while (iterTables.hasNext()) {
        // create a row per table name
        Table tbl = iterTables.next();
        String tableName = tbl.getTableName();
        String tblLoc = null;
        String inputFormatCls = null;
        String outputFormatCls = null;
        if (part != null) {
          if (par != null) {
            tblLoc = par.getDataLocation().toString();
            inputFormatCls = par.getInputFormatClass().getName();
            outputFormatCls = par.getOutputFormatClass().getName();
          }
        } else {
          tblLoc = tbl.getDataLocation().toString();
          inputFormatCls = tbl.getInputFormatClass().getName();
          outputFormatCls = tbl.getOutputFormatClass().getName();
        }

        String owner = tbl.getOwner();
        List<FieldSchema> cols = tbl.getCols();
        String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
        boolean isPartitioned = tbl.isPartitioned();
        String partitionCols = "";
        if (isPartitioned) {
          partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
              "partition_columns", tbl.getPartCols());
        }

        outStream.writeBytes("tableName:" + tableName);
        outStream.write(terminator);
        outStream.writeBytes("owner:" + owner);
        outStream.write(terminator);
        outStream.writeBytes("location:" + tblLoc);
        outStream.write(terminator);
        outStream.writeBytes("inputformat:" + inputFormattCls);
        outStream.write(terminator);
        outStream.writeBytes("outputformat:" + outputFormattCls);
        outStream.write(terminator);
        outStream.writeBytes("columns:" + ddlCols);
        outStream.write(terminator);
        outStream.writeBytes("partitioned:" + isPartitioned);
        outStream.write(terminator);
        outStream.writeBytes("partitionColumns:" + partitionCols);
        outStream.write(terminator);
        // output file system information
        Path tablLoc = tbl.getPath();
        List<Path> locations = new ArrayList<Path>();
        if (isPartitioned) {
          if (par == null) {
            for (Partition curPart : db.getPartitions(tbl)) {
              locations.add(new Path(curPart.getTPartition().getSd()
                  .getLocation()));
            }
          } else {
            locations.add(new Path(par.getTPartition().getSd().getLocation()));
          }
        } else {
          locations.add(tablLoc);
        }
        writeFileSystemStats(outStream, locations, tablLoc, false, 0);
View Full Code Here
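
The status rows written above take most of their per-partition fields straight off the Partition object. A small sketch of just those accessors follows; the wrapper class is illustrative, while the calls themselves all appear in the excerpt.

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;

class PartitionStatusFieldsSketch {
  static String[] describe(Partition part) throws HiveException {
    // Each of these calls is used by the table-status example above.
    return new String[] {
        part.getDataLocation().toString(),                   // partition data directory
        part.getInputFormatClass().getName(),                // fully-qualified InputFormat class
        part.getOutputFormatClass().getName(),               // fully-qualified OutputFormat class
        part.getTPartition().getSd().getLocation()           // thrift StorageDescriptor location
    };
  }
}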

    String tableName = colPath.substring(0,
        colPath.indexOf('.') == -1 ? colPath.length() : colPath.indexOf('.'));

    // describe the table - populate the output stream
    Table tbl = db.getTable(db.getCurrentDatabase(), tableName, false);
    Partition part = null;
    try {
      Path resFile = new Path(descTbl.getResFile());
      if (tbl == null) {
        FileSystem fs = resFile.getFileSystem(conf);
        DataOutput outStream = fs.create(resFile);
        String errMsg = "Table " + tableName + " does not exist";
        outStream.write(errMsg.getBytes("UTF-8"));
        ((FSDataOutputStream) outStream).close();
        return 0;
      }
      if (descTbl.getPartSpec() != null) {
        part = db.getPartition(tbl, descTbl.getPartSpec(), false);
        if (part == null) {
          FileSystem fs = resFile.getFileSystem(conf);
          DataOutput outStream = fs.create(resFile);
          String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
              + tableName + " does not exist";
          outStream.write(errMsg.getBytes("UTF-8"));
          ((FSDataOutputStream) outStream).close();
          return 0;
        }
        tbl = part.getTable();
      }
    } catch (FileNotFoundException e) {
      LOG.info("describe table: " + stringifyException(e));
      return 1;
    } catch (IOException e) {
      LOG.info("describe table: " + stringifyException(e));
      return 1;
    }

    try {

      LOG.info("DDLTask: got data for " + tbl.getTableName());

      Path resFile = new Path(descTbl.getResFile());
      FileSystem fs = resFile.getFileSystem(conf);
      DataOutput outStream = fs.create(resFile);

      if (colPath.equals(tableName)) {
        if (!descTbl.isFormatted()) {
          List<FieldSchema> cols = tbl.getCols();
          // colPath equals tableName in this branch, so include the partition columns
          cols.addAll(tbl.getPartCols());
          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
        } else {
          outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
        }
      } else {
        List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
        if (descTbl.isFormatted()) {
          outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
        } else {
          outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
        }
      }

      if (tableName.equals(colPath)) {

        if (descTbl.isFormatted()) {
          if (part != null) {
            outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
          } else {
            outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
          }
        }

        // for DESCRIBE EXTENDED, show the complete details of the table
        if (descTbl.isExt()) {
          // add empty line
          outStream.write(terminator);
          if (part != null) {
            // show partition information
            outStream.writeBytes("Detailed Partition Information");
            outStream.write(separator);
            outStream.writeBytes(part.getTPartition().toString());
            outStream.write(separator);
            // comment column is empty
            outStream.write(terminator);
          } else {
            // show table information
View Full Code Here

   */
  private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
    // alter the table
    Table tbl = db.getTable(alterTbl.getOldName());

    Partition part = null;
    if(alterTbl.getPartSpec() != null) {
      part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
      if(part == null) {
        console.printError("Partition : " + alterTbl.getPartSpec().toString()
            + " does not exist.");
        return 1;
      }
    }

    validateAlterTableType(tbl, alterTbl.getOp());

    if (tbl.isView()) {
      if (!alterTbl.getExpectView()) {
        throw new HiveException("Cannot alter a view with ALTER TABLE");
      }
    } else {
      if (alterTbl.getExpectView()) {
        throw new HiveException("Cannot alter a base table with ALTER VIEW");
      }
    }

    Table oldTbl = tbl.copy();

    if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
      tbl.setTableName(alterTbl.getNewName());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
      List<FieldSchema> newCols = alterTbl.getNewCols();
      List<FieldSchema> oldCols = tbl.getCols();
      if (tbl.getSerializationLib().equals(
          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
        console
            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
        tbl.getTTable().getSd().setCols(newCols);
      } else {
        // make sure the new columns do not already exist
        Iterator<FieldSchema> iterNewCols = newCols.iterator();
        while (iterNewCols.hasNext()) {
          FieldSchema newCol = iterNewCols.next();
          String newColName = newCol.getName();
          Iterator<FieldSchema> iterOldCols = oldCols.iterator();
          while (iterOldCols.hasNext()) {
            String oldColName = iterOldCols.next().getName();
            if (oldColName.equalsIgnoreCase(newColName)) {
              console.printError("Column '" + newColName + "' exists");
              return 1;
            }
          }
          oldCols.add(newCol);
        }
        tbl.getTTable().getSd().setCols(oldCols);
      }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
      List<FieldSchema> oldCols = tbl.getCols();
      List<FieldSchema> newCols = new ArrayList<FieldSchema>();
      Iterator<FieldSchema> iterOldCols = oldCols.iterator();
      String oldName = alterTbl.getOldColName();
      String newName = alterTbl.getNewColName();
      String type = alterTbl.getNewColType();
      String comment = alterTbl.getNewColComment();
      boolean first = alterTbl.getFirst();
      String afterCol = alterTbl.getAfterCol();
      FieldSchema column = null;

      boolean found = false;
      int position = -1;
      if (first) {
        position = 0;
      }

      int i = 1;
      while (iterOldCols.hasNext()) {
        FieldSchema col = iterOldCols.next();
        String oldColName = col.getName();
        if (oldColName.equalsIgnoreCase(newName)
            && !oldColName.equalsIgnoreCase(oldName)) {
          console.printError("Column '" + newName + "' exists");
          return 1;
        } else if (oldColName.equalsIgnoreCase(oldName)) {
          col.setName(newName);
          if (type != null && !type.trim().equals("")) {
            col.setType(type);
          }
          if (comment != null) {
            col.setComment(comment);
          }
          found = true;
          if (first || (afterCol != null && !afterCol.trim().equals(""))) {
            column = col;
            continue;
          }
        }

        if (afterCol != null && !afterCol.trim().equals("")
            && oldColName.equalsIgnoreCase(afterCol)) {
          position = i;
        }

        i++;
        newCols.add(col);
      }

      // did not find the column
      if (!found) {
        console.printError("Column '" + oldName + "' does not exist");
        return 1;
      }
      // an AFTER column was specified, but it was not found
      if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
        console.printError("Column '" + afterCol + "' does not exist");
        return 1;
      }

      if (position >= 0) {
        newCols.add(position, column);
      }

      tbl.getTTable().getSd().setCols(newCols);
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
      // change SerDe to LazySimpleSerDe if it is columnsetSerDe
      if (tbl.getSerializationLib().equals(
          "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
        console
            .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
        tbl.setSerializationLib(LazySimpleSerDe.class.getName());
      } else if (!tbl.getSerializationLib().equals(
          MetadataTypedColumnsetSerDe.class.getName())
          && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
          && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
          && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
        console.printError("Replace columns is not supported for this table. "
            + "SerDe may be incompatible.");
        return 1;
      }
      tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) {
      tbl.getTTable().getParameters().putAll(alterTbl.getProps());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
      tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
          alterTbl.getProps());
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
      tbl.setSerializationLib(alterTbl.getSerdeName());
      if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
        tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
            alterTbl.getProps());
      }
      tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
          .getDeserializer()));
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
      if(part != null) {
        part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
        part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
        if (alterTbl.getSerdeName() != null) {
          part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
              alterTbl.getSerdeName());
        }
      } else {
        tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
        tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
        if (alterTbl.getSerdeName() != null) {
          tbl.setSerializationLib(alterTbl.getSerdeName());
        }
      }
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
      boolean protectModeEnable = alterTbl.isProtectModeEnable();
      AlterTableDesc.ProtectModeType protectMode = alterTbl.getProtectModeType();

      ProtectMode mode = null;
      if(part != null) {
        mode = part.getProtectMode();
      } else {
        mode = tbl.getProtectMode();
      }

      if (protectModeEnable
          && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
        mode.offline = true;
      } else if (protectModeEnable
          && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
        mode.noDrop = true;
      } else if (!protectModeEnable
          && protectMode == AlterTableDesc.ProtectModeType.OFFLINE) {
        mode.offline = false;
      } else if (!protectModeEnable
          && protectMode == AlterTableDesc.ProtectModeType.NO_DROP) {
        mode.noDrop = false;
      }

      if (part != null) {
        part.setProtectMode(mode);
      } else {
        tbl.setProtectMode(mode);
      }

    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
      // validate sort columns and bucket columns
      List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
          .getCols());
      Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
      if (alterTbl.getSortColumns() != null) {
        Utilities.validateColumnNames(columns, Utilities
            .getColumnNamesFromSortCols(alterTbl.getSortColumns()));
      }

      int numBuckets = -1;
      ArrayList<String> bucketCols = null;
      ArrayList<Order> sortCols = null;

      // -1 buckets means to turn off bucketing
      if (alterTbl.getNumberBuckets() == -1) {
        bucketCols = new ArrayList<String>();
        sortCols = new ArrayList<Order>();
        numBuckets = -1;
      } else {
        bucketCols = alterTbl.getBucketColumns();
        sortCols = alterTbl.getSortColumns();
        numBuckets = alterTbl.getNumberBuckets();
      }
      tbl.getTTable().getSd().setBucketCols(bucketCols);
      tbl.getTTable().getSd().setNumBuckets(numBuckets);
      tbl.getTTable().getSd().setSortCols(sortCols);
    } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
      String newLocation = alterTbl.getNewLocation();
      try {
        URI locURI = new URI(newLocation);
        if (!locURI.isAbsolute() || locURI.getScheme() == null
            || locURI.getScheme().trim().equals("")) {
          throw new HiveException(
              newLocation
                  + " is not absolute or has no scheme information. "
                  + "Please specify a complete absolute uri with scheme information.");
        }
        if (part != null) {
          part.setLocation(newLocation);
        } else {
          tbl.setDataLocation(locURI);
        }
      } catch (URISyntaxException e) {
        throw new HiveException(e);
      }
    } else {
      console.printError("Unsupported Alter commnad");
      return 1;
    }

    if(part == null) {
      if (!updateModifiedParameters(tbl.getTTable().getParameters(), conf)) {
        return 1;
      }
      try {
        tbl.checkValidity();
      } catch (HiveException e) {
        console.printError("Invalid table columns : " + e.getMessage(),
            stringifyException(e));
        return 1;
      }
    } else {
      if (!updateModifiedParameters(part.getParameters(), conf)) {
        return 1;
      }
    }

    try {
View Full Code Here
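
The alter-table excerpt is cut off before the modified metadata is written back to the metastore. A hedged sketch of that final step follows, reusing the alterPartition call from the touch example near the top of the page and its Hive.alterTable counterpart; the wrapper class is illustrative.

import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

class PersistAlterSketch {
  // Writes the modified table (or partition, when one was looked up) back to
  // the metastore; part may be null when the ALTER applies to the whole table.
  static void persist(Hive db, String tblName, Table tbl, Partition part)
      throws HiveException {
    try {
      if (part == null) {
        db.alterTable(tblName, tbl);
      } else {
        db.alterPartition(tblName, part);
      }
    } catch (InvalidOperationException e) {
      throw new HiveException("Unable to alter table or partition", e);
    }
  }
}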

              match = false;
              break;
            }
          }
          if (match) {
            Partition p = db.getPartition(tbl, part, false);
            if (!p.canDrop()) {
              throw new HiveException("Table " + tbl.getTableName() +
                  " Partition " + p.getName() +
                  " is protected from being dropped");
            }

            partsToDelete.add(p);
            it.remove();
View Full Code Here
