Package org.apache.hadoop.hive.ql.metadata

Examples of org.apache.hadoop.hive.ql.metadata.Partition
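
The fragments below are excerpts from Apache Hive's query layer, mostly its DDL semantic analyzers. One pattern recurs throughout: resolve a Partition via Hive.getPartition(table, partSpec, forceCreate) and register it as a ReadEntity or WriteEntity so the planner can track lineage and locking. A minimal sketch of that pattern, using only calls that appear in the fragments themselves (the class and method names PartitionLookupSketch and resolveForWrite are hypothetical):

    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartitionLookupSketch {
      // Hypothetical helper mirroring the fragments below: look up one partition
      // (forceCreate=false returns null instead of creating a missing partition)
      // and mark it as a write target for the plan.
      static Partition resolveForWrite(Hive db, Table tab, Map<String, String> partSpec,
          Set<WriteEntity> outputs) throws HiveException {
        Partition part = db.getPartition(tab, partSpec, false);
        if (part != null) {
          outputs.add(new WriteEntity(part));
        }
        return part;
      }
    }

The first fragment registers entities for an ALTER TABLE statement: the table is read, and the write target is either one fully specified partition or, under ALTERPROTECTMODE, every partition matching a partial spec.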


      outputs.add(new WriteEntity(tab));
    } else {
      inputs.add(new ReadEntity(tab));
      if (desc == null || desc.getOp() != AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) {
        // a fully specified partition: exactly one write target
        Partition part = getPartition(tab, partSpec, true);
        outputs.add(new WriteEntity(part));
      } else {
        // ALTERPROTECTMODE accepts a partial spec: every matching partition is written
        for (Partition part : getPartitions(tab, partSpec, true)) {
          outputs.add(new WriteEntity(part));
        }
      }
    }
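
Apparently from the ALTER TABLE ... PARTITION ... CONCATENATE analysis: a partitioned source must name an existing partition, which supplies the bucket columns, input format, archive status, and list-bucketing context; the partition path is also rewritten onto the table's filesystem.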


      if (tblObj.isPartitioned()) {
        if (partSpec == null) {
          throw new SemanticException("source table " + tableName
              + " is partitioned but no partition desc found.");
        } else {
          Partition part = getPartition(tblObj, partSpec, false);
          if (part == null) {
            throw new SemanticException("source table " + tableName
                + " is partitioned but partition not found.");
          }
          bucketCols = part.getBucketCols();
          inputFormatClass = part.getInputFormatClass();
          isArchived = ArchiveUtils.isArchived(part);

          Path tabPath = tblObj.getPath();
          Path partPath = part.getPartitionPath();

          // if the table is in a different dfs than the partition,
          // replace the partition's dfs with the table's dfs.
          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
              .getAuthority(), partPath.toUri().getPath());

          oldTblPartLoc = partPath;

          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
        }
      } else {
        // unpartitioned table: take format and bucketing info from the table itself
        inputFormatClass = tblObj.getInputFormatClass();
        bucketCols = tblObj.getBucketCols();
      }
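
Walking the AST of ALTER TABLE ... ADD PARTITION: each TOK_PARTSPEC (plus an optional TOK_PARTITIONLOCATION) becomes an AddPartitionDesc, and existing partitions are skipped when IF NOT EXISTS is given.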

    for (int num = start; num < numCh; num++) {
      ASTNode child = (ASTNode) ast.getChild(num);
      switch (child.getToken().getType()) {
      case HiveParser.TOK_PARTSPEC:
        // a new spec starts: flush the previous one, unless it already
        // exists and IF NOT EXISTS was specified
        if (currentPart != null) {
          Partition partition = getPartitionForOutput(tab, currentPart);
          if (partition == null || !ifNotExists) {
            AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
              tab.getDbName(), tblName, currentPart,
              currentLocation, ifNotExists, expectView);
            partitionDescs.add(addPartitionDesc);
          }
          currentLocation = null;
        }
        currentPart = getPartSpec(child);
        validatePartSpec(tab, currentPart, (ASTNode)child, conf);
        break;
      case HiveParser.TOK_PARTITIONLOCATION:
        // if location specified, set in partition
        currentLocation = unescapeSQLString(child.getChild(0).getText());
        break;
      default:
        throw new SemanticException("Unknown child: " + child);
      }
    }

    // add the last one
    if (currentPart != null) {
      Partition partition = getPartitionForOutput(tab, currentPart);
      if (partition == null || !ifNotExists) {
        AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
          tab.getDbName(), tblName, currentPart,
          currentLocation, ifNotExists, expectView);
        partitionDescs.add(addPartitionDesc);
      }
    }
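
The getPartitionForOutput helper used above: validate the partition values, fetch without creating, and record a WriteEntity when the partition exists.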

  private Partition getPartitionForOutput(Table tab, Map<String, String> currentPart)
    throws SemanticException {
    validatePartitionValues(currentPart);
    try {
      Partition partition = db.getPartition(tab, currentPart, false);
      if (partition != null) {
        outputs.add(new WriteEntity(partition));
      }
      return partition;
    } catch (HiveException e) {
      // the original listing is truncated here; rethrowing as a
      // SemanticException is an assumed completion
      throw new SemanticException(e.getMessage(), e);
    }
  }
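
Assembling a partition list: the branch truncated above lists partitions and logs failures, while the visible branch resolves a single partition from a full spec and tolerates a bad one.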

          LOG.error("Got HiveException during obtaining list of partitions");
        }
      } else {
        parts = new ArrayList<Partition>();
        try {
          Partition p = db.getPartition(tab, partSpec, false);
          if (p != null) {
            parts.add(p);
          }
        } catch (HiveException e) {
          LOG.debug("Wrong specification");
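
A stricter variant: getPartition fetches without creating and, when throwException is set, reports a missing partition as INVALID_PARTITION.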

  }

  private Partition getPartition(Table table, Map<String, String> partSpec, boolean throwException)
      throws SemanticException {
    try {
      Partition partition = db.getPartition(table, partSpec, false);
      if (partition == null && throwException) {
        throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec));
      }
      return partition;
    } catch (HiveException e) {
      // assumed completion of the truncated listing: surface the failure
      // with the same INVALID_PARTITION message
      throw new SemanticException(toMessage(ErrorMsg.INVALID_PARTITION, partSpec), e);
    }
  }
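
Building a partition spec from name components: entries from index 2 onward are key=value pairs, and the subsequent lookup deliberately maps any HiveException to null.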

      // components from index 2 onward are key=value pairs of the partition spec
      for (indx = 2; indx < names.length; indx++) {
        String[] partVals = names[indx].split("=");
        partSpec.put(partVals[0], partVals[1]);
      }

      Partition partn;
      try {
        partn = db.getPartition(tab, partSpec, false);
      } catch (HiveException e) {
        partn = null;
      }
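
From a sort-merge join check: the sort columns of the first partition of the first table set the reference order, and every non-denied partition is validated against the join columns.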

      }
      List<Partition> partitions = prunedParts.getNotDeniedPartns();
      // Populate the names and order of columns for the first partition of the
      // first table
      if ((pos == 0) && (partitions != null) && (!partitions.isEmpty())) {
        Partition firstPartition = partitions.get(0);
        sortColumnsFirstTable.addAll(firstPartition.getSortCols());
      }

      for (Partition partition : prunedParts.getNotDeniedPartns()) {
        // assumed completion of the truncated call: each partition's sort
        // order must line up with the join columns
        if (!checkSortColsAndJoinCols(partition.getSortCols(),
            joinCols,
            sortColumnsFirstTable)) {
          return false;
        }
      }
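
Privilege analysis: when the subject is a table, the WriteEntity is the named partition if a partition spec was given, otherwise the table itself.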

    }

    // subject.getTable() is true when the privilege target is a table
    if (subject.getTable()) {
      Table tbl = getTable(subject.getObject(), true);
      if (subject.getPartSpec() != null) {
        Partition part = getPartition(tbl, subject.getPartSpec(), true);
        outputs.add(new WriteEntity(part));
      } else {
        outputs.add(new WriteEntity(tbl));
      }
    }
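
Finally, TRUNCATE TABLE analysis: the spec is turned into a TruncateTableDesc and a DDL task; the truncate-column path also rejects indexed tables and pulls column, bucketing, input-format, archive, and list-bucketing details from the partition, or from the table when it is unpartitioned.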

          outputs.add(new WriteEntity(partition));
        }
      }
    } else {
      if (isFullSpec(table, partSpec)) {
        Partition partition = getPartition(table, partSpec, true);
        outputs.add(new WriteEntity(partition));
      } else {
        for (Partition partition : getPartitions(table, partSpec, false)) {
          outputs.add(new WriteEntity(partition));
        }
      }
    }

    TruncateTableDesc truncateTblDesc = new TruncateTableDesc(tableName, partSpec);

    DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), truncateTblDesc);
    Task<? extends Serializable> truncateTask = TaskFactory.get(ddlWork, conf);

    // Is this a truncate-column command (a column list is the second child)?
    List<String> columnNames = null;
    if (ast.getChildCount() == 2) {
      try {
        columnNames = getColumnNames((ASTNode)ast.getChild(1));

        // Throw an error if the table is indexed
        List<Index> indexes = db.getIndexes(table.getDbName(), tableName, (short)1);
        if (indexes != null && indexes.size() > 0) {
          throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg());
        }

        List<String> bucketCols = null;
        Class<? extends InputFormat> inputFormatClass = null;
        boolean isArchived = false;
        Path newTblPartLoc = null;
        Path oldTblPartLoc = null;
        List<FieldSchema> cols = null;
        ListBucketingCtx lbCtx = null;
        boolean isListBucketed = false;
        List<String> listBucketColNames = null;

        if (table.isPartitioned()) {
          // a full spec naming an existing partition is expected here
          Partition part = db.getPartition(table, partSpec, false);

          Path tabPath = table.getPath();
          Path partPath = part.getPartitionPath();

          // if the table is in a different dfs than the partition,
          // replace the partition's dfs with the table's dfs.
          newTblPartLoc = new Path(tabPath.toUri().getScheme(), tabPath.toUri()
              .getAuthority(), partPath.toUri().getPath());

          oldTblPartLoc = partPath;

          cols = part.getCols();
          bucketCols = part.getBucketCols();
          inputFormatClass = part.getInputFormatClass();
          isArchived = ArchiveUtils.isArchived(part);
          lbCtx = constructListBucketingCtx(part.getSkewedColNames(), part.getSkewedColValues(),
              part.getSkewedColValueLocationMaps(), part.isStoredAsSubDirectories(), conf);
          isListBucketed = part.isStoredAsSubDirectories();
          listBucketColNames = part.getSkewedColNames();
        } else {
          // input and output are the same
          oldTblPartLoc = table.getPath();
          newTblPartLoc = table.getPath();
          cols = table.getCols();
