Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.AddPartitionDesc
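
AddPartitionDesc is the DDL plan descriptor behind ALTER TABLE ... ADD PARTITION (it also drives the IMPORT and MSCK REPAIR paths). The snippets below show two generations of its API: an older constructor that describes a single partition, and a batched form where one descriptor accumulates many partitions via addPartition(spec, location). The following is a minimal sketch of the batched form, using only calls that appear in the snippets below; the database and table names are placeholders, and hive-exec is assumed to be on the classpath:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;

public class AddPartitionDescExample {
  public static void main(String[] args) {
    // One descriptor carries any number of partitions in the batched API.
    // "default" and "page_views" are placeholder database/table names.
    AddPartitionDesc desc =
        new AddPartitionDesc("default", "page_views", true /* ifNotExists */);

    // Partition spec: column name -> value, in partition-column order.
    Map<String, String> spec = new LinkedHashMap<String, String>();
    spec.put("ds", "2010-03-03");

    // A null location leaves the partition at the table's default path.
    desc.addPartition(spec, null);

    System.out.println("partitions queued: " + desc.getPartitionCount());
  }
}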


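Walking the ALTER TABLE ... ADD PARTITION AST with the older API: each TOK_PARTSPEC token closes out the previous partition spec by creating one AddPartitionDesc per partition, and TOK_PARTITIONLOCATION attaches an optional location.
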
        ifNotExists = true;
        break;
      case HiveParser.TOK_PARTSPEC:
        if (currentPart != null) {
          validatePartitionValues(currentPart);
          AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
              db.getCurrentDatabase(), tblName, currentPart,
              currentLocation, ifNotExists, expectView);
          partitionDescs.add(addPartitionDesc);
        }
        // create new partition, set values
        currentLocation = null;
        currentPart = partIter.next();
        break;
      case HiveParser.TOK_PARTITIONLOCATION:
        // if location specified, set in partition
        currentLocation = unescapeSQLString(child.getChild(0).getText());
        break;
      default:
        throw new SemanticException("Unknown child: " + child);
      }
    }

    // add the last one
    if (currentPart != null) {
      validatePartitionValues(currentPart);
      AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
          db.getCurrentDatabase(), tblName, currentPart,
          currentLocation, ifNotExists, expectView);
      partitionDescs.add(addPartitionDesc);
    }


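Import analysis: one AddPartitionDesc is built per partition read from the exported metadata and populated from the partition's StorageDescriptor. The IMPORT AST is then parsed, and if a target partition spec is given, the descriptor list is filtered down to that single partition before the add-partition tasks are created.
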
        for (FieldSchema fsc : partCols) {
          partColNames.add(fsc.getName());
        }
        List<Partition> partitions = rv.getValue();
        for (Partition partition : partitions) {
          AddPartitionDesc partDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
              EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
              partition.getSd().getLocation(), partition.getParameters());
          partDesc.setInputFormat(partition.getSd().getInputFormat());
          partDesc.setOutputFormat(partition.getSd().getOutputFormat());
          partDesc.setNumBuckets(partition.getSd().getNumBuckets());
          partDesc.setCols(partition.getSd().getCols());
          partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
          partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
          partDesc.setBucketCols(partition.getSd().getBucketCols());
          partDesc.setSortCols(partition.getSd().getSortCols());
          partDesc.setLocation(new Path(fromPath,
              Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
          partitionDescs.add(partDesc);
        }
      } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
      }
      LOG.debug("metadata read and parsed");
      for (int i = 1; i < ast.getChildCount(); ++i) {
        ASTNode child = (ASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
        case HiveParser.KW_EXTERNAL:
          tblDesc.setExternal(true);
          break;
        case HiveParser.TOK_TABLELOCATION:
          String location = unescapeSQLString(child.getChild(0).getText());
          location = EximUtil.relativeToAbsolutePath(conf, location);
          tblDesc.setLocation(location);
          break;
        case HiveParser.TOK_TAB:
          Tree tableTree = child.getChild(0);
          // initialize destination table/partition
          String tableName = getUnescapedName((ASTNode)tableTree);
          tblDesc.setTableName(tableName);
          // get partition metadata if partition specified
          LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
          if (child.getChildCount() == 2) {
            ASTNode partspec = (ASTNode) child.getChild(1);
            // partSpec is a mapping from partition column name to its value.
            for (int j = 0; j < partspec.getChildCount(); ++j) {
              ASTNode partspec_val = (ASTNode) partspec.getChild(j);
              String val = null;
              String colName = unescapeIdentifier(partspec_val.getChild(0)
                    .getText().toLowerCase());
              if (partspec_val.getChildCount() < 2) { // DP in the form of T
                                                      // partition (ds, hr)
                throw new SemanticException(
                      ErrorMsg.INVALID_PARTITION
                          .getMsg(" - Dynamic partitions not allowed"));
              } else { // in the form of T partition (ds="2010-03-03")
                val = stripQuotes(partspec_val.getChild(1).getText());
              }
              partSpec.put(colName, val);
            }
            boolean found = false;
            for (Iterator<AddPartitionDesc> partnIter = partitionDescs
                  .listIterator(); partnIter.hasNext();) {
              AddPartitionDesc addPartitionDesc = partnIter.next();
              if (!found && addPartitionDesc.getPartSpec().equals(partSpec)) {
                found = true;
              } else {
                partnIter.remove();
              }
            }
            if (!found) {
              throw new SemanticException(
                    ErrorMsg.INVALID_PARTITION
                        .getMsg(" - Specified partition not found in import directory"));
            }
          }
        }
      }
      if (tblDesc.getTableName() == null) {
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
      } else {
        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            if (db.getPartition(table, addPartitionDesc.getPartSpec(), false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS
                      .getMsg(partSpecToString(addPartitionDesc.getPartSpec())));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()

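DDL task dispatch: when the DDLWork carries an AddPartitionDesc, it is executed via addPartition.
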
      CreateViewDesc crtView = work.getCreateViewDesc();
      if (crtView != null) {
        return createView(db, crtView);
      }

      AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
      if (addPartitionDesc != null) {
        return addPartition(db, addPartitionDesc);
      }

      RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();


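A later variant of the same dispatch, where one AddPartitionDesc can carry several partitions and is executed via addPartitions.
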
      CreateViewDesc crtView = work.getCreateViewDesc();
      if (crtView != null) {
        return createView(db, crtView);
      }

      AddPartitionDesc addPartitionDesc = work.getAddPartitionDesc();
      if (addPartitionDesc != null) {
        return addPartitions(db, addPartitionDesc);
      }

      RenamePartitionDesc renamePartitionDesc = work.getRenamePartitionDesc();

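MSCK REPAIR TABLE: partitions found on the filesystem but missing from the metastore are batched into a single AddPartitionDesc via addPartition(spec, null) and created in one createPartitions call.
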
      Table t = db.newTable(msckDesc.getTableName());
      checker.checkMetastore(t.getDbName(), t.getTableName(), msckDesc.getPartSpecs(), result);
      List<CheckResult.PartitionResult> partsNotInMs = result.getPartitionsNotInMs();
      if (msckDesc.isRepairPartitions() && !partsNotInMs.isEmpty()) {
        Table table = db.getTable(msckDesc.getTableName());
        AddPartitionDesc apd = new AddPartitionDesc(
            table.getDbName(), table.getTableName(), false);
        try {
          for (CheckResult.PartitionResult part : partsNotInMs) {
            apd.addPartition(Warehouse.makeSpecFromName(part.getPartitionName()), null);
            repairOutput.add("Repair: Added partition to metastore "
                + msckDesc.getTableName() + ':' + part.getPartitionName());
          }
          db.createPartitions(apd);
        } catch (Exception e) {

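The batched form of the ALTER TABLE ... ADD PARTITION analyzer: one AddPartitionDesc accumulates every (spec, location) pair, partition locations are registered as ReadEntity inputs, and a single DDLWork task is emitted. For views, an internal SELECT is compiled to capture the underlying table partition dependencies.
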
    int start = ifNotExists ? 2 : 1;

    String currentLocation = null;
    Map<String, String> currentPart = null;
    // Parser has done some verification, so the order of tokens doesn't need to be verified here.
    AddPartitionDesc addPartitionDesc = new AddPartitionDesc(tab.getDbName(), tblName, ifNotExists);
    for (int num = start; num < numCh; num++) {
      ASTNode child = (ASTNode) ast.getChild(num);
      switch (child.getToken().getType()) {
      case HiveParser.TOK_PARTSPEC:
        if (currentPart != null) {
          addPartitionDesc.addPartition(currentPart, currentLocation);
          currentLocation = null;
        }
        currentPart = getPartSpec(child);
        validatePartitionValues(currentPart); // validate reserved values
        validatePartSpec(tab, currentPart, child, conf, true);
        break;
      case HiveParser.TOK_PARTITIONLOCATION:
        // if location specified, set in partition
        if (isView) {
          throw new SemanticException("LOCATION clause illegal for view partition");
        }
        currentLocation = unescapeSQLString(child.getChild(0).getText());
        boolean isLocal = false;
        try {
          // best-effort check to determine if this is a local file
          String scheme = new URI(currentLocation).getScheme();
          if (scheme != null) {
            isLocal = FileUtils.isLocalFile(conf, currentLocation);
          }
        } catch (URISyntaxException e) {
          LOG.warn("Unable to create URI from " + currentLocation, e);
        }
        inputs.add(new ReadEntity(new Path(currentLocation), isLocal));
        break;
      default:
        throw new SemanticException("Unknown child: " + child);
      }
    }

    // add the last one
    if (currentPart != null) {
      addPartitionDesc.addPartition(currentPart, currentLocation);
    }

    if (addPartitionDesc.getPartitionCount() == 0) {
      // nothing to do
      return;
    }

    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf));

    if (isView) {
      // Compile internal query to capture underlying table partition dependencies
      StringBuilder cmd = new StringBuilder();
      cmd.append("SELECT * FROM ");
      cmd.append(HiveUtils.unparseIdentifier(tblName));
      cmd.append(" WHERE ");
      boolean firstOr = true;
      for (int i = 0; i < addPartitionDesc.getPartitionCount(); ++i) {
        AddPartitionDesc.OnePartitionDesc partitionDesc = addPartitionDesc.getPartition(i);
        if (firstOr) {
          firstOr = false;
        } else {
          cmd.append(" OR ");
        }

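Import analysis against the batched API: each partition still gets its own descriptor (note the TODO in the code), but the spec is now read back through getPartition(0).
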
          partColNames.add(fsc.getName());
        }
        List<Partition> partitions = rv.getValue();
        for (Partition partition : partitions) {
          // TODO: this should not create AddPartitionDesc per partition
          AddPartitionDesc partsDesc = new AddPartitionDesc(dbname, tblDesc.getTableName(),
              EximUtil.makePartSpec(tblDesc.getPartCols(), partition.getValues()),
              partition.getSd().getLocation(), partition.getParameters());
          AddPartitionDesc.OnePartitionDesc partDesc = partsDesc.getPartition(0);
          partDesc.setInputFormat(partition.getSd().getInputFormat());
          partDesc.setOutputFormat(partition.getSd().getOutputFormat());
          partDesc.setNumBuckets(partition.getSd().getNumBuckets());
          partDesc.setCols(partition.getSd().getCols());
          partDesc.setSerializationLib(partition.getSd().getSerdeInfo().getSerializationLib());
          partDesc.setSerdeParams(partition.getSd().getSerdeInfo().getParameters());
          partDesc.setBucketCols(partition.getSd().getBucketCols());
          partDesc.setSortCols(partition.getSd().getSortCols());
          partDesc.setLocation(new Path(fromPath,
              Warehouse.makePartName(tblDesc.getPartCols(), partition.getValues())).toString());
          partitionDescs.add(partsDesc);
        }
      } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
      }
      LOG.debug("metadata read and parsed");
      for (int i = 1; i < ast.getChildCount(); ++i) {
        ASTNode child = (ASTNode) ast.getChild(i);
        switch (child.getToken().getType()) {
        case HiveParser.KW_EXTERNAL:
          tblDesc.setExternal(true);
          break;
        case HiveParser.TOK_TABLELOCATION:
          String location = unescapeSQLString(child.getChild(0).getText());
          location = EximUtil.relativeToAbsolutePath(conf, location);
          tblDesc.setLocation(location);
          break;
        case HiveParser.TOK_TAB:
          Tree tableTree = child.getChild(0);
          // initialize destination table/partition
          String tableName = getUnescapedName((ASTNode)tableTree);
          tblDesc.setTableName(tableName);
          // get partition metadata if partition specified
          LinkedHashMap<String, String> partSpec = new LinkedHashMap<String, String>();
          if (child.getChildCount() == 2) {
            ASTNode partspec = (ASTNode) child.getChild(1);
            // partSpec is a mapping from partition column name to its value.
            for (int j = 0; j < partspec.getChildCount(); ++j) {
              ASTNode partspec_val = (ASTNode) partspec.getChild(j);
              String val = null;
              String colName = unescapeIdentifier(partspec_val.getChild(0)
                    .getText().toLowerCase());
              if (partspec_val.getChildCount() < 2) { // DP in the form of T
                                                      // partition (ds, hr)
                throw new SemanticException(
                      ErrorMsg.INVALID_PARTITION
                          .getMsg(" - Dynamic partitions not allowed"));
              } else { // in the form of T partition (ds="2010-03-03")
                val = stripQuotes(partspec_val.getChild(1).getText());
              }
              partSpec.put(colName, val);
            }
            boolean found = false;
            for (Iterator<AddPartitionDesc> partnIter = partitionDescs
                  .listIterator(); partnIter.hasNext();) {
              AddPartitionDesc addPartitionDesc = partnIter.next();
              if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(partSpec)) {
                found = true;
              } else {
                partnIter.remove();
              }
            }
            if (!found) {
              throw new SemanticException(
                    ErrorMsg.INVALID_PARTITION
                        .getMsg(" - Specified partition not found in import directory"));
            }
          }
        }
      }
      if (tblDesc.getTableName() == null) {
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
      } else {
        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
            if (db.getPartition(table, partSpec, false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));

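An earlier variant of the ADD PARTITION analyzer: a separate DDLWork task is created for every partition spec as it is parsed, rather than collecting descriptors first.
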
        ifNotExists = true;
        break;
      case HiveParser.TOK_PARTSPEC:
        if (currentPart != null) {
          validatePartitionValues(currentPart);
          AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
              db.getCurrentDatabase(), tblName, currentPart,
              currentLocation, ifNotExists);
          rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
              addPartitionDesc), conf));
        }
        // create new partition, set values
        currentLocation = null;
        currentPart = partIter.next();
        break;
      case HiveParser.TOK_PARTITIONLOCATION:
        // if location specified, set in partition
        currentLocation = unescapeSQLString(child.getChild(0).getText());
        break;
      default:
        throw new SemanticException("Unknown child: " + child);
      }
    }

    // add the last one
    if (currentPart != null) {
      validatePartitionValues(currentPart);
      AddPartitionDesc addPartitionDesc = new AddPartitionDesc(
          db.getCurrentDatabase(), tblName, currentPart,
          currentLocation, ifNotExists);
      rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
          addPartitionDesc), conf));
    }