Package org.apache.hadoop.hive.ql.hooks

Examples of org.apache.hadoop.hive.ql.hooks.ReadEntity
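ReadEntity represents a table or partition that a query or DDL operation reads; WriteEntity is its counterpart for whatever is written. As the snippets below show, Hive's tasks and semantic analyzers populate the plan's inputs and outputs sets with these entities, and execution hooks later receive the same sets. As a minimal sketch of the consuming side, the following pre-execution hook just logs both sets; it assumes the classic org.apache.hadoop.hive.ql.hooks.PreExecute interface and a hypothetical class name, so treat the exact signature as version-dependent.

import java.util.Set;

import org.apache.hadoop.hive.ql.hooks.PreExecute;
import org.apache.hadoop.hive.ql.hooks.ReadEntity;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical hook, for illustration only.
public class LogEntitiesHook implements PreExecute {
  public void run(SessionState sess, Set<ReadEntity> inputs,
      Set<WriteEntity> outputs, UserGroupInformation ugi) throws Exception {
    // Every table or partition the query reads arrives as a ReadEntity.
    for (ReadEntity input : inputs) {
      System.out.println("reads: " + input);
    }
    // WriteEntity plays the same role for what the query writes.
    for (WriteEntity output : outputs) {
      System.out.println("writes: " + output);
    }
  }
}

A hook like this would normally be registered through hive.exec.pre.hooks. The snippets that follow show the producing side: inputs.add(new ReadEntity(...)) and outputs.add(new WriteEntity(...)) inside Hive's own tasks and semantic analyzers.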


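      // ALTER TABLE ... TOUCH handling (apparently from Hive's DDL task): the touched
      // table or partition is registered as both a ReadEntity input and a WriteEntity
      // output of the work.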
      try {
        db.alterTable(tblName, tbl);
      } catch (InvalidOperationException e) {
        throw new HiveException("Uable to update table");
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
      } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    }
    return 0;
  }


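    // Tail of an alter-table code path: the affected partition(s), or for a rename the
    // old and new tables, end up as the read and write entities of the work.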
    // This is kind of hacky - the read entity contains the old table, whereas
    // the write entity contains the new table. This is needed for rename -
    // both the old and the new table names are passed.
    if (part != null) {
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    }
    else if (allPartitions != null) {
      for (Partition tmpPart: allPartitions) {
        work.getInputs().add(new ReadEntity(tmpPart));
        work.getOutputs().add(new WriteEntity(tmpPart));
      }
    }
    else {
      work.getInputs().add(new ReadEntity(oldTbl));
      work.getOutputs().add(new WriteEntity(tbl));
    }
    return 0;
  }

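    // Plan generation over the source partitions: each partition becomes a ReadEntity
    // input; for a non-partitioned table, the table itself is added instead.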
    boolean isFirstPart = true;
    boolean emptyInput = true;
    boolean singlePartition = (parts.size() == 1);
    for (Partition part : parts) {
      if (part.getTable().isPartitioned()) {
        inputs.add(new ReadEntity(part));
      } else {
        inputs.add(new ReadEntity(part.getTable()));
      }

      // Later the properties have to come from the partition as opposed
      // to from the table in order to support versioning.
      Path[] paths = null;

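    // The whole-table (e.g. rename) case of the same alter-table pattern: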
    // This is kind of hacky - the read entity contains the old table, whereas
    // the write entity contains the new table. This is needed for rename -
    // both the old and the new table names are passed.
    work.getInputs().add(new ReadEntity(oldTbl));
    work.getOutputs().add(new WriteEntity(tbl));
    return 0;
  }

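    // A similar plan-generation fragment, here alongside sample pruning: each partition,
    // or the unpartitioned table, is again added to the inputs as a ReadEntity.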
    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);
    SamplePruner samplePruner = parseCtx.getAliasToSamplePruner().get(alias_id);
   
    for (Partition part : parts) {
      if (part.getTable().isPartitioned()) {
        inputs.add(new ReadEntity(part));
      } else {
        inputs.add(new ReadEntity(part.getTable()));
      }

      // Later the properties have to come from the partition as opposed
      // to from the table in order to support versioning.
      Path[] paths;
      if (samplePruner != null) {

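      // Fetch-task shortcut for simple queries (apparently from the semantic analyzer):
      // the table, or the partitions that survive pruning, are added as ReadEntity inputs.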
      Iterator<Map.Entry<String, Table>> iter = qb.getMetaData().getAliasToTable().entrySet().iterator();
      Table tab = ((Map.Entry<String, Table>)iter.next()).getValue();
      if (!tab.isPartitioned()) {
        if (qbParseInfo.getDestToWhereExpr().isEmpty()) {
          fetch = new fetchWork(tab.getPath().toString(), Utilities.getTableDesc(tab),
              qb.getParseInfo().getOuterQueryLimit());
        }
        inputs.add(new ReadEntity(tab));
      }
      else {
        if (aliasToPruner.size() == 1) {
          Iterator<Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>> iterP =
            aliasToPruner.entrySet().iterator();
          org.apache.hadoop.hive.ql.parse.ASTPartitionPruner pr =
            ((Map.Entry<String, org.apache.hadoop.hive.ql.parse.ASTPartitionPruner>)iterP.next()).getValue();
          if (pr.onlyContainsPartitionCols()) {
            List<String> listP = new ArrayList<String>();
            List<partitionDesc> partP = new ArrayList<partitionDesc>();
            PrunedPartitionList partsList = null;
            Set<Partition> parts = null;
            try {
              partsList = pr.prune();
              // If there is any unknown partition, create a map-reduce job for the filter to prune correctly
              if (partsList.getUnknownPartns().size() == 0) {
                parts = partsList.getConfirmedPartns();
                Iterator<Partition> iterParts = parts.iterator();
                while (iterParts.hasNext()) {
                  Partition part = iterParts.next();
                  listP.add(part.getPartitionPath().toString());
                  partP.add(Utilities.getPartitionDesc(part));
                  inputs.add(new ReadEntity(part));
                }
                fetch = new fetchWork(listP, partP, qb.getParseInfo().getOuterQueryLimit());
              }
            } catch (HiveException e) {
              // Has to use full name to make sure it does not conflict with org.apache.commons.lang.StringUtils

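        // Apparently from the EXPORT analyzer: one copy task per partition (or one for
        // the whole table), with each copied source recorded as a ReadEntity input and
        // the target location as a WriteEntity output.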
        Path toPartPath = new Path(toURI.toString(), partition.getName());
        Task<? extends Serializable> rTask = TaskFactory.get(
            new CopyWork(fromURI.toString(), toPartPath.toString(), false),
            conf);
        rootTasks.add(rTask);
        inputs.add(new ReadEntity(partition));
      }
    } else {
      URI fromURI = ts.tableHandle.getDataLocation();
      Path toDataPath = new Path(toURI.toString(), "data");
      Task<? extends Serializable> rTask = TaskFactory.get(new CopyWork(
          fromURI.toString(), toDataPath.toString(), false), conf);
      rootTasks.add(rTask);
      inputs.add(new ReadEntity(ts.tableHandle));
    }
    outputs.add(new WriteEntity(toURI.toString(),
        toURI.getScheme().equals("hdfs")));
  }

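    // DROP TABLE analysis: when the table exists it becomes both a ReadEntity input and
    // a WriteEntity output; IF EXISTS and DROPIGNORESNONEXISTENT decide whether a
    // missing table is an error.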
    boolean throwException =
      !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
    try {
      Table tab = db.getTable(db.getCurrentDatabase(), tableName, throwException);
      if (tab != null) {
        inputs.add(new ReadEntity(tab));
        outputs.add(new WriteEntity(tab));
      }
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }

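    // ALTER TABLE analysis follows the same pattern: the existing table is registered
    // in both the inputs and the outputs.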
    alterTblDesc.setOldName(tableName);

    try {
      Table tab = db.getTable(db.getCurrentDatabase(), tableName, false);
      if (tab != null) {
        inputs.add(new ReadEntity(tab));
        outputs.add(new WriteEntity(tab));
      }
    } catch (HiveException e) {
      throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName));
    }
