Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.partitionDesc
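
The snippets below are drawn from the Hive query planner and map-side execution code. They all revolve around the same contract: a partitionDesc pairs a tableDesc with a partition spec (a LinkedHashMap<String, String>), and a mapredWork plan keys these descriptors by input path through getPathToPartitionInfo(). As a minimal sketch of that contract (the names work and someTableDesc are illustrative, not from the Hive sources):

    // Register an input path with the plan, then resolve its descriptor back.
    partitionDesc pd = new partitionDesc(someTableDesc, null); // null spec: unpartitioned input
    work.getPathToPartitionInfo().put("/warehouse/t/ds=2009-01-01", pd);

    partitionDesc resolved = work.getPathToPartitionInfo().get("/warehouse/t/ds=2009-01-01");
    tableDesc td = resolved.getTableDesc();
    LinkedHashMap<String, String> partSpec = resolved.getPartSpec();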


        // initialize the decoder once, based on the table being processed
        if (deserializer != null) {
          continue;
        }

        partitionDesc pd = conf.getPathToPartitionInfo().get(onefile);
        LinkedHashMap<String, String> partSpec = pd.getPartSpec();
        tableDesc td = pd.getTableDesc();
        Properties p = td.getProperties();
        // publish the table name and partition spec to the Hadoop conf
        HiveConf.setVar(hconf, HiveConf.ConfVars.HIVETABLENAME, String.valueOf(p.getProperty("name")));
        HiveConf.setVar(hconf, HiveConf.ConfVars.HIVEPARTITIONNAME, String.valueOf(partSpec));
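Downstream code can then read these values back from the same configuration; a hypothetical sketch (the consumer shown here is illustrative):

        String tblName = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVETABLENAME);
        String partName = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVEPARTITIONNAME);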



  private tableDesc getTableDescFromPath(Path dir) throws IOException {

    partitionDesc partDesc = pathToPartitionInfo.get(dir.toString());
    if (partDesc == null) {
      partDesc = pathToPartitionInfo.get(dir.toUri().getPath());
    }
    if (partDesc == null) {
      throw new IOException("cannot find dir = " + dir.toString() + " in pathToPartitionInfo!");
    }

    tableDesc table = partDesc.getTableDesc();
    if (table == null) {
      throw new IOException("Input " + dir.toString() +
          " does not have an associated InputFormat in mapredWork!");
    }
    return table;
  }
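A hypothetical caller (not part of the excerpt) would use the helper to pick the right input format for a split's directory; the split variable and the getInputFileFormatClass() accessor are assumptions here:

      // hypothetical usage sketch: resolve the tableDesc for a split's directory
      Path dir = ((FileSplit) split).getPath().getParent();
      tableDesc table = getTableDescFromPath(dir);
      Class inputFormatClass = table.getInputFileFormatClass();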

      String taskTmpDir = taskTmpDirLst.get(pos);
      tableDesc tt_desc = tt_descLst.get(pos);
      if (plan.getPathToAliases().get(taskTmpDir) == null) {
        plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
        plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
        plan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
        plan.getAliasToWork().put(taskTmpDir, currUnionOp);
      }
    }
  }
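The same four-step registration — create the alias list, use the directory as its own alias, attach a partitionDesc with a null spec, and bind the operator — recurs throughout these examples. A minimal helper sketch that captures the pattern (hypothetical, not part of the Hive sources):

    private static void registerTmpInput(mapredWork plan, String taskTmpDir,
        tableDesc tt_desc, Operator<? extends Serializable> work) {
      if (plan.getPathToAliases().get(taskTmpDir) == null) {
        // the temporary directory doubles as its own alias
        plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
        plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
        // intermediate files carry no partition spec
        plan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
        plan.getAliasToWork().put(taskTmpDir, work);
      }
    }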

    if (cplan.getPathToAliases().get(taskTmpDir) == null) {
      cplan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
    }
   
    cplan.getPathToAliases().get(taskTmpDir).add(streamDesc);
    cplan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
    cplan.getAliasToWork().put(streamDesc, op);

    // TODO: Allocate work to remove the temporary files and make that
    // dependent on the redTask
    if (reducer.getClass() == JoinOperator.class) {
      // ... (excerpt truncated here)
    }
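Note the asymmetry with the previous snippet: the temporary directory still keys pathToAliases and pathToPartitionInfo, but the alias recorded in the list (and the aliasToWork key) is streamDesc rather than the directory itself.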


    // pass both confirmed and unknown partitions through the map-reduce framework
    Set<Partition> parts = partsList.getConfirmedPartns();
    parts.addAll(partsList.getUnknownPartns());
    partitionDesc aliasPartnDesc = null;
    try {
      if (parts.isEmpty()) {
        if (!partsList.getDeniedPartns().isEmpty())
          aliasPartnDesc = Utilities.getPartitionDesc(partsList.getDeniedPartns()
              .iterator().next());
      } else {
        aliasPartnDesc = Utilities.getPartitionDesc(parts.iterator().next());
      }
    } catch (HiveException e) {
      LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
      throw new SemanticException(e.getMessage(), e);
    }

    // The table does not have any partitions
    if (aliasPartnDesc == null)
      aliasPartnDesc = new partitionDesc(Utilities.getTableDesc(parseCtx.getTopToTable().get(topOp)), null);

    plan.getAliasToPartnInfo().put(alias_id, aliasPartnDesc);

    for (Partition part : parts) {
      if (part.getTable().isPartitioned())
        inputs.add(new ReadEntity(part));
      else
        inputs.add(new ReadEntity(part.getTable()));

      // Later, the properties will have to come from the partition rather than
      // from the table, in order to support versioning.
      Path[] paths;
      sampleDesc sampleDescr = parseCtx.getOpToSamplePruner().get(topOp);

      if (sampleDescr != null) {
        paths = SamplePruner.prune(part, sampleDescr);
      } else {
        paths = part.getPath();
      }

      // is this an unpartitioned table?
      if (!part.getTable().isPartitioned()) {
        assert ((tblDir == null) && (tblDesc == null));

        tblDir = paths[0];
        tblDesc = Utilities.getTableDesc(part.getTable());
      }

      for (Path p : paths) {
        if (p == null)
          continue;
        String path = p.toString();
        LOG.debug("Adding " + path + " of table " + alias_id);

        partDir.add(p);
        try {
          partDesc.add(Utilities.getPartitionDesc(part));
        } catch (HiveException e) {
          LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
          throw new SemanticException(e.getMessage(), e);
        }
      }
    }

    Iterator<Path>          iterPath      = partDir.iterator();
    Iterator<partitionDesc> iterPartnDesc = partDesc.iterator();

    if (!local) {
      while (iterPath.hasNext()) {
        assert iterPartnDesc.hasNext();
        String path = iterPath.next().toString();

        partitionDesc prtDesc = iterPartnDesc.next();

        // Add the path to alias mapping
        if (plan.getPathToAliases().get(path) == null) {
          plan.getPathToAliases().put(path, new ArrayList<String>());
        }
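The excerpt cuts off inside this loop; judging from the registration pattern in the other snippets on this page, the body presumably continues along these lines (a sketch, not the verbatim source):

        // presumed continuation (sketch): attach the alias and the descriptor
        plan.getPathToAliases().get(path).add(alias_id);
        plan.getPathToPartitionInfo().put(path, prtDesc);
      }
    }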

    if (!local) {
      if (plan.getPathToAliases().get(path) == null) {
        plan.getPathToAliases().put(path, new ArrayList<String>());
      }
      plan.getPathToAliases().get(path).add(alias);
      plan.getPathToPartitionInfo().put(path, new partitionDesc(tt_desc, null));
      plan.getAliasToWork().put(alias, topOp);
    }
    else {
      // populate local work if needed
      mapredLocalWork localPlan = plan.getMapLocalWork();

      GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(ctx.getCurrMapJoinOp());
      String taskTmpDir = mjCtx.getTaskTmpDir();
      if (uPlan.getPathToAliases().get(taskTmpDir) == null) {
        uPlan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
        uPlan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
        uPlan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(mjCtx.getTTDesc(), null));
        uPlan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
      }

      for (Task t : currTask.getParentTasks())
        t.addDependentTask(uTask);

    initialize(hconf, null);
  }

  private static MapOpCtx initObjectInspector(mapredWork conf, Configuration hconf, String onefile)
    throws HiveException, ClassNotFoundException, InstantiationException, IllegalAccessException, SerDeException {
    partitionDesc pd = conf.getPathToPartitionInfo().get(onefile);
    LinkedHashMap<String, String> partSpec = pd.getPartSpec();
    Properties tblProps = pd.getProperties();

    Class sdclass = pd.getDeserializerClass();
    if (sdclass == null) {
      String className = pd.getSerdeClassName();
      // guard against null first, and compare string contents with equals(), not ==
      if ((className == null) || className.equals("")) {
        throw new HiveException("SerDe class or the SerDe class name is not set for table: "
            + tblProps.getProperty("name"));
      }
      sdclass = hconf.getClassByName(className);
    }

    String tableName = String.valueOf(tblProps.getProperty("name"));
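The excerpt stops before the deserializer is built. A plausible continuation, sketched against the old-style SerDe bootstrap (reflective instantiation plus initialize(conf, props)); treat it as an assumption, not the verbatim source:

    Deserializer deserializer = (Deserializer) sdclass.newInstance();
    deserializer.initialize(hconf, tblProps);
    StructObjectInspector rowObjectInspector =
        (StructObjectInspector) deserializer.getObjectInspector();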

      String taskTmpDir = mjCtx.getTaskTmpDir();
      tableDesc tt_desc = mjCtx.getTTDesc();
      assert plan.getPathToAliases().get(taskTmpDir) == null;
      plan.getPathToAliases().put(taskTmpDir, new ArrayList<String>());
      plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir);
      plan.getPathToPartitionInfo().put(taskTmpDir, new partitionDesc(tt_desc, null));
      plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp());
    }

    tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(
          PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
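This intermediate descriptor is exactly what the registration pattern above stores against a task's temporary directory; with the hypothetical registerTmpInput helper sketched earlier, the glue might look like this (the operator argument depends on the surrounding context):

      registerTmpInput(plan, taskTmpDir, tt_desc, parentOp);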
