Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.PartitionDesc
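
PartitionDesc describes a single partition inside a Hive query plan: the TableDesc
it belongs to, its partition spec, its input file format class, and the deserializer
used to read its rows. The snippets below are taken from the Hive source tree and
show how plan builders, physical optimizers, the fetch path, and the combine input
formats create and consume it.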


      Table sourceTable = partsList.getSourceTable();
      inputs.add(new ReadEntity(sourceTable));
      TableDesc table = Utilities.getTableDesc(sourceTable);
      FetchWork work = new FetchWork(listP, partP, table);
      if (!work.getPartDesc().isEmpty()) {
        PartitionDesc part0 = work.getPartDesc().get(0);
        PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
        work.setSplitSample(splitSample);
      }
      return work;
    }
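
The snippet above pairs a list of partition directories with a parallel list of
PartitionDescs before handing both to FetchWork. A minimal sketch of that pairing,
assuming a hypothetical parts list and tblDesc, and assuming the older-Hive helpers
Utilities.getPartitionDesc(Partition) and Partition.getPartitionPath():

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.plan.FetchWork;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;
    import org.apache.hadoop.hive.ql.plan.TableDesc;

    // Sketch: build the parallel path/PartitionDesc lists a FetchWork expects.
    static FetchWork toFetchWork(List<Partition> parts, TableDesc tblDesc)
        throws HiveException {
      List<String> partDirs = new ArrayList<String>();
      List<PartitionDesc> partDescs = new ArrayList<PartitionDesc>();
      for (Partition p : parts) {
        partDirs.add(p.getPartitionPath().toString());
        partDescs.add(Utilities.getPartitionDesc(p));
      }
      return new FetchWork(partDirs, partDescs, tblDesc);
    }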



    private void processAlias(MapWork work, String alias) {
      // Change the alias partition desc
      PartitionDesc aliasPartn = work.getAliasToPartnInfo().get(alias);
      changePartitionToMetadataOnly(aliasPartn);

      List<String> paths = getPathsForAlias(work, alias);
      for (String path : paths) {
        PartitionDesc partDesc = work.getPathToPartitionInfo().get(path);
        PartitionDesc newPartition = changePartitionToMetadataOnly(partDesc);
        Path fakePath = new Path(physicalContext.getContext().getMRTmpFileURI()
            + newPartition.getTableName()
            + encode(newPartition.getPartSpec()));
        work.getPathToPartitionInfo().remove(path);
        work.getPathToPartitionInfo().put(fakePath.getName(), newPartition);
        ArrayList<String> aliases = work.getPathToAliases().remove(path);
        work.getPathToAliases().put(fakePath.getName(), aliases);
      }
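
The essential move above is a rekey: pathToPartitionInfo and pathToAliases are
parallel maps keyed by input path, so renaming a partition's path has to update
both or the plan becomes inconsistent (changePartitionToMetadataOnly additionally
points the partition at a metadata-only input format so no real rows are read).
That invariant, sketched on plain maps whose types mirror MapWork's fields:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;

    // Sketch: rekey one partition's entries in the two parallel plan maps.
    static void rekey(LinkedHashMap<String, PartitionDesc> pathToPartnInfo,
                      LinkedHashMap<String, ArrayList<String>> pathToAliases,
                      String oldPath, String fakePath) {
      PartitionDesc partn = pathToPartnInfo.remove(oldPath);
      pathToPartnInfo.put(fakePath, partn);
      ArrayList<String> aliases = pathToAliases.remove(oldPath);
      pathToAliases.put(fakePath, aliases);
    }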

    // constructing the default MapredWork
    MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf);
    MapWork cplan = cMrPlan.getMapWork();
    cplan.getPathToAliases().put(inputDir, aliases);
    cplan.getPathToPartitionInfo().put(inputDir, new PartitionDesc(tblDesc, null));
    cplan.getAliasToWork().put(inputDir, topOp);
    cplan.setMapperCannotSpanPartns(true);

    return cplan;
  }
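
At execution time the three maps populated above are read back together: the input
path resolves a PartitionDesc for deserialization and an alias list for operator
dispatch. A hedged sketch of that consumption, reusing cplan and inputDir from the
snippet above:

    // Sketch: how the three maps relate when the plan runs.
    PartitionDesc pd = cplan.getPathToPartitionInfo().get(inputDir); // how rows are read
    for (String alias : cplan.getPathToAliases().get(inputDir)) {    // who consumes them
      Operator<? extends OperatorDesc> rootOp = cplan.getAliasToWork().get(alias);
      // rows deserialized according to pd are forwarded into rootOp
    }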

      work.setAliasToWork(
          new LinkedHashMap<String, Operator<? extends OperatorDesc>>());
      if (hasDynamicPartitions
          || isSkewedStoredAsDirs(fsInputDesc)) {
        work.getPathToPartitionInfo().put(inputDir,
            new PartitionDesc(tblDesc, null));
      }
      work.setListBucketingCtx(fsInputDesc.getLbCtx());

      return work;
    }
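
Both this snippet and the previous one pass null as the partition spec. A
PartitionDesc built that way acts as a table-level descriptor: its input format
and properties resolve from the TableDesc, which is why it fits unpartitioned
inputs such as these merge directories. A small sketch, assuming a tblDesc:

    // Sketch: a null partSpec means "use the table's metadata".
    PartitionDesc pd = new PartitionDesc(tblDesc, null);
    Class<?> fmt = pd.getInputFileFormatClass();              // resolved via tblDesc
    java.util.Properties props = pd.getOverlayedProperties(); // table props, no overrides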


    while (iterPath.hasNext()) {
      Path nxt = iterPath.next();
      PartitionDesc prt = null;
      if (iterPartDesc != null) {
        prt = iterPartDesc.next();
      }
      FileSystem fs = nxt.getFileSystem(job);
      if (fs.exists(nxt)) {
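
iterPartDesc is null for unpartitioned inputs, so prt stays null and the caller
falls back to the table descriptor (the new PartitionDesc(currTbl, null) in the
next snippet). That lock-step idiom, sketched on plain lists:

    import java.util.Iterator;
    import java.util.List;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;

    // Sketch: parallel iteration where the descriptor list may be absent.
    static void walk(List<Path> paths, List<PartitionDesc> partDescs) {
      Iterator<Path> iterPath = paths.iterator();
      Iterator<PartitionDesc> iterPartDesc =
          (partDescs == null) ? null : partDescs.iterator();
      while (iterPath.hasNext()) {
        Path p = iterPath.next();
        PartitionDesc pd = (iterPartDesc == null) ? null : iterPartDesc.next();
        // pd == null: unpartitioned input, read p with the table's descriptor
      }
    }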

      // not using FileInputFormat.setInputPaths() here because it forces a
      // connection to the default file system, which may or may not be online
      // during pure metadata operations
      job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils.escapeString(currPath
          .toString()));

      PartitionDesc partDesc;
      if (currTbl == null) {
        partDesc = currPart;
      } else {
        partDesc = new PartitionDesc(currTbl, null);
      }

      Class<? extends InputFormat> formatter = partDesc.getInputFileFormatClass();
      inputFormat = getInputFormatFromCache(formatter, job);
      Utilities.copyTableJobPropertiesToConf(partDesc.getTableDesc(), job);
      InputSplit[] splits = inputFormat.getSplits(job, 1);
      FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length];
      for (int i = 0; i < splits.length; i++) {
        inputSplits[i] = new FetchInputFormatSplit(splits[i], formatter.getName());
      }
      if (work.getSplitSample() != null) {
        inputSplits = splitSampling(work.getSplitSample(), inputSplits);
      }
      this.inputSplits = inputSplits;

      splitNum = 0;
      serde = partDesc.getDeserializerClass().newInstance();
      serde.initialize(job, partDesc.getOverlayedProperties());

      if (currTbl != null) {
        tblSerde = serde;
      } else {
        tblSerde = currPart.getTableDesc().getDeserializerClass().newInstance();
        tblSerde.initialize(job, currPart.getTableDesc().getProperties());
      }

      ObjectInspector outputOI = ObjectInspectorConverters.getConvertedOI(
          serde.getObjectInspector(),
          partitionedTableOI == null ? tblSerde.getObjectInspector() : partitionedTableOI, true);

      partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter(
          serde.getObjectInspector(), outputOI);

      if (LOG.isDebugEnabled()) {
        LOG.debug("Creating fetchTask with deserializer typeinfo: "
            + serde.getObjectInspector().getTypeName());
        LOG.debug("deserializer properties: " + partDesc.getOverlayedProperties());
      }

      if (currPart != null) {
        getRowInspectorFromPartition(currPart, outputOI);
      }
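
The getOverlayedProperties() call above layers the partition's own properties over
the table's, so partition-level settings win and everything else falls through.
The overlay idea, sketched with plain java.util.Properties rather than Hive's
actual implementation:

    import java.util.Properties;

    // Sketch: table properties serve as defaults, partition properties override.
    Properties tableProps = new Properties();
    tableProps.setProperty("serialization.format", "1");
    tableProps.setProperty("columns", "key,value");
    Properties overlayed = new Properties(tableProps);  // table props as defaults
    overlayed.setProperty("serialization.format", ","); // partition-level override
    overlayed.getProperty("serialization.format");      // "," (overridden)
    overlayed.getProperty("columns");                   // "key,value" (inherited)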

        return getRowInspectorFromPartitionedTable(work.getTblDesc());
      }

      // Choose any partition. Its OI needs to be converted to the table OI.
      // Whenever a new partition is read, a new converter is created.
      PartitionDesc partition = listParts.get(0);
      Deserializer tblSerde = partition.getTableDesc().getDeserializerClass().newInstance();
      tblSerde.initialize(job, partition.getTableDesc().getProperties());

      partitionedTableOI = null;
      ObjectInspector tableOI = tblSerde.getObjectInspector();

      // Get the OI corresponding to all the partitions
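
The conversion set up here is the general ObjectInspector mechanism: getConvertedOI
chooses a target OI and getConverter produces a converter between representations.
A standalone example of the same machinery converting a Java string to an int:

    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
    import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

    // Sketch: a Converter maps values between two inspectors' representations,
    // the same mechanism used above to align partition rows with the table OI.
    Converter c = ObjectInspectorConverters.getConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector,
        PrimitiveObjectInspectorFactory.javaIntObjectInspector);
    Object converted = c.convert("42"); // Integer 42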

    Map<CombinePathInputFormat, CombineFilter> poolMap =
      new HashMap<CombinePathInputFormat, CombineFilter>();
    Set<Path> poolSet = new HashSet<Path>();

    for (Path path : paths) {

      PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(
          pathToPartitionInfo, path, IOPrepareCache.get().allocatePartitionDescMap());
      TableDesc tableDesc = part.getTableDesc();
      if ((tableDesc != null) && tableDesc.isNonNative()) {
        return super.getSplits(job, numSplits);
      }

      // Use HiveInputFormat if any of the paths is not splittable
      Class inputFormatClass = part.getInputFileFormatClass();
      String inputFormatClassName = inputFormatClass.getName();
      InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, job);
      String deserializerClassName = part.getDeserializerClass() == null ? null
          : part.getDeserializerClass().getName();

      // Since there is no easy way of knowing whether MAPREDUCE-1597 is present in the tree or not,
      // we use a configuration variable for the same
      if (this.mrwork != null && !this.mrwork.getHadoopSupportsSplittable()) {
        // The following code should be removed, once
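
pathToPartitionInfo is keyed by partition directories while split paths usually
point at files inside them, which is why the lookup above is "recursive": it walks
up the path until an ancestor matches. The core matching logic, sketched in plain
Java (the real helper also normalizes URI schemes and caches via IOPrepareCache):

    import java.util.Map;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.plan.PartitionDesc;

    // Sketch: find the PartitionDesc of the nearest registered ancestor path.
    static PartitionDesc lookup(Map<String, PartitionDesc> pathToPartitionInfo,
                                Path path) {
      for (Path p = path; p != null; p = p.getParent()) {
        PartitionDesc pd = pathToPartitionInfo.get(p.toString());
        if (pd != null) {
          return pd;
        }
      }
      return null; // the real helper reports an error instead
    }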

        // extract all the inputFormatClass names for each chunk in the
        // CombinedSplit.
        Path[] ipaths = inputSplitShim.getPaths();
        if (ipaths.length > 0) {
          PartitionDesc part = HiveFileFormatUtils
              .getPartitionDescFromPathRecursively(pathToPartitionInfo,
                  ipaths[0], IOPrepareCache.get().getPartitionDescMap());
          inputFormatClassName = part.getInputFileFormatClass().getName();
        }
      }
    }

        Map<String, PartitionDesc> pathToPartitionInfo = Utilities
            .getMapWork(getJob()).getPathToPartitionInfo();

        // extract all the inputFormatClass names for each chunk in the
        // CombinedSplit.
        PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(pathToPartitionInfo,
            inputSplitShim.getPath(0), IOPrepareCache.get().getPartitionDescMap());

        // create a new InputFormat instance if this is the first time to see
        // this class
        inputFormatClassName = part.getInputFileFormatClass().getName();
      }

      out.writeUTF(inputFormatClassName);
    }
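
The class name written with writeUTF above is read back symmetrically when the
split is deserialized on the task side. A minimal, standalone round trip of that
pattern (IOException handling elided):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    // Sketch: the writeUTF/readUTF round trip used to ship the class name.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeUTF("org.apache.hadoop.mapred.TextInputFormat");
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    String inputFormatClassName = in.readUTF(); // restored on the read side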
