Package org.apache.hadoop.hive.ql.plan

Examples of org.apache.hadoop.hive.ql.plan.TableDesc

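Before the individual excerpts, here is a minimal sketch of how a TableDesc is typically constructed and queried. It uses only the four-argument constructor and the getters that also appear in the excerpts below; the column names and property values are illustrative placeholders, not taken from the original sources.

// Minimal sketch (illustrative values only): a TableDesc describing a plain-text
// table backed by LazySimpleSerDe, TextInputFormat and IgnoreKeyTextOutputFormat.
Properties props = new Properties();
props.setProperty("columns", "key,value");
props.setProperty("columns.types", "string,string");

TableDesc desc = new TableDesc(LazySimpleSerDe.class, TextInputFormat.class,
    IgnoreKeyTextOutputFormat.class, props);

// The plan-time metadata can then be read back wherever tasks are generated.
Class<?> inputFormatClass = desc.getInputFileFormatClass();
Class<?> outputFormatClass = desc.getOutputFileFormatClass();
Properties tableProps = desc.getProperties();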

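// Excerpt: the TableDesc attached to a FileSinkDesc supplies the serializer,
// output format class and table properties needed to open a RecordWriter; an
// empty file is then written for each bucket path.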
      jc = new JobConf(hconf);
    }
    HiveOutputFormat<?, ?> hiveOutputFormat = null;
    Class<? extends Writable> outputClass = null;
    boolean isCompressed = conf.getCompressed();
    TableDesc tableInfo = conf.getTableInfo();
    try {
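      // The TableDesc's deserializer class is the table's SerDe, which also
      // implements Serializer, so it can be instantiated and used to serialize rows.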
      Serializer serializer = (Serializer) tableInfo.getDeserializerClass().newInstance();
      serializer.initialize(null, tableInfo.getProperties());
      outputClass = serializer.getSerializedClass();
      hiveOutputFormat = conf.getTableInfo().getOutputFileFormatClass().newInstance();
    } catch (SerDeException e) {
      throw new HiveException(e);
    } catch (InstantiationException e) {
      throw new HiveException(e);
    } catch (IllegalAccessException e) {
      throw new HiveException(e);
    }

    for (String p : paths) {
      Path path = new Path(p);
      RecordWriter writer = HiveFileFormatUtils.getRecordWriter(
          jc, hiveOutputFormat, outputClass, isCompressed,
          tableInfo.getProperties(), path, reporter);
      writer.close(false);
      LOG.info("created empty bucket for enforcing bucketing at " + path);
    }
  }


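// Excerpt: the TableDesc resolved for an alias in the MapWork exposes its
// properties, output file format and isNonNative() flag; no dummy file can be
// created for non-native tables.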
  @SuppressWarnings("rawtypes")
  private static Path createDummyFileForEmptyTable(JobConf job, MapWork work,
      String hiveScratchDir, String alias, int sequenceNumber)
          throws IOException, InstantiationException, IllegalAccessException {

    TableDesc tableDesc = work.getAliasToPartnInfo().get(alias).getTableDesc();
    Properties props = tableDesc.getProperties();
    boolean nonNative = tableDesc.isNonNative();
    Class<? extends HiveOutputFormat> outFileFormat = tableDesc.getOutputFileFormatClass();

    if (nonNative) {
      // If this isn't a Hive table, we can't create an empty file for it.
      return null;
    }

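// Excerpt: the TableDesc of an existing FileSinkDesc is cloned and reused in a
// new FileSinkDesc/FileSinkOperator that writes the merged output to finalName.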
    RowSchema inputRS = fsInput.getSchema();
    Operator<? extends OperatorDesc> tsMerge =
        OperatorFactory.get(TableScanDesc.class, inputRS);

    // Create a FileSink operator
    TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone();
    FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts,
      conf.getBoolVar(ConfVars.COMPRESSRESULT));
    FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild(
      fsOutputDesc, inputRS, tsMerge);

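// Excerpt: the TableDesc carried by the FileSinkDesc is picked up when building
// the MapWork for a file-merge job; the input directory serves as a dummy alias.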
  private MapWork createMRWorkForMergingFiles(HiveConf conf,
    Operator<? extends OperatorDesc> topOp, FileSinkDesc fsDesc) {

    ArrayList<String> aliases = new ArrayList<String>();
    String inputDir = fsDesc.getFinalDirName();
    TableDesc tblDesc = fsDesc.getTableInfo();
    aliases.add(inputDir); // dummy alias: just use the input path

    // constructing the default MapredWork
    MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf);
    MapWork cplan = cMrPlan.getMapWork();

   */
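// Excerpt: the TableDesc's input file format class decides whether an
// RCFile-specific merge task can be created for this FileSinkDesc.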
  private MapWork createRCFileMergeTask(FileSinkDesc fsInputDesc,
      String finalName, boolean hasDynamicPartitions) throws SemanticException {

    String inputDir = fsInputDesc.getFinalDirName();
    TableDesc tblDesc = fsInputDesc.getTableInfo();

    if (tblDesc.getInputFileFormatClass().equals(RCFileInputFormat.class)) {
      ArrayList<String> inputDirs = new ArrayList<String>();
      if (!hasDynamicPartitions
          && !isSkewedStoredAsDirs(fsInputDesc)) {
        inputDirs.add(inputDir);
      }

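// Excerpt: Utilities.getTableDesc(table) builds the TableDesc passed to the
// LoadTableDesc of a truncate-table operation that stages its output in a
// temporary directory before the final move.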
        truncateTblDesc.setLbCtx(lbCtx);

        addInputsOutputsAlterTable(tableName, partSpec);
        ddlWork.setNeedLock(true);
        TableDesc tblDesc = Utilities.getTableDesc(table);
        // Write the output to temporary directory and move it to the final location at the end
        // so the operation is atomic.
        String queryTmpdir = ctx.getExternalTmpFileURI(newTblPartLoc.toUri());
        truncateTblDesc.setOutputDir(queryTmpdir);
        LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,

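// Excerpt: the same pattern for a merge-files DDL task: a TableDesc derived from
// the table object feeds the LoadTableDesc that loads the merged data back from
// the temporary directory.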
      addInputsOutputsAlterTable(tableName, partSpec);
      DDLWork ddlWork = new DDLWork(getInputs(), getOutputs(), mergeDesc);
      ddlWork.setNeedLock(true);
      Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
      TableDesc tblDesc = Utilities.getTableDesc(tblObj);
      String queryTmpdir = ctx.getExternalTmpFileURI(newTblPartLoc.toUri());
      mergeDesc.setOutputDir(queryTmpdir);
      LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, queryTmpdir, tblDesc,
          partSpec == null ? new HashMap<String, String>() : partSpec);
      ltd.setLbCtx(lbCtx);

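// Excerpt: a TableDesc is constructed directly from a SerDe class, input format,
// output format and Properties object to describe the result file read by a FetchTask.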
    prop.setProperty(serdeConstants.SERIALIZATION_NULL_FORMAT, " ");
    String[] colTypes = schema.split("#");
    prop.setProperty("columns", colTypes[0]);
    prop.setProperty("columns.types", colTypes[1]);

    FetchWork fetch = new FetchWork(ctx.getResFile().toString(), new TableDesc(
        LazySimpleSerDe.class, TextInputFormat.class,
        IgnoreKeyTextOutputFormat.class, prop), -1);
    fetch.setSerializationNullFormat(" ");
    return (FetchTask) TaskFactory.get(fetch, conf);
  }

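// Excerpt: the TableDesc inside each LoadTableDesc provides the table name used
// to look up the source table and its indexes when generating index-update tasks.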
  public List<Task<? extends Serializable>> generateUpdateTasks() throws
    HiveException {
    hive = Hive.get(this.conf);
    for (LoadTableDesc ltd : loadTableWork) {
      TableDesc td = ltd.getTable();
      Table srcTable = hive.getTable(td.getTableName());
      List<Index> tblIndexes = srcTable.getAllIndexes((short)-1);
      Map<String, String> partSpec = ltd.getPartitionSpec();
      if (partSpec == null || partSpec.size() == 0) {
        // unpartitioned table, update the whole index
        doIndexUpdate(tblIndexes);

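// Excerpt: each input path is resolved to its PartitionDesc; if its TableDesc is
// non-native, getSplits() falls back to the parent input format's implementation.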
    for (Path path : paths) {

      PartitionDesc part = HiveFileFormatUtils.getPartitionDescFromPathRecursively(
          pathToPartitionInfo, path, IOPrepareCache.get().allocatePartitionDescMap());
      TableDesc tableDesc = part.getTableDesc();
      if ((tableDesc != null) && tableDesc.isNonNative()) {
        return super.getSplits(job, numSplits);
      }

      // Use HiveInputFormat if any of the paths is not splittable
      Class inputFormatClass = part.getInputFileFormatClass();
