Examples of LoadTableDesc


Examples of org.apache.hadoop.hive.ql.plan.LoadTableDesc

      ddlWork.setNeedLock(true);
      Task<? extends Serializable> mergeTask = TaskFactory.get(ddlWork, conf);
      TableDesc tblDesc = Utilities.getTableDesc(tblObj);
      Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc.toUri());
      mergeDesc.setOutputDir(queryTmpdir);
      LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc,
          partSpec == null ? new HashMap<String, String>() : partSpec);
      ltd.setLbCtx(lbCtx);
      Task<MoveWork> moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false),
          conf);
      mergeTask.addDependentTask(moveTsk);

      if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

    // create final load/move work
    List<loadTableDesc> loadTableWork =  new ArrayList<loadTableDesc>();
    List<loadFileDesc> loadFileWork = new ArrayList<loadFileDesc>();

    loadTableWork.add(new loadTableDesc(fromURI.toString(), getTmpFileName(), Utilities.getTableDesc(ts.tableHandle),
                                        (ts.partSpec != null) ? ts.partSpec : new HashMap<String, String> (),
                                        isOverWrite));

    if(rTask != null) {
      rTask.addDependentTask(TaskFactory.get(new moveWork(loadTableWork, loadFileWork, true), this.conf));
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

          }
        }
      }

      // Next we do this for tables and partitions
      loadTableDesc tbd = work.getLoadTableWork();
      if (tbd != null) {
        String mesg = "Loading data to table " + tbd.getTable().getTableName() +
        ((tbd.getPartitionSpec().size() > 0) ?
            " partition " + tbd.getPartitionSpec().toString() : "");
        String mesg_detail = " from " + tbd.getSourceDir();
        console.printInfo(mesg, mesg_detail);
        Table table = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd.getTable().getTableName());

        if (work.getCheckFileFormat()) {
          // Get all files from the src directory
          FileStatus [] dirs;
          ArrayList<FileStatus> files;
          FileSystem fs;
          try {
            fs = FileSystem.get(table.getDataLocation(),conf);
            dirs = fs.globStatus(new Path(tbd.getSourceDir()));
            files = new ArrayList<FileStatus>();
            for (int i=0; (dirs != null && i<dirs.length); i++) {
              files.addAll(Arrays.asList(fs.listStatus(dirs[i].getPath())));
              // We only check one file, so exit the loop when we have at least one.
              if (files.size()>0) break;
            }
          } catch (IOException e) {
            throw new HiveException("addFiles: filesystem error in check phase", e);
          }

          if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
            // Check if the file format of the file matches that of the table.
            boolean flag = HiveFileFormatUtils.checkInputFormat(fs, conf, tbd.getTable().getInputFileFormatClass(), files);
            if (!flag) {
              throw new HiveException(
                  "Wrong file format. Please check the file's format.");
            }
          }
        }

        if(tbd.getPartitionSpec().size() == 0) {
          db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()));
          if (work.getOutputs() != null)
            work.getOutputs().add(new WriteEntity(table));
        } else {
          LOG.info("Partition is: " + tbd.getPartitionSpec().toString());
          db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
              tbd.getPartitionSpec(), tbd.getReplace(), new Path(tbd.getTmpDir()));
          Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
          if (work.getOutputs() != null)
            work.getOutputs().add(new WriteEntity(partn));
        }
      }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

  /*
   * Does the move task involve moving to a local file system
   */
  public boolean isLocal() {
    loadTableDesc tbd = work.getLoadTableWork();
    if (tbd != null)
      return false;

    loadFileDesc lfd = work.getLoadFileWork();
    if (lfd != null) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

    }

    // create final load/move work

    String loadTmpPath = ctx.getExternalTmpFileURI(toURI);
    loadTableDesc loadTableWork = new loadTableDesc(fromURI.toString(), loadTmpPath,
                                        Utilities.getTableDesc(ts.tableHandle),
                                        (ts.partSpec != null) ? ts.partSpec :
                                        new HashMap<String, String> (),
                                        isOverWrite);
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

        currentTableId = this.destTableId;
        this.destTableId ++;

        // Create the work for moving the table
        this.loadTableWork.add
          (new loadTableDesc(queryTmpdir,
                             ctx.getExternalTmpFileURI(dest_path.toUri()),
                             table_desc,
                             new HashMap<String, String>()));
        if (!outputs.add(new WriteEntity(dest_tab))) {
          throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_tab.getName()));
        }
        break;
      }
    case QBMetaData.DEST_PARTITION: {

        Partition dest_part = qbm.getDestPartitionForAlias(dest);
        dest_tab = dest_part.getTable();
        dest_path = dest_part.getPath()[0];
        queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
        table_desc = Utilities.getTableDesc(dest_tab);

        this.idToTableNameMap.put(String.valueOf(this.destTableId), dest_tab.getName());
        currentTableId = this.destTableId;
        this.destTableId ++;

        this.loadTableWork.add
          (new loadTableDesc(queryTmpdir,
                             ctx.getExternalTmpFileURI(dest_path.toUri()),
                             table_desc, dest_part.getSpec()));
        if (!outputs.add(new WriteEntity(dest_part))) {
          throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_tab.getName() + "@" + dest_part.getName()));
        }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

        currentTableId = this.destTableId;
        this.destTableId ++;

        // Create the work for moving the table
        this.loadTableWork.add
          (new loadTableDesc(queryTmpdir,
                             ctx.getExternalTmpFileURI(dest_path.toUri()),
                             table_desc,
                             new HashMap<String, String>()));
        outputs.add(new WriteEntity(dest_tab));
        break;
      }
    case QBMetaData.DEST_PARTITION: {

        Partition dest_part = qbm.getDestPartitionForAlias(dest);
        dest_tab = dest_part.getTable();
        dest_path = dest_part.getPath()[0];
        queryTmpdir = ctx.getExternalTmpFileURI(dest_path.toUri());
        table_desc = Utilities.getTableDesc(dest_tab);

        this.idToTableNameMap.put(String.valueOf(this.destTableId), dest_tab.getName());
        currentTableId = this.destTableId;
        this.destTableId ++;
       
        this.loadTableWork.add
          (new loadTableDesc(queryTmpdir,
                             ctx.getExternalTmpFileURI(dest_path.toUri()),
                             table_desc, dest_part.getSpec()));
        outputs.add(new WriteEntity(dest_part));
        break;
      }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

          }
        }
      }

      // Next we do this for tables and partitions
      loadTableDesc tbd = work.getLoadTableWork();
      if (tbd != null) {
        String mesg = "Loading data to table " + tbd.getTable().getTableName() +
        ((tbd.getPartitionSpec().size() > 0) ?
            " partition " + tbd.getPartitionSpec().toString() : "");
        String mesg_detail = " from " + tbd.getSourceDir();
        console.printInfo(mesg, mesg_detail);

        if (work.getCheckFileFormat()) {

          // Get all files from the src directory
          FileStatus [] dirs;
          ArrayList<FileStatus> files;
          FileSystem fs;
          try {
            fs = FileSystem.get
              (db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbd.getTable().getTableName()).getDataLocation(),conf);
            dirs = fs.globStatus(new Path(tbd.getSourceDir()));
            files = new ArrayList<FileStatus>();
            for (int i=0; (dirs != null && i<dirs.length); i++) {
              files.addAll(Arrays.asList(fs.listStatus(dirs[i].getPath())));
              // We only check one file, so exit the loop when we have at least one.
              if (files.size()>0) break;
            }
          } catch (IOException e) {
            throw new HiveException("addFiles: filesystem error in check phase", e);
          }

          // Check if the file format of the file matches that of the table.
          boolean flag = HiveFileFormatUtils.checkInputFormat(fs, conf, tbd.getTable().getInputFileFormatClass(), files);
          if(!flag)
            throw new HiveException("Wrong file format. Please check the file's format.");
        }

        if(tbd.getPartitionSpec().size() == 0) {
          db.loadTable(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(), tbd.getReplace(), new Path(tbd.getTmpDir()));
        } else {
          LOG.info("Partition is: " + tbd.getPartitionSpec().toString());
          db.loadPartition(new Path(tbd.getSourceDir()), tbd.getTable().getTableName(),
              tbd.getPartitionSpec(), tbd.getReplace(), new Path(tbd.getTmpDir()));
        }
      }

      return 0;
    }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

  /*
   * Does the move task involve moving to a local file system
   */
  public boolean isLocal() {
    loadTableDesc tbd = work.getLoadTableWork();
    if (tbd != null)
      return false;
   
    loadFileDesc lfd = work.getLoadFileWork();
    if (lfd != null) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.plan.loadTableDesc

    }
   
    // create final load/move work

    String loadTmpPath = ctx.getExternalTmpFileURI(toURI);
    loadTableDesc loadTableWork = new loadTableDesc(fromURI.toString(), loadTmpPath,
                                        Utilities.getTableDesc(ts.tableHandle),
                                        (ts.partSpec != null) ? ts.partSpec :
                                        new HashMap<String, String> (),
                                        isOverWrite);
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact coftware#gmail.com.