Package org.apache.hadoop.hive.ql

Examples of org.apache.hadoop.hive.ql.Context


      String cmd = "select key from " + tblName;
      drv.compile(cmd);

      //create scratch dir
      Context ctx = new Context(newJob);
      Path emptyScratchDir = ctx.getMRTmpPath();
      FileSystem fileSys = emptyScratchDir.getFileSystem(newJob);
      fileSys.mkdirs(emptyScratchDir);

      QueryPlan plan = drv.getPlan();
      MapRedTask selectTask = (MapRedTask)plan.getRootTasks().get(0);

      List<Path> inputPaths = Utilities.getInputPaths(newJob, selectTask.getWork().getMapWork(), emptyScratchDir, ctx);
      Utilities.setInputPaths(newJob, inputPaths);

      Utilities.setMapRedWork(newJob, selectTask.getWork(), ctx.getMRTmpPath());

      CombineHiveInputFormat combineInputFormat = ReflectionUtils.newInstance(
          CombineHiveInputFormat.class, newJob);

      InputSplit[] retSplits = combineInputFormat.getSplits(newJob, 1);
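
For a self-contained starting point, here is a minimal, hedged sketch of creating a Context and materializing its MR scratch directory, as the test above does. It assumes a Hive client classpath; the class name ContextScratchDirExample is ours, and new Context(conf) can throw IOException if the scratch roots cannot be set up.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.Context;

    public class ContextScratchDirExample {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        Context ctx = new Context(conf);     // per-query context; throws IOException on failure
        Path scratch = ctx.getMRTmpPath();   // scratch dir for map-reduce intermediates
        FileSystem fs = scratch.getFileSystem(conf);
        fs.mkdirs(scratch);                  // materialize it, as the snippet above does
        System.out.println("MR scratch dir: " + scratch);
      }
    }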


      // get current mapred work and its local work
      MapredWork mapredWork = (MapredWork) currTask.getWork();
      MapredLocalWork localwork = mapredWork.getMapWork().getMapLocalWork();
      if (localwork != null) {
        // get the context info and set up the shared tmp URI
        Context ctx = physicalContext.getContext();
        Path tmpPath = Utilities.generateTmpPath(ctx.getLocalTmpPath(), currTask.getId());
        localwork.setTmpPath(tmpPath);
        mapredWork.getMapWork().setTmpHDFSPath(Utilities.generateTmpPath(
          ctx.getMRTmpPath(), currTask.getId()));
        // create a task for this local work; for now, the local work is shared
        // by the original MapredTask and the newly generated MapredLocalTask
        MapredLocalTask localTask = (MapredLocalTask) TaskFactory.get(localwork,
            physicalContext.getParseContext().getConf());
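
The two generateTmpPath calls above differ only in their base directory: the Context hands out a local filesystem root for the MapredLocalTask and an HDFS root for the cluster side. A hedged illustration, reusing ctx from the snippet and a made-up task id:

      // Illustration only: per-task tmp dirs derived from the Context's two roots.
      Path localTmp = Utilities.generateTmpPath(ctx.getLocalTmpPath(), "task-1"); // local FS
      Path hdfsTmp = Utilities.generateTmpPath(ctx.getMRTmpPath(), "task-1");     // HDFS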

  private ASTNode genRewrittenTree(String rewrittenQuery) throws SemanticException {
    ASTNode rewrittenTree;
    // set up a fresh Context before parsing the rewritten query string
    try {
      ctx = new Context(conf);
    } catch (IOException e) {
      throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_IO_ERROR.getMsg());
    }
    ctx.setCmd(rewrittenQuery);
    ParseDriver pd = new ParseDriver();
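
The snippet is truncated here; a hedged sketch of the likely continuation, turning the rewritten query into an AST with the ParseDriver (based on the Hive 0.13-era parser API; treat the exact calls, including ParseUtils.findRootNonNullToken, as assumptions):

    // Sketch, reusing pd and ctx from the method above:
    try {
      ASTNode tree = pd.parse(rewrittenQuery, ctx);          // raw AST from the parser
      rewrittenTree = ParseUtils.findRootNonNullToken(tree); // strip wrapper tokens
    } catch (ParseException e) {
      throw new SemanticException(e);
    }
    return rewrittenTree;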

   * @return Returns 0 when execution succeeds and a value above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
  private int showLocks(ShowLocksDesc showLocks) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    HiveLockManager lockMgr = txnManager.getLockManager();

    // newer transaction managers render lock information in their own format
    if (txnManager.useNewShowLocksFormat()) {
      return showLocksNewFormat(showLocks, lockMgr);
    }

    boolean isExt = showLocks.isExt();

   * @return Returns 0 when execution succeeds and a value above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
  private int lockTable(LockTableDesc lockTbl) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    if (!txnManager.supportsExplicitLock()) {
      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
    }
    HiveLockManager lockMgr = txnManager.getLockManager();

   * @return Returns 0 when execution succeeds and a value above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
  private int lockDatabase(LockDatabaseDesc lockDb) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    if (!txnManager.supportsExplicitLock()) {
      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
    }
    HiveLockManager lockMgr = txnManager.getLockManager();

   * @return Returns 0 when execution succeeds and a value above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
  private int unlockDatabase(UnlockDatabaseDesc unlockDb) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    if (!txnManager.supportsExplicitLock()) {
      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
    }
    HiveLockManager lockMgr = txnManager.getLockManager();

   * @return Returns 0 when execution succeeds and a value above 0 if it fails.
   * @throws HiveException
   *           Throws this exception if an unexpected error occurs.
   */
  private int unlockTable(UnlockTableDesc unlockTbl) throws HiveException {
    Context ctx = driverContext.getCtx();
    HiveTxnManager txnManager = ctx.getHiveTxnManager();
    if (!txnManager.supportsExplicitLock()) {
      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
    }
    HiveLockManager lockMgr = txnManager.getLockManager();
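
The four lock and unlock handlers above repeat the same explicit-lock guard verbatim. A hedged refactoring sketch that consolidates it; the helper name getExplicitLockManager is ours, not Hive's:

  // Hypothetical helper, not part of Hive: one place for the explicit-lock guard.
  private HiveLockManager getExplicitLockManager() throws HiveException {
    HiveTxnManager txnManager = driverContext.getCtx().getHiveTxnManager();
    if (!txnManager.supportsExplicitLock()) {
      throw new HiveException(ErrorMsg.LOCK_REQUEST_UNSUPPORTED,
          conf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER));
    }
    return txnManager.getLockManager();
  }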

public class GlobalLimitOptimizer implements Transform {

  private final Log LOG = LogFactory.getLog(GlobalLimitOptimizer.class.getName());

  public ParseContext transform(ParseContext pctx) throws SemanticException {
    Context ctx = pctx.getContext();
    Map<String, Operator<? extends OperatorDesc>> topOps = pctx.getTopOps();
    GlobalLimitCtx globalLimitCtx = pctx.getGlobalLimitCtx();
    Map<TableScanOperator, ExprNodeDesc> opToPartPruner = pctx.getOpToPartPruner();
    Map<String, SplitSample> nameToSplitSample = pctx.getNameToSplitSample();
    Map<TableScanOperator, Table> topToTable = pctx.getTopToTable();

    QB qb = pctx.getQB();
    HiveConf conf = pctx.getConf();
    QBParseInfo qbParseInfo = qb.getParseInfo();

    // determine whether the query qualifies for reducing the input size for
    // LIMIT: it only qualifies when there is exactly one top operator, no
    // transform script or UDTF, and no block sampling is used.
    if (ctx.getTryCount() == 0 && topOps.size() == 1
        && !globalLimitCtx.ifHasTransformOrUDTF() &&
        nameToSplitSample.isEmpty()) {

      // Here we recursively check:
      // 1. whether there is exactly one LIMIT in the query
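
As a usage note, a Transform is applied by handing it the ParseContext and keeping the possibly rewritten result, which is how Hive's Optimizer drives its transform list. A minimal, hedged sketch:

    // Sketch: applying the transform directly, as the Optimizer's loop does.
    GlobalLimitOptimizer limitOpt = new GlobalLimitOptimizer();
    pctx = limitOpt.transform(pctx); // no-op unless the LIMIT query qualifies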

  @SuppressWarnings({"nls", "unchecked"})
  public void compile(final ParseContext pCtx, final List<Task<? extends Serializable>> rootTasks,
      final HashSet<ReadEntity> inputs, final HashSet<WriteEntity> outputs) throws SemanticException {

    Context ctx = pCtx.getContext();
    GlobalLimitCtx globalLimitCtx = pCtx.getGlobalLimitCtx();
    QB qb = pCtx.getQB();
    List<Task<MoveWork>> mvTask = new ArrayList<Task<MoveWork>>();

    List<LoadTableDesc> loadTableWork = pCtx.getLoadTableWork();
