Package org.apache.hadoop.hive.ql.processors

Examples of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
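
A minimal sketch of the typical calling pattern, assuming a Hive version of this era where Driver implements CommandProcessor and run() may throw CommandNeedRetryException (construction details vary by version):

    HiveConf conf = new HiveConf(SessionState.class);
    SessionState.start(new SessionState(conf));
    Driver driver = new Driver(conf);
    try {
      CommandProcessorResponse cpr = driver.run("SHOW TABLES");
      if (cpr.getResponseCode() != 0) {
        // A non-zero response code signals failure; the message and an
        // optional SQLState ride along on the response object.
        System.err.println("FAILED: " + cpr.getErrorMessage()
            + " (SQLState " + cpr.getSQLState() + ")");
      }
    } catch (CommandNeedRetryException e) {
      // The driver asks the caller to re-submit; see the retry sketch
      // after the runInternal() excerpt below.
    } finally {
      driver.close();
    }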


    SQLState = null;

    int ret = compile(command);
    if (ret != 0) {
      releaseLocks(ctx.getHiveLocks());
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
      boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
      if (lockOnlyMapred) {
        Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
        taskQueue.addAll(plan.getRootTasks());
        while (taskQueue.peek() != null) {
          Task<? extends Serializable> tsk = taskQueue.remove();
          requireLock = requireLock || tsk.requireLock();
          if (requireLock) {
            break;
          }
          if (tsk instanceof ConditionalTask) {
            taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
          }
          if (tsk.getChildTasks() != null) {
            taskQueue.addAll(tsk.getChildTasks());
          }
          // Do not add the backup task here, because the backup task should be
          // the same type as the original task.
        }
      } else {
        requireLock = true;
      }
    }

    if (requireLock) {
      ret = acquireReadWriteLocks();
      if (ret != 0) {
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
      }
    }

    ret = execute();
    if (ret != 0) {
      // if requireLock is false, this release is a no-op because no locks were acquired
      releaseLocks(ctx.getHiveLocks());
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    // if requireLock is false, this release is a no-op because no locks were acquired
    releaseLocks(ctx.getHiveLocks());
    return new CommandProcessorResponse(ret);
  }
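
The task walk above can be read as a standalone predicate over the plan's task DAG. A hypothetical helper (planNeedsLock is not part of Hive; the traversal simply mirrors the loop above):

    // Hypothetical helper: breadth-first walk over the task DAG, stopping at
    // the first task that requires a lock. ConditionalTask branches live in
    // getListTasks(); regular children in getChildTasks().
    private static boolean planNeedsLock(List<Task<? extends Serializable>> rootTasks) {
      Queue<Task<? extends Serializable>> queue =
          new LinkedList<Task<? extends Serializable>>(rootTasks);
      while (!queue.isEmpty()) {
        Task<? extends Serializable> task = queue.remove();
        if (task.requireLock()) {
          return true;
        }
        if (task instanceof ConditionalTask) {
          queue.addAll(((ConditionalTask) task).getListTasks());
        }
        if (task.getChildTasks() != null) {
          queue.addAll(task.getChildTasks());
        }
      }
      return false;
    }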


    return run(null, true);
  }

  public CommandProcessorResponse run(String command, boolean alreadyCompiled)
        throws CommandNeedRetryException {
    CommandProcessorResponse cpr = runInternal(command, alreadyCompiled);
    if (cpr.getResponseCode() == 0) {
      return cpr;
    }
    SessionState ss = SessionState.get();
    if (ss == null) {
      return cpr;
    }
    MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf());
    if (!(mdf instanceof JsonMetaDataFormatter)) {
      return cpr;
    }
    /* Here we want to encode the error in a machine-readable way (e.g. JSON).
     * Ideally, errorCode would always be set to a canonical error defined in
     * ErrorMsg. In practice that is rarely the case, so the messy logic below
     * tries to tease out the canonical error code when it can. The stack trace
     * is excluded from the output when the error is a specific/expected one.
     * The output is written to stdout for backward compatibility (WebHCat
     * consumes it). */
    try {
      if (downstreamError == null) {
        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState);
        return cpr;
      }
      ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode());
      if (canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) {
        /* Some HiveExceptions (e.g. SemanticException) don't set a canonical
           ErrorMsg explicitly, but there is logic (e.g. #compile()) to find an
           appropriate canonical error and return its code as the error code.
           In this case we want to preserve it for downstream code to
           interpret. */
        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState, null);
        return cpr;
      }
      if (downstreamError instanceof HiveException) {
        HiveException rc = (HiveException) downstreamError;
        // Report the canonical error code carried by the exception; include
        // the stack trace only when the error is the generic, unexpected one.
        // (The trailing arguments are reconstructed from the surrounding
        // error-handling logic; the original snippet was cut off mid-call.)
        mdf.error(ss.out, errorMessage,
            rc.getCanonicalErrorMsg().getErrorCode(), SQLState,
            rc.getCanonicalErrorMsg() == ErrorMsg.GENERIC_ERROR
                ? org.apache.hadoop.util.StringUtils.stringifyException(rc)
                : null);
      }
    } catch (HiveException ex) {
      console.printError("Unable to JSON-encode the error",
          org.apache.hadoop.util.StringUtils.stringifyException(ex));
    }
    return cpr;
  }
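
The JSON branch above only fires when the session's formatter is a JsonMetaDataFormatter. Assuming the hive.ddl.output.format property is what selects the formatter (this is how WebHCat requests machine-readable output), a sketch:

    // Sketch, assuming hive.ddl.output.format selects the formatter;
    // WebHCat sets it to "json" so errors come back machine-readable.
    HiveConf conf = new HiveConf(SessionState.class);
    conf.set("hive.ddl.output.format", "json");
    SessionState.start(new SessionState(conf));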

  public CommandProcessorResponse compileAndRespond(String command) {
    return new CommandProcessorResponse(compileInternal(command),
        errorMessage, SQLState);
  }
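
compileAndRespond() wraps the result of compilation without executing anything, which makes it handy for pre-flight checks; the Sentry snippet further down uses it exactly this way. A short sketch, assuming a Driver instance named driver and a query string:

    // Compile-only check; nothing is executed and no locks are taken.
    CommandProcessorResponse cpr = driver.compileAndRespond(query);
    if (cpr.getResponseCode() != 0) {
      System.err.println("Compilation failed: " + cpr.getErrorMessage());
    }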

    errorMessage = null;
    SQLState = null;
    downstreamError = null;

    if (!validateConfVariables()) {
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, command);
    // Get all the driver run hooks and pre-execute them.
    List<HiveDriverRunHook> driverRunHooks;
    try {
      driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS,
          HiveDriverRunHook.class);
      for (HiveDriverRunHook driverRunHook : driverRunHooks) {
          driverRunHook.preDriverRun(hookContext);
      }
    } catch (Exception e) {
      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    // Reset the perf logger
    PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TIME_TO_SUBMIT);

    int ret;
    boolean requireLock = false;
    boolean ckLock = false;
    try {
      ckLock = checkConcurrency();
      createTxnManager();
    } catch (SemanticException e) {
      errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      ret = 10;
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }
    ret = recordValidTxns();
    if (ret != 0) {
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    if (!alreadyCompiled) {
      ret = compileInternal(command);
      if (ret != 0) {
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
      }
    }

    // We set the txn manager on the ctx here because each query has its own
    // ctx object, while the txn manager is shared across the same Driver
    // instance, which can run multiple queries.
    ctx.setHiveTxnManager(txnMgr);

    if (ckLock) {
      boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
      if (lockOnlyMapred) {
        Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
        taskQueue.addAll(plan.getRootTasks());
        while (taskQueue.peek() != null) {
          Task<? extends Serializable> tsk = taskQueue.remove();
          requireLock = requireLock || tsk.requireLock();
          if (requireLock) {
            break;
          }
          if (tsk instanceof ConditionalTask) {
            taskQueue.addAll(((ConditionalTask) tsk).getListTasks());
          }
          if (tsk.getChildTasks() != null) {
            taskQueue.addAll(tsk.getChildTasks());
          }
          // Do not add the backup task here, because the backup task should be
          // the same type as the original task.
        }
      } else {
        requireLock = true;
      }
    }

    if (requireLock) {
      ret = acquireReadWriteLocks();
      if (ret != 0) {
        try {
          releaseLocks(ctx.getHiveLocks());
        } catch (LockException e) {
          // Not much to do here
        }
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
      }
    }

    ret = execute();
    if (ret != 0) {
      // if requireLock is false, this release is a no-op because no locks were acquired
      try {
        releaseLocks(ctx.getHiveLocks());
      } catch (LockException e) {
        // Nothing to do here
      }
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    // if requireLock is false, this release is a no-op because no locks were acquired
    try {
      releaseLocks(ctx.getHiveLocks());
    } catch (LockException e) {
      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.close(LOG, plan);

    // Take all the driver run hooks and post-execute them.
    try {
      for (HiveDriverRunHook driverRunHook : driverRunHooks) {
          driverRunHook.postDriverRun(hookContext);
      }
    } catch (Exception e) {
      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    return new CommandProcessorResponse(ret);
  }
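
Because run() can throw CommandNeedRetryException, interactive callers typically wrap it in a retry loop. A minimal sketch of that pattern, assuming a Driver instance named driver (Hive's own CLI does something similar; details vary by version):

    CommandProcessorResponse cpr;
    while (true) {
      try {
        cpr = driver.run(command);
        break;
      } catch (CommandNeedRetryException e) {
        // The driver asked for the command to be re-submitted; try again.
      }
    }
    if (cpr.getResponseCode() != 0) {
      System.err.println(cpr.getErrorMessage());
    }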

    SessionState session = new SessionState(getHiveConf());
    SessionState.start(session);
    Driver driver = new Driver(session.getConf(), getUser(), null);

    // compile the query
    CommandProcessorResponse compilerStatus = driver
        .compileAndRespond(queryStr);
    if (compilerStatus.getResponseCode() != 0) {
      String errMsg = compilerStatus.getErrorMessage();
      if (errMsg.contains(HiveAuthzConf.HIVE_SENTRY_PRIVILEGE_ERROR_MESSAGE)) {
        printMissingPerms(getHiveConf().get(
            HiveAuthzConf.HIVE_SENTRY_AUTH_ERRORS));
      }
      throw new SemanticException("Compilation error: "
          + compilerStatus.getErrorMessage());
    }
    driver.close();
    System.out
        .println("User " + getUser() + " has privileges to run the query");
  }

    @Test
    public void testTableCreateDrop() throws Exception {
        Initialize();

        hcatDriver.run("drop table test_table");
        CommandProcessorResponse response = hcatDriver
                .run("create table test_table(key int, value string) STORED BY " +
                         "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");

        assertEquals(0, response.getResponseCode());

        HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
        boolean doesTableExist = hAdmin.tableExists("test_table");

        assertTrue(doesTableExist);

    @Test
    public void testTableCreateDropDifferentCase() throws Exception {
        Initialize();

        hcatDriver.run("drop table test_Table");
        CommandProcessorResponse response = hcatDriver
                .run("create table test_Table(key int, value string) STORED BY " +
                             "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");

        assertEquals(0, response.getResponseCode());

        // The HBase table gets created with a lower-case name unless one is
        // specified explicitly as a table property.
        HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
        boolean doesTableExist = hAdmin.tableExists("test_table");

    @Test
    public void testTableCreateDropCaseSensitive() throws Exception {
        Initialize();

        hcatDriver.run("drop table test_Table");
        CommandProcessorResponse response = hcatDriver
                .run("create table test_Table(key int, value string) STORED BY " +
                             "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val'," +
                    " 'hbase.table.name'='CaseSensitiveTable')");

        assertEquals(0, response.getResponseCode());

        HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
        boolean doesTableExist = hAdmin.tableExists("CaseSensitiveTable");

        assertTrue(doesTableExist);

    @Test
    public void testTableDropNonExistent() throws Exception {
        Initialize();

        hcatDriver.run("drop table mytable");
        CommandProcessorResponse response = hcatDriver
                .run("create table mytable(key int, value string) STORED BY " +
                     "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");

        assertEquals(0, response.getResponseCode());

        HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
        boolean doesTableExist = hAdmin.tableExists("mytable");
        assertTrue(doesTableExist);

        //Now delete the table from hbase
        if (hAdmin.isTableEnabled("mytable")) {
            hAdmin.disableTable("mytable");
        }
        hAdmin.deleteTable("mytable");
        doesTableExist = hAdmin.tableExists("mytable");
        assertFalse(doesTableExist);

        CommandProcessorResponse responseTwo = hcatDriver.run("drop table mytable");
        assertEquals(0, responseTwo.getResponseCode());

    }

        hAdmin.createTable(tableDesc);
        boolean doesTableExist = hAdmin.tableExists(tableName);
        assertTrue(doesTableExist);

        hcatDriver.run("drop table mytabletwo");
        CommandProcessorResponse response = hcatDriver
                .run("create external table mytabletwo(key int, valueone string, valuetwo string) STORED BY " +
                     "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
                    + "TBLPROPERTIES ('hbase.columns.mapping'=':key,familyone:val,familytwo:val'," +
                    "'hbase.table.name'='testTable')");

        assertEquals(0, response.getResponseCode());

    }


