Package org.apache.hadoop.hive.ql.processors

Examples of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse
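A CommandProcessorResponse is what a Hive command processor (including Driver) returns from run(): a numeric response code (0 on success), plus an error message and SQLState when something went wrong. A minimal sketch of the usual consume-and-check pattern, assuming an already-configured Driver (the helper name runAndCheck is illustrative, not part of the API):

    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

    // Run a statement and fail loudly on a non-zero response code.
    static void runAndCheck(Driver driver, String query) throws Exception {
      CommandProcessorResponse response = driver.run(query);
      if (response.getResponseCode() != 0) {
        throw new Exception("Query failed [" + response.getResponseCode() + ", "
            + response.getSQLState() + "]: " + response.getErrorMessage());
      }
    }

The snippets below show this pattern as it appears in Hive, HCatalog, and their tests.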


      int ret = 0;
      String errorMessage = "";
      String SQLState = null;

      try {
        CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
        CommandProcessorResponse response = null;
        if (proc != null) {
          if (proc instanceof Driver) {
            // A full HiveQL statement: hand the whole command to the Driver.
            isHiveQuery = true;
            response = ((Driver) proc).run(cmd);
          } else {
            // A non-query command (e.g. set, dfs): run it without the command name.
            isHiveQuery = false;
            response = proc.run(cmd_1);
          }

          ret = response.getResponseCode();
          SQLState = response.getSQLState();
          errorMessage = response.getErrorMessage();
        }
      } catch (Exception e) {
        HiveServerException ex = new HiveServerException();
        ex.setMessage("Error running query: " + e.toString());
        throw ex;
      }


    hcatDriver.run("create database " + dbName);
    hcatDriver.run("use " + dbName);
    hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE");

    CommandProcessorResponse response;

    response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'");
    assertEquals(0, response.getResponseCode());
    assertNull(response.getErrorMessage());

    response = hcatDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
    assertEquals(0, response.getResponseCode());
    assertNull(response.getErrorMessage());

    hcatDriver.run("drop table " + tblName);
    hcatDriver.run("drop database " + dbName);
  }

    hcatDriver.run("create database " + dbName);
    hcatDriver.run("use " + dbName);
    hcatDriver.run("create table " + tblName + " (a int) partitioned by (b string) stored as RCFILE");

    CommandProcessorResponse response;

    response = hcatDriver.run("alter table " + tblName + " add partition (b='2') location '/tmp'");
    assertEquals(0, response.getResponseCode());
    assertNull(response.getErrorMessage());

    response = hcatDriver.run("alter table " + tblName + " set fileformat INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' OUTPUTFORMAT " +
        "'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' inputdriver 'mydriver' outputdriver 'yourdriver'");
    assertEquals(0, response.getResponseCode());
    assertNull(response.getErrorMessage());

    hcatDriver.run("drop table " + tblName);
    hcatDriver.run("drop database " + dbName);
  }
View Full Code Here

        .thenReturn(true);
    cliDriver.setConf(conf);

    // Stub a Driver whose run() always reports success (response code 0).
    Driver proc = mock(Driver.class);

    CommandProcessorResponse cpr = mock(CommandProcessorResponse.class);
    when(cpr.getResponseCode()).thenReturn(0);
    when(proc.run(anyString())).thenReturn(cpr);

    // ...and then see what happens based on the provided schema.
    when(proc.getSchema()).thenReturn(mockSchema);
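The fragment above is cut off at the top; for reference, a self-contained sketch of the same Mockito stubbing pattern (the class and method names here are illustrative, not from the original test):

    import static org.mockito.Mockito.*;

    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;

    public class DriverStubExample {
      // Build a mocked Driver whose run() always reports success (code 0).
      public static Driver successfulDriver() throws Exception {
        CommandProcessorResponse cpr = mock(CommandProcessorResponse.class);
        when(cpr.getResponseCode()).thenReturn(0);

        Driver proc = mock(Driver.class);
        when(proc.run(anyString())).thenReturn(cpr);
        return proc;
      }
    }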

    if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) {
      createTable = createTable + "partitioned by (" + partitionedBy + ") ";
    }
    createTable = createTable + "stored as " + storageFormat();
    LOG.info("Creating table:\n {}", createTable);
    CommandProcessorResponse result = driver.run(createTable);
    int retCode = result.getResponseCode();
    if (retCode != 0) {
      throw new IOException("Failed to create table [" + createTable + "], return code from Hive driver: [" + retCode + " " + result.getErrorMessage() + "]");
    }
  }

      createTable = createTable + "partitioned by (" + partitionedBy + ") ";
    }
    createTable = createTable + "stored as RCFILE tblproperties('hcat.isd'='org.apache.hcatalog.rcfile.RCFileInputDriver'," +
      "'hcat.osd'='org.apache.hcatalog.rcfile.RCFileOutputDriver') ";
    LOG.info("Creating table:\n {}", createTable);
    CommandProcessorResponse result = driver.run(createTable);
    int retCode = result.getResponseCode();
    if (retCode != 0) {
      throw new IOException("Failed to create table [" + createTable + "], return code from Hive driver: [" + retCode + " " + result.getErrorMessage() + "]");
    }
  }

    perfLogger.PerfLogEnd(LOG, PerfLogger.RELEASE_LOCKS);
  }

  public CommandProcessorResponse run(String command) throws CommandNeedRetryException {
    CommandProcessorResponse cpr = runInternal(command);
    if(cpr.getResponseCode() == 0) {
      return cpr;
    }
    SessionState ss = SessionState.get();
    if(ss == null) {
      return cpr;
    }
    MetaDataFormatter mdf = MetaDataFormatUtils.getFormatter(ss.getConf());
    if(!(mdf instanceof JsonMetaDataFormatter)) {
      return cpr;
    }
    /* Here we want to encode the error in a machine-readable way (e.g. JSON).
     * Ideally, errorCode would always be set to a canonical error defined in ErrorMsg.
     * In practice that is rarely the case, so the messy logic below tries to tease
     * out the canonical error code if it can.  The stack trace is excluded from the
     * output when the error is a specific/expected one.
     * It's written to stdout for backward compatibility (WebHCat consumes it). */
    try {
      if(downstreamError == null) {
        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState);
        return cpr;
      }
      ErrorMsg canonicalErr = ErrorMsg.getErrorMsg(cpr.getResponseCode());
      if(canonicalErr != null && canonicalErr != ErrorMsg.GENERIC_ERROR) {
        /* Some HiveExceptions (e.g. SemanticException) don't set a
           canonical ErrorMsg explicitly, but there is logic
           (e.g. #compile()) to find an appropriate canonical error and
           return its code as the error code. In this case we want to
           preserve it for downstream code to interpret. */
        mdf.error(ss.out, errorMessage, cpr.getResponseCode(), SQLState, null);
        return cpr;
      }
      if(downstreamError instanceof HiveException) {
        HiveException rc = (HiveException) downstreamError;
        mdf.error(ss.out, errorMessage,

    errorMessage = null;
    SQLState = null;
    downstreamError = null;

    if (!validateConfVariables()) {
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    HiveDriverRunHookContext hookContext = new HiveDriverRunHookContextImpl(conf, command);
    // Get all the driver run hooks and pre-execute them.
    List<HiveDriverRunHook> driverRunHooks;
    try {
      driverRunHooks = getHooks(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS,
          HiveDriverRunHook.class);
      for (HiveDriverRunHook driverRunHook : driverRunHooks) {
          driverRunHook.preDriverRun(hookContext);
      }
    } catch (Exception e) {
      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    // Reset the perf logger
    PerfLogger perfLogger = PerfLogger.getPerfLogger(true);
    perfLogger.PerfLogBegin(LOG, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(LOG, PerfLogger.TIME_TO_SUBMIT);

    int ret;
    synchronized (compileMonitor) {
      ret = compile(command);
    }
    if (ret != 0) {
      releaseLocks(ctx.getHiveLocks());
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    boolean requireLock = false;
    boolean ckLock = checkLockManager();

    if (ckLock) {
      boolean lockOnlyMapred = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_LOCK_MAPRED_ONLY);
      if(lockOnlyMapred) {
        Queue<Task<? extends Serializable>> taskQueue = new LinkedList<Task<? extends Serializable>>();
        taskQueue.addAll(plan.getRootTasks());
        while (taskQueue.peek() != null) {
          Task<? extends Serializable> tsk = taskQueue.remove();
          requireLock = requireLock || tsk.requireLock();
          if(requireLock) {
            break;
          }
          if (tsk instanceof ConditionalTask) {
            taskQueue.addAll(((ConditionalTask)tsk).getListTasks());
          }
          if(tsk.getChildTasks()!= null) {
            taskQueue.addAll(tsk.getChildTasks());
          }
          // Do not add backup tasks here, because a backup task is the same
          // type as its original task.
        }
      } else {
        requireLock = true;
      }
    }

    if (requireLock) {
      ret = acquireReadWriteLocks();
      if (ret != 0) {
        releaseLocks(ctx.getHiveLocks());
        return new CommandProcessorResponse(ret, errorMessage, SQLState);
      }
    }

    ret = execute();
    if (ret != 0) {
      // if requireLock is false, the release here is a no-op because no locks were acquired
      releaseLocks(ctx.getHiveLocks());
      return new CommandProcessorResponse(ret, errorMessage, SQLState);
    }

    // if requireLock is false, the release here is a no-op because no locks were acquired
    releaseLocks(ctx.getHiveLocks());

    perfLogger.PerfLogEnd(LOG, PerfLogger.DRIVER_RUN);
    perfLogger.close(LOG, plan);

    // Take all the driver run hooks and post-execute them.
    try {
      for (HiveDriverRunHook driverRunHook : driverRunHooks) {
          driverRunHook.postDriverRun(hookContext);
      }
    } catch (Exception e) {
      errorMessage = "FAILED: Hive Internal Error: " + Utilities.getNameMessage(e);
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage + "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return new CommandProcessorResponse(12, errorMessage, SQLState);
    }

    return new CommandProcessorResponse(ret);
  }
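As the signature above shows, run() can throw CommandNeedRetryException. A sketch of one plausible caller-side policy, retrying a bounded number of times (the retry count and variable names are illustrative):

    // Retry the command a couple of times if the Driver asks for it.
    CommandProcessorResponse cpr = null;
    CommandNeedRetryException lastRetry = null;
    for (int attempt = 0; attempt < 2; attempt++) {
      try {
        cpr = driver.run(command);
        lastRetry = null;
        break;
      } catch (CommandNeedRetryException e) {
        // The Driver signalled that this command should be re-submitted.
        lastRetry = e;
      }
    }
    if (lastRetry != null) {
      throw lastRetry;
    }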

      sb.append(':');
      sb.append(port);
    }

    hcatDriver.run("drop table test_table");
    CommandProcessorResponse response = hcatDriver
      .run("create table test_table(key int, value string) STORED BY " +
        "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
        + "TBLPROPERTIES ('hbase.columns.mapping'=':key,cf1:val')");

    assertEquals(0, response.getResponseCode());

    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
    boolean doesTableExist = hAdmin.tableExists("test_table");
    assertTrue(doesTableExist);

    String tableQuery = "CREATE TABLE " + databaseName + "." + tableName
        + "(key string, testqualifier1 string, testqualifier2 string) STORED BY " +
        "'org.apache.hcatalog.hbase.HBaseHCatStorageHandler'"
        + "TBLPROPERTIES ('hbase.columns.mapping'=':key,testFamily:testQualifier1,testFamily:testQualifier2')";

    CommandProcessorResponse responseOne = hcatDriver.run(dbquery);
    assertEquals(0, responseOne.getResponseCode());
    CommandProcessorResponse responseTwo = hcatDriver.run(tableQuery);
    assertEquals(0, responseTwo.getResponseCode());

    HBaseAdmin hAdmin = new HBaseAdmin(getHbaseConf());
    boolean doesTableExist = hAdmin.tableExists(hbaseTableName);
    assertTrue(doesTableExist);

    populateHBaseTable(hbaseTableName, 5);
    Configuration conf = new Configuration(hcatConf);
    conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
        HCatUtil.serialize(getHiveConf().getAllProperties()));

    // output settings
    Path outputDir = new Path(getTestDir(), "mapred/testHbaseTableMRRead");
    FileSystem fs = getFileSystem();
    if (fs.exists(outputDir)) {
      fs.delete(outputDir, true);
    }
    // create job
    Job job = new Job(conf, "hbase-mr-read-test");
    job.setJarByClass(this.getClass());
    job.setMapperClass(MapReadHTable.class);
    MapReadHTable.resetCounters();

    job.setInputFormatClass(HCatInputFormat.class);
    HCatInputFormat.setInput(job.getConfiguration(), databaseName, tableName);
    job.setOutputFormatClass(TextOutputFormat.class);
    TextOutputFormat.setOutputPath(job, outputDir);
    job.setMapOutputKeyClass(BytesWritable.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(BytesWritable.class);
    job.setOutputValueClass(Text.class);
    job.setNumReduceTasks(0);
    assertTrue(job.waitForCompletion(true));
    // Note: These asserts only work with LocalJobRunner, since the map tasks run in the same JVM.
    // If using MiniMRCluster, the tests would have to be modified.
    assertFalse(MapReadHTable.error);
    assertEquals(1, MapReadHTable.count);

    String dropTableQuery = "DROP TABLE " + hbaseTableName;
    CommandProcessorResponse responseThree = hcatDriver.run(dropTableQuery);
    assertEquals(0, responseThree.getResponseCode());

    boolean isHbaseTableThere = hAdmin.tableExists(hbaseTableName);
    assertFalse(isHbaseTableThere);

    String dropDB = "DROP DATABASE " + databaseName;
    CommandProcessorResponse responseFour = hcatDriver.run(dropDB);
    assertEquals(0, responseFour.getResponseCode());
  }
