Package org.apache.hadoop.hive.ql.session

Examples of org.apache.hadoop.hive.ql.session.SessionState
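
SessionState carries a Hive query session's configuration, output streams, resources, and history. It is bound to the current thread with SessionState.start() and looked up anywhere on that thread with SessionState.get(). The following is a minimal sketch of that pattern, assuming a SessionState(HiveConf) constructor and that the output stream may still be unset; the excerpts below show the same calls in real Hive code.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionStateExample {
  public static void main(String[] args) {
    // Assumed: a default HiveConf is sufficient for this sketch.
    HiveConf conf = new HiveConf();

    // Assumed constructor; the excerpts below only show SessionState.start(ss)
    // being called with an already-built SessionState.
    SessionState ss = new SessionState(conf);
    SessionState.start(ss);   // attach the session to the current thread

    // Anywhere later on the same thread:
    SessionState current = SessionState.get();
    if (current != null && current.out != null) {
      current.out.println("session conf loaded: " + (current.getConf() != null));
    } else {
      System.out.println("no session output stream configured");
    }
  }
}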


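Clearing per-command query state on the shared SessionState inside a CLI command loop, and honoring the CLIIGNOREERRORS setting when a command fails: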
          continue;
        }

        ret = processCmd(command);
        //wipe cli query state
        SessionState ss = SessionState.get();
        ss.setCommandType(null);
        command = "";
        lastRet = ret;
        boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS);
        if (ret != 0 && !ignoreErrors) {
          CommandProcessorFactory.clean((HiveConf) conf);
View Full Code Here


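Logging plan progress to HiveHistory before and after a task's execute() call: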
   *
   * @return return value of execute()
   */
  public int executeTask() {
    try {
      SessionState ss = SessionState.get();
      this.setStarted();
      if (ss != null) {
        ss.getHiveHistory().logPlanProgress(queryPlan);
      }
      int retval = execute(driverContext);
      this.setDone();
      if (ss != null) {
        ss.getHiveHistory().logPlanProgress(queryPlan);
      }
      return retval;
    } catch (IOException e) {
      throw new RuntimeException(e.getMessage(), e); // keep the IOException as the cause
    }
View Full Code Here

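Monitoring a running Hadoop job: the SessionState's HiveHistory records mapper and reducer counts, task counters, and progress while the job is polled and checked for fatal errors: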
        // rj.getJobState() again and we do not want to do an extra RPC call
        initializing = false;
      }

      if (!initOutputPrinted) {
        SessionState ss = SessionState.get();

        String logMapper;
        String logReducer;

        TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID());
        if (mappers == null) {
          logMapper = "no information for number of mappers; ";
        } else {
          numMap = mappers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
          }
          logMapper = "number of mappers: " + numMap + "; ";
        }

        TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID());
        if (reducers == null) {
          logReducer = "no information for number of reducers. ";
        } else {
          numReduce = reducers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
          }
          logReducer = "number of reducers: " + numReduce;
        }

        console
            .printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
        initOutputPrinted = true;
      }

      RunningJob newRj = jc.getJob(rj.getJobID());
      if (newRj == null) {
        // under exceptional load, hadoop may not be able to look up status
        // of finished jobs (because it has purged them from memory). From
        // hive's perspective - it's equivalent to the job having failed.
        // So raise a meaningful exception
        throw new IOException("Could not find status of job:" + rj.getJobID());
      } else {
        th.setRunningJob(newRj);
        rj = newRj;
      }

      // If fatal errors happen we should kill the job immediately rather than
      // let the job retry several times, which eventually lead to failure.
      if (fatal) {
        continue; // wait until rj.isComplete
      }

      Counters ctrs = th.getCounters();

      if (fatal = checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
        rj.killJob();
        continue;
      }
      errMsg.setLength(0);

      updateCounters(ctrs, rj);

      // Prepare data for Client Stat Publishers (if any present) and execute them
      if (clientStatPublishers.size() > 0 && ctrs != null) {
        Map<String, Double> extractedCounters = extractAllCounterValues(ctrs);
        for (ClientStatsPublisher clientStatPublisher : clientStatPublishers) {
          try {
            clientStatPublisher.run(extractedCounters, rj.getID().toString());
          } catch (RuntimeException runtimeException) {
            LOG.error("Exception " + runtimeException.getClass().getCanonicalName()
                + " thrown when running clientStatsPublishers. The stack trace is: ",
                runtimeException);
          }
        }
      }

      String report = " " + getId() + " map = " + mapProgress + "%,  reduce = " + reduceProgress
          + "%";


      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {
        // find out CPU msecs
        // In the case that we can't find out this number, we just skip the step to print
        // it out.
        if (ctrs != null) {
          Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
              "CPU_MILLISECONDS");
          if (counterCpuMsec != null) {
            long newCpuMSec = counterCpuMsec.getValue();
            if (newCpuMSec > 0) {
              cpuMsec = newCpuMSec;
              report += ", Cumulative CPU "
                + (cpuMsec / 1000D) + " sec";
            }
          }
        }

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
            ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
            this.callBackObj.logPlanProgress(ss);
          }
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    if (cpuMsec > 0) {
      console.printInfo("MapReduce Total cumulative CPU time: "
          + Utilities.formatMsecToStr(cpuMsec));
    }

    boolean success;

    Counters ctrs = th.getCounters();
    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
        }
        success = rj.isSuccessful();
      }
    }

    if (ctrs != null) {
      Counter counterCpuMsec = ctrs.findCounter("org.apache.hadoop.mapred.Task$Counter",
          "CPU_MILLISECONDS");
      if (counterCpuMsec != null) {
        long newCpuMSec = counterCpuMsec.getValue();
        if (newCpuMSec > cpuMsec) {
          cpuMsec = newCpuMSec;
        }
      }
    }

    MapRedStats mapRedStats = new MapRedStats(numMap, numReduce, cpuMsec, success, rj.getID().toString());
    mapRedStats.setCounters(ctrs);

    // update based on the final value of the counters
    updateCounters(ctrs, rj);

    SessionState ss = SessionState.get();
    if (ss != null) {
      this.callBackObj.logPlanProgress(ss);
    }
    // LOG.info(queryPlan);
    return mapRedStats;
View Full Code Here

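Redirecting a session's out and err streams to a query output file and starting it, after closing the previous session's output stream: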
    outf = new File(outf, qf.getName().concat(".out"));
    FileOutputStream fo = new FileOutputStream(outf);
    ss.out = new PrintStream(fo, true, "UTF-8");
    ss.err = new CachingPrintStream(fo, true, "UTF-8");
    ss.setIsSilent(true);
    SessionState oldSs = SessionState.get();
    if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
      oldSs.out.close();
    }
    SessionState.start(ss);
View Full Code Here

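Closing the current session's output stream before the output file is sorted (the comment notes that sort opens the file in write mode on Windows):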
    int result = 0;

    if (sortResults) {
      // sort will try to open the output file in write mode on windows. We need to
      // close it first.
      SessionState ss = SessionState.get();
      if (ss != null && ss.out != null && ss.out != System.out) {
        ss.out.close();
      }

      String inSorted = inFileName + SORT_SUFFIX;
View Full Code Here

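Downloading a script resource through the session when the program name refers to a downloadable resource: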
  private static int getPositionFromInternalName(String internalName) {
    return HiveConf.getPositionFromInternalName(internalName);
  }

  private String fetchFilesNotInLocalFilesystem(String cmd) {
    SessionState ss = SessionState.get();
    String progName = getScriptProgName(cmd);

    if (SessionState.canDownloadResource(progName)) {
      String filePath = ss.add_resource(ResourceType.FILE, progName, true);
      if (filePath == null) {
        throw new RuntimeException("Could not download the resource: " + progName);
      }
      Path p = new Path(filePath);
      String fileName = p.getName();
View Full Code Here

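Building the CLI driver's FsShell from the current session's configuration, falling back to a fresh Configuration when no session exists: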
  private Driver qp;
  private FsShell dfs;
  private LogHelper console;

  public CliDriver() {
    SessionState ss = SessionState.get();
    sp = new SetProcessor();
    qp = new Driver();
    dfs = new FsShell(ss != null ? ss.getConf() : new Configuration());
    Log LOG = LogFactory.getLog("CliDriver");
    console = new LogHelper(LOG);
  }
View Full Code Here

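Dispatching a CLI command (set, quit/exit, shell escape, dfs, and list/add/delete resource) using the current SessionState's streams and resource methods: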
    Log LOG = LogFactory.getLog("CliDriver");
    console = new LogHelper(LOG);
  }
 
  public int processCmd(String cmd) {
    SessionState ss = SessionState.get();
   
    String cmd_trimmed = cmd.trim();
    String[] tokens = cmd_trimmed.split("\\s+");
    String cmd_1 = cmd_trimmed.substring(tokens[0].length());
    int ret = 0;
   
    if(tokens[0].toLowerCase().equals("set")) {

      ret = sp.run(cmd_1);

    } else if (cmd_trimmed.toLowerCase().equals("quit") || cmd_trimmed.toLowerCase().equals("exit")) {

      // if we have come this far - either the previous commands
      // are all successful or this is command line. in either case
      // this counts as a successful run
      System.exit(0);

    } else if (cmd_trimmed.startsWith("!")) {

      String shell_cmd = cmd_trimmed.substring(1);

      //shell_cmd = "/bin/bash -c \'" + shell_cmd + "\'";
      try {
        Process executor = Runtime.getRuntime().exec(shell_cmd);
        StreamPrinter outPrinter = new StreamPrinter(executor.getInputStream(), null, ss.out);
        StreamPrinter errPrinter = new StreamPrinter(executor.getErrorStream(), null, ss.err);
       
        outPrinter.start();
        errPrinter.start();
     
        ret = executor.waitFor();
        if (ret != 0) {
          console.printError("Command failed with exit code = " + ret);
        }
      }
      catch (Exception e) {
        console.printError("Exception raised from Shell command " + e.getLocalizedMessage(),
                           org.apache.hadoop.util.StringUtils.stringifyException(e));
        ret = 1;
      }

    } else if (tokens[0].toLowerCase().equals("dfs")) {

      String [] alt_tokens = new String [tokens.length-1];
      System.arraycopy(tokens, 1, alt_tokens, 0, tokens.length-1);
      tokens = alt_tokens;

      try {
        PrintStream oldOut = System.out;
        System.setOut(ss.out);
        ret = dfs.run(tokens);
        System.setOut(oldOut);
        if (ret != 0) {
          console.printError("Command failed with exit code = " + ret);
        }
      } catch (Exception e) {
        console.printError("Exception raised from DFSShell.run " + e.getLocalizedMessage(),
                           org.apache.hadoop.util.StringUtils.stringifyException(e));
        ret = 1;
      }

    } else if (tokens[0].toLowerCase().equals("list")) {

      SessionState.ResourceType t;
      if(tokens.length < 2 || (t = SessionState.find_resource_type(tokens[1])) == null) {
        console.printError("Usage: list [" +
                           StringUtils.join(SessionState.ResourceType.values(),"|") +
                           "] [<value> [<value>]*]" );
        ret = 1;
      } else {
        List<String> filter = null;
        if(tokens.length >=3) {
          System.arraycopy(tokens, 2, tokens, 0, tokens.length-2);
          filter = Arrays.asList(tokens);
        }
        Set<String> s = ss.list_resource(t, filter);
        if(s != null && !s.isEmpty())
          ss.out.println(StringUtils.join(s, "\n"));
      }

    } else if (tokens[0].toLowerCase().equals("add")) {

      SessionState.ResourceType t;
      if(tokens.length < 3 || (t = SessionState.find_resource_type(tokens[1])) == null) {
        console.printError("Usage: add [" +
                           StringUtils.join(SessionState.ResourceType.values(),"|") +
                           "] <value> [<value>]*");
        ret = 1;
      } else {
        for(int i = 2; i<tokens.length; i++) {
          ss.add_resource(t, tokens[i]);
        }
      }

    } else if (tokens[0].toLowerCase().equals("delete")) {

      SessionState.ResourceType t;
      if(tokens.length < 2 || (t = SessionState.find_resource_type(tokens[1])) == null) {
        console.printError("Usage: delete [" +
                           StringUtils.join(SessionState.ResourceType.values(),"|") +
                           "] [<value>]");
        ret = 1;
      } else if (tokens.length >= 3) {
        for(int i = 2; i<tokens.length; i++) {
          ss.delete_resource(t, tokens[i]);
        }
      } else {
        ss.delete_resource(t);
      }

    } else if (!cmd_trimmed.equals("")) {
      PrintStream out = ss.out;

View Full Code Here

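Dumping the silent setting and all configured properties to the session's output stream: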
      return false;
    throw new IllegalArgumentException(prefix + "'" + value + "' is not a boolean");
  }

  private void dumpOptions(Properties p) {
    SessionState ss = SessionState.get();

    ss.out.println("silent=" + (ss.getIsSilent() ? "on" : "off"));
    for(Object one: p.keySet()) {
      String oneProp = (String)one;
      String oneValue = p.getProperty(oneProp);
      ss.out.println(oneProp+"="+oneValue);
    }
View Full Code Here

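Printing a single property value, or noting that it is undefined, on the session's output stream: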
      ss.out.println(oneProp+"="+oneValue);
    }
  }

  private void dumpOption(Properties p, String s) {
    SessionState ss = SessionState.get();
   
    if(p.getProperty(s) != null) {
      ss.out.println(s+"="+p.getProperty(s));
    } else {
      ss.out.println(s+" is undefined");
View Full Code Here
