Package org.apache.hadoop.hive.ql.session

Examples of org.apache.hadoop.hive.ql.session.SessionState
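
SessionState is the per-thread session object used throughout the Hive query layer. Before fragments like the ones below run, a session has normally been bound to the current thread. A minimal sketch of that setup (the class name SessionStateExample is illustrative only; it assumes a default HiveConf is available on the classpath):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.session.SessionState;

public class SessionStateExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // bind a new session to the current thread
    SessionState.start(new SessionState(conf));

    // later code (as in the snippets below) retrieves the thread-local session
    SessionState ss = SessionState.get();
    if (ss != null && ss.getAuthenticator() != null) {
      System.out.println("Session user: " + ss.getAuthenticator().getUserName());
    }
  }
}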


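    // Fragment of a Driver.run() override: after a successful command, fix up
    // filesystem permissions/groups, then reset the HCatalog create-DB/table conf vars.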
      cpr = super.run(command);
    } catch (CommandNeedRetryException e) {
      return new CommandProcessorResponse(-1, e.toString(), "");
    }

    SessionState ss = SessionState.get();

    if (cpr.getResponseCode() == 0){
      // Only attempt this if the command was successful.
      int rc = setFSPermsNGrp(ss);
      cpr = new CommandProcessorResponse(rc);
    }
    // reset conf vars
    ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, "");
    ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, "");

    return cpr;
  }


  /**
   * Called at the start of query execution, from Driver.run().
   */
  public void startQuery(String cmd, String id) {
    SessionState ss = SessionState.get();
    if (ss == null) {
      return;
    }
    QueryInfo ji = new QueryInfo();


  /**
   * Records the start of a task under "queryId:taskId" in the task history.
   *
   * @param queryId id of the query the task belongs to
   * @param task the task being started
   * @param taskName display name of the task
   */
  public void startTask(String queryId, Task<? extends Serializable> task,
      String taskName) {
    SessionState ss = SessionState.get();
    if (ss == null) {
      return;
    }
    TaskInfo ti = new TaskInfo();

    ti.hm.put(Keys.QUERY_ID.name(), ss.getQueryId());
    ti.hm.put(Keys.TASK_ID.name(), task.getId());
    ti.hm.put(Keys.TASK_NAME.name(), taskName);

    String id = queryId + ":" + task.getId();
    taskInfoMap.put(id, ti);

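      // Fragment of table creation: apply the session's automatic create-table
      // grants (user/group/role) as privileges on the new table.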
      if (tbl.getParameters() != null) {
        tbl.getParameters().remove(Constants.DDL_TIME);
      }
      org.apache.hadoop.hive.metastore.api.Table tTbl = tbl.getTTable();
      PrincipalPrivilegeSet principalPrivs = new PrincipalPrivilegeSet();
      SessionState ss = SessionState.get();
      if (ss != null) {
        CreateTableAutomaticGrant grants = ss.getCreateTableGrants();
        if (grants != null) {
          principalPrivs.setUserPrivileges(grants.getUserGrants());
          principalPrivs.setGroupPrivileges(grants.getGroupGrants());
          principalPrivs.setRolePrivileges(grants.getRoleGrants());
          tTbl.setPrivileges(principalPrivs);

    }
    return metaStoreClient;
  }

  private String getUserName() {
    SessionState ss = SessionState.get();
    if (ss != null && ss.getAuthenticator() != null) {
      return ss.getAuthenticator().getUserName();
    }
    return null;
  }

    }
    return null;
  }

  private List<String> getGroupNames() {
    SessionState ss = SessionState.get();
    if (ss != null && ss.getAuthenticator() != null) {
      return ss.getAuthenticator().getGroupNames();
    }
    return null;
  }

  private static int getPositionFromInternalName(String internalName) {
    return HiveConf.getPositionFromInternalName(internalName);
  }

  private String fetchFilesNotInLocalFilesystem(String cmd) {
    SessionState ss = SessionState.get();
    String progName = getScriptProgName(cmd);

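    // If the script is referenced through a remote scheme (e.g. hdfs://), register
    // it as a session resource so it is downloaded to the local filesystem.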
    if (progName.matches("("+ SessionState.getMatchingSchemaAsRegex() +")://.*")) {
      String filePath = ss.add_resource(ResourceType.FILE, progName, true);
      if (filePath == null) {
        throw new RuntimeException("Could not download the resource: " + progName);
      }
      Path p = new Path(filePath);
      String fileName = p.getName();

    return true;
  }

  protected static String getResourceFiles(Configuration conf, SessionState.ResourceType t) {
    // fill in local files to be added to the task environment
    SessionState ss = SessionState.get();
    Set<String> files = (ss == null) ? null : ss.list_resource(t, null);
    if (files != null) {
      List<String> realFiles = new ArrayList<String>(files.size());
      for (String one : files) {
        try {
          realFiles.add(Utilities.realFile(one, conf));
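
The two fragments above show the session resource registry: add_resource registers (and, where needed, downloads) a file with the session, and list_resource enumerates what has been registered. A minimal sketch of the same pattern, using only the signatures visible above (the HDFS path and the class name are illustrative, not from the original code):

import java.util.Set;

import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.session.SessionState.ResourceType;

public class ResourceExample {
  public static void listScriptResources() {
    SessionState ss = SessionState.get();
    if (ss == null) {
      return; // no session bound to this thread
    }
    // register a file with the session; as in the fragment above, the boolean flag
    // causes a remote path to be fetched and the local path to be returned
    String localPath = ss.add_resource(ResourceType.FILE,
        "hdfs://namenode/tmp/my_script.py", true);

    // later, e.g. when building the task environment, list everything registered
    Set<String> files = ss.list_resource(ResourceType.FILE, null);
    System.out.println("FILE resources: " + files + " (script at " + localPath + ")");
  }
}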

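  // Compile-time authorization in the Driver: check write privileges on the outputs,
  // then read privileges on the inputs, honoring per-partition privileges when the
  // table sets PARTITION_LEVEL_PRIVILEGE.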
  private void doAuthorization(BaseSemanticAnalyzer sem)
      throws HiveException, AuthorizationException {
    HashSet<ReadEntity> inputs = sem.getInputs();
    HashSet<WriteEntity> outputs = sem.getOutputs();
    SessionState ss = SessionState.get();
    HiveOperation op = ss.getHiveOperation();
    Hive db = sem.getDb();
    if (op != null) {
      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
          || op.equals(HiveOperation.CREATETABLE)) {
        ss.getAuthorizer().authorize(
            db.getDatabase(db.getCurrentDatabase()), null,
            HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
      } else {
        if (op.equals(HiveOperation.IMPORT)) {
          ImportSemanticAnalyzer isa = (ImportSemanticAnalyzer) sem;
          if (!isa.existsTable()) {
            ss.getAuthorizer().authorize(
                db.getDatabase(db.getCurrentDatabase()), null,
                HiveOperation.CREATETABLE_AS_SELECT.getOutputRequiredPrivileges());
          }
        }
      }
      if (outputs != null && outputs.size() > 0) {
        for (WriteEntity write : outputs) {

          if (write.getType() == WriteEntity.Type.PARTITION) {
            Partition part = db.getPartition(write.getTable(), write
                .getPartition().getSpec(), false);
            if (part != null) {
              ss.getAuthorizer().authorize(write.getPartition(), null,
                      op.getOutputRequiredPrivileges());
              continue;
            }
          }

          if (write.getTable() != null) {
            ss.getAuthorizer().authorize(write.getTable(), null,
                    op.getOutputRequiredPrivileges());
          }
        }

      }
    }

    if (inputs != null && inputs.size() > 0) {

      Map<Table, List<String>> tab2Cols = new HashMap<Table, List<String>>();
      Map<Partition, List<String>> part2Cols = new HashMap<Partition, List<String>>();

      Map<String, Boolean> tableUsePartLevelAuth = new HashMap<String, Boolean>();
      for (ReadEntity read : inputs) {
        if (read.getPartition() != null) {
          Table tbl = read.getTable();
          String tblName = tbl.getTableName();
          if (tableUsePartLevelAuth.get(tblName) == null) {
            boolean usePartLevelPriv = (tbl.getParameters().get(
                "PARTITION_LEVEL_PRIVILEGE") != null && ("TRUE"
                .equalsIgnoreCase(tbl.getParameters().get(
                    "PARTITION_LEVEL_PRIVILEGE"))));
            if (usePartLevelPriv) {
              tableUsePartLevelAuth.put(tblName, Boolean.TRUE);
            } else {
              tableUsePartLevelAuth.put(tblName, Boolean.FALSE);
            }
          }
        }
      }

      if (op.equals(HiveOperation.CREATETABLE_AS_SELECT)
          || op.equals(HiveOperation.QUERY)) {
        SemanticAnalyzer querySem = (SemanticAnalyzer) sem;
        ParseContext parseCtx = querySem.getParseContext();
        Map<TableScanOperator, Table> tsoTopMap = parseCtx.getTopToTable();

        for (Map.Entry<String, Operator<? extends Serializable>> topOpMap : querySem
            .getParseContext().getTopOps().entrySet()) {
          Operator<? extends Serializable> topOp = topOpMap.getValue();
          if (topOp instanceof TableScanOperator
              && tsoTopMap.containsKey(topOp)) {
            TableScanOperator tableScanOp = (TableScanOperator) topOp;
            Table tbl = tsoTopMap.get(tableScanOp);
            List<Integer> neededColumnIds = tableScanOp.getNeededColumnIDs();
            List<FieldSchema> columns = tbl.getCols();
            List<String> cols = new ArrayList<String>();
            if (neededColumnIds != null && neededColumnIds.size() > 0) {
              for (int i = 0; i < neededColumnIds.size(); i++) {
                cols.add(columns.get(neededColumnIds.get(i)).getName());
              }
            } else {
              for (int i = 0; i < columns.size(); i++) {
                cols.add(columns.get(i).getName());
              }
            }
            if (tbl.isPartitioned() && tableUsePartLevelAuth.get(tbl.getTableName())) {
              String alias_id = topOpMap.getKey();
              PrunedPartitionList partsList = PartitionPruner.prune(parseCtx
                  .getTopToTable().get(topOp), parseCtx.getOpToPartPruner()
                  .get(topOp), parseCtx.getConf(), alias_id, parseCtx
                  .getPrunedPartitions());
              Set<Partition> parts = new HashSet<Partition>();
              parts.addAll(partsList.getConfirmedPartns());
              parts.addAll(partsList.getUnknownPartns());
              for (Partition part : parts) {
                List<String> existingCols = part2Cols.get(part);
                if (existingCols == null) {
                  existingCols = new ArrayList<String>();
                }
                existingCols.addAll(cols);
                part2Cols.put(part, existingCols);
              }
            } else {
              List<String> existingCols = tab2Cols.get(tbl);
              if (existingCols == null) {
                existingCols = new ArrayList<String>();
              }
              existingCols.addAll(cols);
              tab2Cols.put(tbl, existingCols);
            }
          }
        }
      }


      // cache which tables have already passed table-level authorization
      Set<String> tableAuthChecked = new HashSet<String>();
      for (ReadEntity read : inputs) {
        Table tbl = null;
        if (read.getPartition() != null) {
          tbl = read.getPartition().getTable();
          // use partition level authorization
          if (tableUsePartLevelAuth.get(tbl.getTableName())) {
            List<String> cols = part2Cols.get(read.getPartition());
            if (cols != null && cols.size() > 0) {
              ss.getAuthorizer().authorize(read.getPartition().getTable(),
                  read.getPartition(), cols, op.getInputRequiredPrivileges(),
                  null);
            } else {
              ss.getAuthorizer().authorize(read.getPartition(),
                  op.getInputRequiredPrivileges(), null);
            }
            continue;
          }
        } else if (read.getTable() != null) {
          tbl = read.getTable();
        }

        // If we reach here, a table-level authorization check is needed; it may
        // have already been done for this table while handling other partitions.
        if (tbl != null && !tableAuthChecked.contains(tbl.getTableName())) {
          List<String> cols = tab2Cols.get(tbl);
          if (cols != null && cols.size() > 0) {
            ss.getAuthorizer().authorize(tbl, null, cols,
                op.getInputRequiredPrivileges(), null);
          } else {
            ss.getAuthorizer().authorize(tbl, op.getInputRequiredPrivileges(),
                null);
          }
          tableAuthChecked.add(tbl.getTableName());
        }
      }

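      // Fragment of the MapReduce job monitoring loop: log mapper/reducer counts once,
      // refresh the RunningJob handle, kill the job on fatal counter errors, and push
      // progress into the Hive query history.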
        // rj.getJobState() again and we do not want to do an extra RPC call
        initializing = false;
      }

      if (!initOutputPrinted) {
        SessionState ss = SessionState.get();

        String logMapper;
        String logReducer;
 
        TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID());
        if (mappers == null) {
          logMapper = "no information for number of mappers; ";
        } else {
          int numMap = mappers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
          }
          logMapper = "number of mappers: " + numMap + "; ";
        }
 
        TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID());
        if (reducers == null) {
          logReducer = "no information for number of reducers. ";
        } else {
          int numReduce = reducers.length;
          if (ss != null) {
            ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
          }
          logReducer = "number of reducers: " + numReduce;
        }

        console
            .printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
        initOutputPrinted = true;
      }

      RunningJob newRj = jc.getJob(rj.getJobID());
      if (newRj == null) {
        // under exceptional load, hadoop may not be able to look up status
        // of finished jobs (because it has purged them from memory). From
        // hive's perspective - it's equivalent to the job having failed.
        // So raise a meaningful exception
        throw new IOException("Could not find status of job: " + rj.getJobID());
      } else {
        th.setRunningJob(newRj);
        rj = newRj;
      }

      // If fatal errors happen, we should kill the job immediately rather than
      // let the job retry several times, which eventually leads to failure.
      if (fatal) {
        continue; // wait until rj.isComplete
      }

      Counters ctrs = th.getCounters();

      // note: intentional assignment, not comparison; remember whether a fatal error was seen
      if (fatal = checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString() + ". Killing the job.");
        rj.killJob();
        continue;
      }
      errMsg.setLength(0);

      updateCounters(ctrs, rj);

      String report = " " + getId() + " map = " + mapProgress + "%,  reduce = " + reduceProgress
          + "%";

      if (!report.equals(lastReport)
          || System.currentTimeMillis() >= reportTime + maxReportInterval) {

        // write out serialized plan with counters to log file
        // LOG.info(queryPlan);
        String output = dateFormat.format(Calendar.getInstance().getTime()) + report;
        SessionState ss = SessionState.get();
        if (ss != null) {
          ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
          ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
              Keys.TASK_HADOOP_PROGRESS, output);
          ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
          this.callBackObj.logPlanProgress(ss);
        }
        console.printInfo(output);
        lastReport = report;
        reportTime = System.currentTimeMillis();
      }
    }

    boolean success;
    Counters ctrs = th.getCounters();

    if (fatal) {
      success = false;
    } else {
      // check for fatal error again in case it occurred after
      // the last check before the job is completed
      if (checkFatalErrors(ctrs, errMsg)) {
        console.printError("[Fatal Error] " + errMsg.toString());
        success = false;
      } else {
        success = rj.isSuccessful();
      }
    }

    this.task.setDone();
    // update based on the final value of the counters
    updateCounters(ctrs, rj);

    SessionState ss = SessionState.get();
    if (ss != null) {
      this.callBackObj.logPlanProgress(ss);
    }
    // LOG.info(queryPlan);
    return (success);
