Examples of HiveMetaStoreClient


Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    Pair<String, String> dbTablePair = PigHCatUtil.getDBTableNames(location);
    String dbName = dbTablePair.first;
    String tableName = dbTablePair.second;
    Table table = null;
    HiveMetaStoreClient client = null;
    try {
      client = getHiveMetaClient(hcatServerUri, hcatServerPrincipal, PigHCatUtil.class);
      table = HCatUtil.getTable(client, dbName, tableName);
    } catch (NoSuchObjectException nsoe) {
      throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend
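
The excerpt resolves a Pig location to a metastore table through HCatUtil. For comparison, a minimal sketch of the same lookup made directly against HiveMetaStoreClient (an illustration only, assuming a reachable metastore; "default" and "my_table" are placeholder names):

    HiveConf conf = new HiveConf();
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // returns the Thrift-level table; HCatUtil.getTable wraps it in a richer Table
      org.apache.hadoop.hive.metastore.api.Table t = client.getTable("default", "my_table");
    } catch (NoSuchObjectException nsoe) {
      // the table does not exist; the excerpt above maps this case to a PigException
    } finally {
      client.close(); // release the Thrift connection
    }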

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  }

  @Override
  public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
    OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context);
    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      handleDuplicatePublish(context,
        jobInfo,
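
The excerpt stops before cleanup, but clients obtained through HCatUtil.getHiveClient are normally released in a finally block. A sketch of that pattern, assuming the closeHiveClientQuietly helper present in HCatalog releases of this vintage:

    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      // ... validate the job's output specs against the table ...
    } finally {
      HCatUtil.closeHiveClientQuietly(client); // null-safe close (assumed helper)
    }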

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  @Before
  public void setUp() throws Exception {
    if (driver == null) {
      setUpHiveConf();
      driver = new Driver(hiveConf);
      client = new HiveMetaStoreClient(hiveConf);
      SessionState.start(new CliSessionState(hiveConf));
    }
  }
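
The setup keeps the client in a field but no matching teardown is shown. A minimal sketch of one, assuming JUnit 4 and the fields above:

    @After
    public void tearDown() throws Exception {
      if (client != null) {
        client.close(); // release the Thrift connection opened in setUp
        client = null;
      }
    }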

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hcatConf.set(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname, "60");
    clientWH = new Warehouse(hcatConf);
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }
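
Once constructed, the client can be smoke-tested before the tests proper run. A hypothetical check (assuming JUnit's Assert is on the classpath):

    // every metastore has a "default" database, so this should always pass
    List<String> databases = msc.getAllDatabases();
    Assert.assertTrue(databases.contains("default"));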

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  }

  private String buildHcatDelegationToken(String user)
    throws IOException, InterruptedException, MetaException, TException {
    HiveConf c = new HiveConf();
    final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final TokenWrapper twrapper = new TokenWrapper();
    final UserGroupInformation ugi = UgiFactory.getUgi(user);
    String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
      public String run()
        throws IOException, MetaException, TException {
        String u = ugi.getUserName();
        return client.getDelegationToken(u);
      }
    });
    return s;
  }
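
The method returns the token only in string form. To authenticate with it, the string is typically rehydrated into a Hadoop Token and attached to a UGI. A sketch, assuming the identifier class lives at org.apache.hadoop.hive.thrift.DelegationTokenIdentifier as in Hive releases of this era, and with a hypothetical service name:

    String tokenStrForm = buildHcatDelegationToken("someuser"); // hypothetical call site
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(tokenStrForm);
    token.setService(new Text("hcat-metastore")); // service name is a placeholder
    ugi.addToken(token); // metastore calls made under this UGI can now use the token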

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

      moveTaskOutputs(fs, src, src, tblPath, false);
      fs.delete(src, true);
      return;
    }

    HiveMetaStoreClient client = null;
    HCatTableInfo tableInfo = jobInfo.getTableInfo();
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
      HiveConf hiveConf = HCatUtil.getHiveConf(conf);
      client = HCatUtil.getHiveClient(hiveConf);
      StorerInfo storer = InternalUtil.extractStorerInfo(table.getTTable().getSd(),table.getParameters());

      FileStatus tblStat = fs.getFileStatus(tblPath);
      String grpName = tblStat.getGroup();
      FsPermission perms = tblStat.getPermission();

      List<Partition> partitionsToAdd = new ArrayList<Partition>();
      if (!dynamicPartitioningUsed){
        partitionsToAdd.add(
            constructPartition(
                context,jobInfo,
                tblPath.toString(), jobInfo.getPartitionValues()
                ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
                ,table, fs
                ,grpName,perms));
      }else{
        for (Entry<String,Map<String,String>> entry : partitionsDiscoveredByPath.entrySet()){
          partitionsToAdd.add(
              constructPartition(
                  context,jobInfo,
                  getPartitionRootLocation(entry.getKey(),entry.getValue().size()), entry.getValue()
                  ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
                  ,table, fs
                  ,grpName,perms));
        }
      }

      ArrayList<Map<String,String>> ptnInfos = new ArrayList<Map<String,String>>();
      for(Partition ptn : partitionsToAdd){
        ptnInfos.add(InternalUtil.createPtnKeyValueMap(new Table(tableInfo.getTable()), ptn));
      }

      //Publish the new partition(s)
      if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())){

        Path src = new Path(ptnRootLocation);
        // check here for each dir we're copying out, to see if it
        // already exists, error out if so
        moveTaskOutputs(fs, src, src, tblPath, true);
        moveTaskOutputs(fs, src, src, tblPath, false);
        fs.delete(src, true);
        try {
          updateTableSchema(client, table, jobInfo.getOutputSchema());
          LOG.info("HAR is being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
          client.add_partitions(partitionsToAdd);
          partitionsAdded = partitionsToAdd;
        } catch (Exception e){
          // There was an error adding partitions : rollback fs copy and rethrow
          for (Partition p : partitionsToAdd){
            Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
            if (fs.exists(ptnPath)){
              fs.delete(ptnPath,true);
            }
          }
          throw e;
        }

      }else{
        // no harProcessor, regular operation
        updateTableSchema(client, table, jobInfo.getOutputSchema());
        LOG.info("HAR not is not being used. The table {} has new partitions {}.", table.getTableName(), ptnInfos);
        if (dynamicPartitioningUsed && (partitionsToAdd.size()>0)){
          Path src = new Path(ptnRootLocation);
          moveTaskOutputs(fs, src, src, tblPath, true);
          moveTaskOutputs(fs, src, src, tblPath, false);
          fs.delete(src, true);
        }
        client.add_partitions(partitionsToAdd);
        partitionsAdded = partitionsToAdd;
      }
    } catch (Exception e) {
      if (partitionsAdded.size() > 0) {
        try {
          // baseCommitter.cleanupJob failed, try to clean up the
          // metastore
          for (Partition p : partitionsAdded) {
            client.dropPartition(tableInfo.getDatabaseName(),
                tableInfo.getTableName(), p.getValues());
          }
        } catch (Exception te) {
          // Keep cause as the original exception
          throw new HCatException(
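
The rollback that the excerpt cuts off is the compensating half of add_partitions. Condensed to its essentials, the pattern is: register all partitions in one round trip, remember what was registered, and on a later failure unregister it before rethrowing (dbName and tableName stand in for the values pulled from tableInfo):

    try {
      client.add_partitions(partitionsToAdd); // one metastore round trip for all partitions
      partitionsAdded = partitionsToAdd;      // record what must be undone
      // ... any later commit step that might still fail ...
    } catch (Exception e) {
      // compensate: unregister whatever made it into the metastore
      for (Partition p : partitionsAdded) {
        client.dropPartition(dbName, tableName, p.getValues());
      }
      throw e; // keep the original failure as the reported cause
    }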

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

        HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
        "false");
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    }
  }

  private void cancelDelegationTokens(JobContext context) throws IOException{
    LOG.info("Cancelling deletgation token for the job.");
    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil
          .getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      // cancel the delegation tokens that were acquired for this job now that
      // we are done - we should cancel if the tokens were acquired by
      // HCatOutputFormat and not if they were supplied by Oozie.
      // In the latter case the HCAT_KEY_TOKEN_SIGNATURE property in
      // the conf will not be set
      String tokenStrForm = client.getTokenStrForm();
      if (tokenStrForm != null
          && context.getConfiguration().get(
              HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
        client.cancelDelegationToken(tokenStrForm);
      }
    } catch (MetaException e) {
      LOG.warn("MetaException while cancelling delegation token.", e);
    } catch (TException e) {
      LOG.warn("TException while cancelling delegation token.", e);
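
Paired with the getDelegationToken call shown earlier, the token life cycle is a simple round trip. A hypothetical end-to-end sketch:

    // acquire a token for the current user, hand it to the job, revoke it when done
    String tok = client.getDelegationToken(
        UserGroupInformation.getCurrentUser().getUserName());
    // ... the job runs, authenticating with tok ...
    client.cancelDelegationToken(tok); // revoke so the token cannot be reused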

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");

    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString());
    try {
      hmsc = new HiveMetaStoreClient(hiveConf, null);
      initalizeTables();
    } catch (Throwable e) {
      LOG.error("Exception encountered while setting up testcase", e);
      throw new Exception(e);
    } finally {
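
The finally clause is truncated above. As a hypothetical follow-up once setup succeeds, the client can be exercised directly, for example to confirm that the tables created by initalizeTables are visible:

    // getAllTables lists the table names registered in the given database
    List<String> tables = hmsc.getAllTables("default");
    LOG.info("Tables visible after setup: " + tables);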