Examples of HiveMetaStoreClient
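
HiveMetaStoreClient is the Thrift client Hive provides for talking to the metastore service; the snippets below show how real projects construct it and what they call on it. As a minimal sketch of the basic construct/use/close pattern (the metastore URI, database, and table names here are placeholders, not taken from any of the examples):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class MetaStoreClientSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Placeholder URI; point this at a running metastore service.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // List the databases the metastore knows about.
      for (String db : client.getAllDatabases()) {
        System.out.println(db);
      }
      // Fetch one table's metadata (database and table names are placeholders).
      Table table = client.getTable("default", "mytbl");
      System.out.println(table.getSd().getLocation());
    } finally {
      client.close();
    }
  }
}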


Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    }

    static HiveMetaStoreClient createHiveClient(String url, Configuration conf) throws IOException, MetaException {
      HiveConf hiveConf = getHiveConf(url, conf);
//      HCatUtil.logHiveConf(LOG, hiveConf);
      return new HiveMetaStoreClient(hiveConf);
    }
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  protected void setUp() throws Exception {
    super.setUp();
    hiveConf = new HiveConf(this.getClass());

    try {
      client = new HiveMetaStoreClient(hiveConf, null);

      initTable();
    } catch (Throwable e) {
      System.err.println("Unable to open the metastore");
      System.err.println(StringUtils.stringifyException(e));
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    if(kerberosPrincipal != null){
      hiveConf.setBoolean(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, true);
      hiveConf.set(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname, kerberosPrincipal);
    }

    return new HiveMetaStoreClient(hiveConf,null);
  }
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  public static void setInput(Job job, HCatTableInfo inputInfo) throws Exception {

    // Create and initialize a JobInfo object
    // Serialize the JobInfo and save it in the Job's Configuration object

    HiveMetaStoreClient client = null;

    try {
      client = createHiveMetaClient(job.getConfiguration(),inputInfo);
      Table table = client.getTable(inputInfo.getDatabaseName(), inputInfo.getTableName());
      HCatSchema tableSchema = HCatUtil.getTableSchemaWithPtnCols(table);

      List<PartInfo> partInfoList = new ArrayList<PartInfo>();

      if( table.getPartitionKeys().size() != 0 ) {
        //Partitioned table
        List<Partition> parts = client.listPartitionsByFilter(
            inputInfo.getDatabaseName(), inputInfo.getTableName(),
            inputInfo.getFilter(), (short) -1);

        // Default to 100,000 partitions if hcat.metastore.maxpartitions is not defined
        int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
        if (parts != null && parts.size() > maxPart) {
          throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
        }

        // populate partition info
        for (Partition ptn : parts){
          PartInfo partInfo = extractPartInfo(ptn.getSd(),ptn.getParameters());
          partInfo.setPartitionValues(createPtnKeyValueMap(table,ptn));
          partInfoList.add(partInfo);
        }

      }else{
        //Non partitioned table
        PartInfo partInfo = extractPartInfo(table.getSd(),table.getParameters());
        partInfo.setPartitionValues(new HashMap<String,String>());
        partInfoList.add(partInfo);
      }

      JobInfo hcatJobInfo = new JobInfo(inputInfo, tableSchema, partInfoList);
      inputInfo.setJobInfo(hcatJobInfo);

      job.getConfiguration().set(
          HCatConstants.HCAT_KEY_JOB_INFO,
          HCatUtil.serialize(hcatJobInfo)
      );
    } finally {
      if (client != null ) {
        client.close();
      }
    }
  }
View Full Code Here
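
The setInput example above boils down to one metastore round trip at job-setup time: fetch the table, and if it is partitioned, list the matching partitions while enforcing a cap on how many are allowed. Below is a condensed, hedged sketch of just that metastore interaction; the class name, method name, and MAX_PARTITIONS constant are hypothetical, and the original code reads the cap from "hcat.metastore.maxpartitions" instead.

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionListingSketch {
  // Hypothetical cap; the example above defaults to 100,000.
  private static final int MAX_PARTITIONS = 100000;

  static List<Partition> listInputPartitions(HiveConf conf, String db, String tbl, String filter)
      throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      Table table = client.getTable(db, tbl);
      if (table.getPartitionKeys().isEmpty()) {
        return Collections.emptyList(); // non-partitioned table
      }
      // (short) -1 means no limit on the number of partitions returned.
      List<Partition> parts = client.listPartitionsByFilter(db, tbl, filter, (short) -1);
      if (parts.size() > MAX_PARTITIONS) {
        throw new IllegalStateException("total number of partitions is " + parts.size());
      }
      return parts;
    } finally {
      client.close();
    }
  }
}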

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

            throw new MetaException(
              "Failed to load storage handler:  " + ex.getMessage());
          }
        }
      };
    return new HiveMetaStoreClient(conf, hookLoader);
  }
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    clientWH = new Warehouse(hcatConf);
    msc = new HiveMetaStoreClient(hcatConf,null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    HiveConf hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    driver = new Driver(hiveConf);
    client = new HiveMetaStoreClient(hiveConf);
    SessionState.start(new CliSessionState(hiveConf));
    props = new Properties();
    props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name"));
    fullFileName = cluster.getProperties().getProperty("fs.default.name") + fileName;
View Full Code Here

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

      }

      OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(jobContext);

      try {
        HiveMetaStoreClient client = HCatOutputFormat.createHiveClient(
            jobInfo.getTableInfo().getServerUri(), jobContext.getConfiguration());
        // Cancel the delegation tokens that were acquired for this job now that
        // we are done. Cancel only if the tokens were acquired by HCatOutputFormat,
        // not if they were supplied by Oozie; in the latter case the
        // HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set.
        String tokenStrForm = client.getTokenStrForm();
        if(tokenStrForm != null && jobContext.getConfiguration().get
            (HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
          client.cancelDelegationToken(tokenStrForm);
        }
        if (harProcessor.isEnabled()){
          String jcTokenStrForm = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_JOBCLIENT_TOKEN_STRFORM);
          String jcTokenSignature = jobContext.getConfiguration().get(HCatConstants.HCAT_KEY_JOBCLIENT_TOKEN_SIGNATURE);
          if(jcTokenStrForm != null && jcTokenSignature != null) {
View Full Code Here
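
The comment in the example captures the rule: cancel a delegation token only when HCatOutputFormat acquired it itself, which it signals by setting HCAT_KEY_TOKEN_SIGNATURE in the job configuration. A minimal sketch of that check follows; the TOKEN_SIGNATURE_KEY value is an assumption standing in for HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, and the client is assumed to be already connected.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

public class TokenCleanupSketch {
  // Assumed stand-in for HCatConstants.HCAT_KEY_TOKEN_SIGNATURE in the example above.
  private static final String TOKEN_SIGNATURE_KEY = "hcat.key.token.signature";

  static void cancelTokenIfOwned(HiveMetaStoreClient client, Configuration conf) throws Exception {
    String tokenStrForm = client.getTokenStrForm();
    // Cancel only when we acquired the token ourselves; when Oozie supplied it,
    // the signature property is absent and the token must be left alone.
    if (tokenStrForm != null && conf.get(TOKEN_SIGNATURE_KEY) != null) {
      client.cancelDelegationToken(tokenStrForm);
    }
  }
}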

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

        moveTaskOutputs(fs, src, src, tblPath,false);
        fs.delete(src, true);
        return;
      }

      HiveMetaStoreClient client = null;
      List<String> values = null;
      HCatTableInfo tableInfo = jobInfo.getTableInfo();

      List<Partition> partitionsAdded = new ArrayList<Partition>();

      try {
        client = HCatOutputFormat.createHiveClient(tableInfo.getServerUri(), conf);

        StorerInfo storer = InitializeInput.extractStorerInfo(table.getSd(),table.getParameters());

        updateTableSchema(client, table, jobInfo.getOutputSchema());
       
        FileStatus tblStat = fs.getFileStatus(tblPath);
        String grpName = tblStat.getGroup();
        FsPermission perms = tblStat.getPermission();

        List<Partition> partitionsToAdd = new ArrayList<Partition>();
        if (!dynamicPartitioningUsed){
          partitionsToAdd.add(
              constructPartition(
                  context,
                  tblPath.toString(), tableInfo.getPartitionValues()
                  ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
                  ,table, fs
                  ,grpName,perms));
        }else{
          for (Entry<String,Map<String,String>> entry : partitionsDiscoveredByPath.entrySet()){
            partitionsToAdd.add(
                constructPartition(
                    context,
                    getPartitionRootLocation(entry.getKey(),entry.getValue().size()), entry.getValue()
                    ,jobInfo.getOutputSchema(), getStorerParameterMap(storer)
                    ,table, fs
                    ,grpName,perms));
          }
        }

        //Publish the new partition(s)
        if (dynamicPartitioningUsed && harProcessor.isEnabled() && (!partitionsToAdd.isEmpty())){
         
          Path src = new Path(ptnRootLocation);

          // Check each dir we're copying out; if it already exists, error out
          moveTaskOutputs(fs, src, src, tblPath,true);
         
          moveTaskOutputs(fs, src, src, tblPath,false);
          fs.delete(src, true);
         
         
//          for (Partition partition : partitionsToAdd){
//            partitionsAdded.add(client.add_partition(partition));
//            // currently following add_partition instead of add_partitions because latter isn't
//            // all-or-nothing and we want to be able to roll back partitions we added if need be.
//          }

          try {
            client.add_partitions(partitionsToAdd);
            partitionsAdded = partitionsToAdd;
          } catch (Exception e){
            // There was an error adding partitions: roll back the fs copy and rethrow
            for (Partition p : partitionsToAdd){
              Path ptnPath = new Path(harProcessor.getParentFSPath(new Path(p.getSd().getLocation())));
              if (fs.exists(ptnPath)){
                fs.delete(ptnPath,true);
              }
            }
            throw e;
          }

        }else{
          // no harProcessor, regular operation

          // No duplicate-partition publish case to worry about because we'll
          // get an AlreadyExistsException here if so, and roll back appropriately
         
          client.add_partitions(partitionsToAdd);
          partitionsAdded = partitionsToAdd;

          if (dynamicPartitioningUsed && (partitionsAdded.size()>0)){
            Path src = new Path(ptnRootLocation);
            moveTaskOutputs(fs, src, src, tblPath,false);
            fs.delete(src, true);
          }
         
        }
       
        if( baseCommitter != null ) {
          baseCommitter.cleanupJob(context);
        }
        // Cancel the delegation tokens that were acquired for this job now that
        // we are done. Cancel only if the tokens were acquired by HCatOutputFormat,
        // not if they were supplied by Oozie; in the latter case the
        // HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set.
        String tokenStrForm = client.getTokenStrForm();
        if(tokenStrForm != null && context.getConfiguration().get
            (HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
          client.cancelDelegationToken(tokenStrForm);
        }

        if (harProcessor.isEnabled()){
          String jcTokenStrForm =
              context.getConfiguration().get(HCatConstants.HCAT_KEY_JOBCLIENT_TOKEN_STRFORM);
          String jcTokenSignature =
              context.getConfiguration().get(HCatConstants.HCAT_KEY_JOBCLIENT_TOKEN_SIGNATURE);
          if(jcTokenStrForm != null && jcTokenSignature != null) {
            HCatUtil.cancelJobTrackerDelegationToken(tokenStrForm,jcTokenSignature);
          }
        }

      } catch (Exception e) {

        if( partitionsAdded.size() > 0 ) {
          try {
            // Commit failed after partitions were added; try to clean up the metastore
            for (Partition p : partitionsAdded) {
              client.dropPartition(tableInfo.getDatabaseName(),
                  tableInfo.getTableName(), p.getValues());
            }
          } catch(Exception te) {
            //Keep cause as the original exception
            throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
          }
        }

        if( e instanceof HCatException ) {
          throw (HCatException) e;
        } else {
          throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
        }
      } finally {
        if( client != null ) {
          client.close();
        }
      }
    }
View Full Code Here
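
The core metastore pattern in the long commit example above is "publish all partitions, then roll back whatever made it in if a later step fails". A stripped-down sketch of that publish-and-rollback flow follows; the class and method names are hypothetical, error handling is simplified, and the real code additionally wraps failures in HCatException and cleans up filesystem copies for dynamic partitioning.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class PartitionPublishSketch {
  static void publishPartitions(HiveMetaStoreClient client, String db, String tbl,
      List<Partition> partitionsToAdd) throws Exception {
    List<Partition> partitionsAdded = new ArrayList<Partition>();
    try {
      // Publish all partitions in one call; if this throws, nothing is recorded as added.
      client.add_partitions(partitionsToAdd);
      partitionsAdded.addAll(partitionsToAdd);
      // ... remaining commit work (moving outputs, base committer cleanup) goes here ...
    } catch (Exception e) {
      // A later step failed; drop the partitions we already published so the
      // table is not left half-committed, then rethrow the original error.
      for (Partition p : partitionsAdded) {
        client.dropPartition(db, tbl, p.getValues());
      }
      throw e;
    }
  }
}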

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  CommandNeedRetryException, UnknownDBException, InvalidPartitionException, UnknownPartitionException{
    driver.run("create database mydb");
    driver.run("use mydb");
    driver.run("create table mytbl (a string) partitioned by (b string)");
    driver.run("alter table mytbl add partition(b='2011')");
    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
    Map<String,String> kvs = new HashMap<String, String>(1);
    kvs.put("b", "2011");
    msc.markPartitionForEvent("mydb", "mytbl", kvs, PartitionEventType.LOAD_DONE);
    driver.run("alter table mytbl drop partition(b='2011')");
    driver.run("drop table mytbl");
    driver.run("drop database mydb");
  }
View Full Code Here
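
markPartitionForEvent records that something happened to a partition (here, that a load completed) so that other consumers can poll for it. A small sketch of the consumer side follows, assuming the companion isPartitionMarkedForEvent call is available on the same client; the database, table, and partition values mirror the test above.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.PartitionEventType;

public class PartitionEventSketch {
  static boolean loadDone(HiveConf hiveConf) throws Exception {
    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
    try {
      Map<String, String> kvs = new HashMap<String, String>(1);
      kvs.put("b", "2011");
      // Returns true once the producer has called markPartitionForEvent for this partition.
      return msc.isPartitionMarkedForEvent("mydb", "mytbl", kvs, PartitionEventType.LOAD_DONE);
    } finally {
      msc.close();
    }
  }
}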