Examples of HiveMetaStoreClient


Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

*/
      hiveConf.set("hive.metastore.local", "false");
      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
    }
    try {
      client = new HiveMetaStoreClient(hiveConf, null);
    } catch (Exception e) {
      throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri: [" + serverUri + "]", e);
    }
    return client;
  }
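
A minimal, self-contained sketch of the same pattern: connecting to a remote metastore over Thrift. The endpoint URI below is an assumption; substitute your own metastore host and port.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class RemoteMetaStoreExample {

  public static HiveMetaStoreClient connect(String serverUri) throws MetaException {
    HiveConf hiveConf = new HiveConf();
    // Force remote mode and point the client at the Thrift metastore service.
    hiveConf.set("hive.metastore.local", "false");
    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim());
    return new HiveMetaStoreClient(hiveConf);
  }

  public static void main(String[] args) throws Exception {
    // "thrift://localhost:9083" is an illustrative endpoint, not a given.
    HiveMetaStoreClient client = connect("thrift://localhost:9083");
    try {
      System.out.println("Databases: " + client.getAllDatabases());
    } finally {
      client.close();
    }
  }
}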

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

     * @param outputInfo the table output info
     * @throws IOException the exception in communicating with the metadata server
     */
    @SuppressWarnings("unchecked")
    public static void setOutput(Job job, HowlTableInfo outputInfo) throws IOException {
      HiveMetaStoreClient client = null;

      try {

        Configuration conf = job.getConfiguration();
        client = createHiveClient(outputInfo.getServerUri(), conf);
        Table table = client.getTable(outputInfo.getDatabaseName(), outputInfo.getTableName());

        if( outputInfo.getPartitionValues() == null ) {
          outputInfo.setPartitionValues(new HashMap<String, String>());
        } else {
          //Convert user specified map to have lower case key names
          Map<String, String> valueMap = new HashMap<String, String>();
          for(Map.Entry<String, String> entry : outputInfo.getPartitionValues().entrySet()) {
            valueMap.put(entry.getKey().toLowerCase(), entry.getValue());
          }

          outputInfo.setPartitionValues(valueMap);
        }

        //Handle duplicate publish
        handleDuplicatePublish(job, outputInfo, client, table);

        StorageDescriptor tblSD = table.getSd();
        HowlSchema tableSchema = HowlUtil.extractSchemaFromStorageDescriptor(tblSD);
        StorerInfo storerInfo = InitializeInput.extractStorerInfo(tblSD,table.getParameters());

        List<String> partitionCols = new ArrayList<String>();
        for(FieldSchema schema : table.getPartitionKeys()) {
          partitionCols.add(schema.getName());
        }

        Class<? extends HowlOutputStorageDriver> driverClass =
          (Class<? extends HowlOutputStorageDriver>) Class.forName(storerInfo.getOutputSDClass());
        HowlOutputStorageDriver driver = driverClass.newInstance();

        String tblLocation = tblSD.getLocation();
        String location = driver.getOutputLocation(job,
            tblLocation, partitionCols,
            outputInfo.getPartitionValues());

        //Serialize the output info into the configuration
        OutputJobInfo jobInfo = new OutputJobInfo(outputInfo,
                tableSchema, tableSchema, storerInfo, location, table);
        conf.set(HOWL_KEY_OUTPUT_INFO, HowlUtil.serialize(jobInfo));

        Path tblPath = new Path(tblLocation);

        /*  Set the umask in conf such that files/dirs get created with table-dir
         * permissions. Following three assumptions are made:
         * 1. Actual files/dirs creation is done by RecordWriter of underlying
         * output format. It is assumed that they use default permissions while creation.
         * 2. Default Permissions = FsPermission.getDefault() = 777.
         * 3. UMask is honored by underlying filesystem.
         */

        FsPermission.setUMask(conf, FsPermission.getDefault().applyUMask(
            tblPath.getFileSystem(conf).getFileStatus(tblPath).getPermission()));

        if(UserGroupInformation.isSecurityEnabled()){
          UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
          // Check whether Oozie has set up a Howl delegation token; if so, use it.
          TokenSelector<? extends TokenIdentifier> tokenSelector = new DelegationTokenSelector();
          // TODO: if oozie uses a "service" called "oozie", then instead of
          // new Text() do new Text("oozie") below. If this change is made, also
          // remember to do:
          //   job.getConfiguration().set(HOWL_KEY_TOKEN_SIGNATURE, "oozie");
          // and change the code in HowlOutputCommitter.cleanupJob() to cancel the
          // token only if token.service is not "oozie" (removing the
          // HOWL_KEY_TOKEN_SIGNATURE != null condition in that code).
          Token<? extends TokenIdentifier> token = tokenSelector.selectToken(
              new Text(), ugi.getTokens());
          if(token != null) {

            job.getCredentials().addToken(new Text(ugi.getUserName()),token);

          } else {

            // We did not get a token set up by Oozie, so acquire one ourselves.
            // We fetch one token per unique output HowlTableInfo, because
            // through Pig, setOutput() is called multiple times. A single
            // shared token would not work: in the multi-query case (more than
            // one store in one job), or when a single Pig script results in
            // more than one job, the output committer would cancel the shared
            // token and the subsequent stores would fail. By keying the token
            // on the concatenation of the database name, table name, and
            // partition key-values of the output TableInfo, there are as many
            // tokens as there are stores, and the TokenSelector will correctly
            // pick the right token for the committer to use and cancel.
            String tokenSignature = getTokenSignature(outputInfo);
            if(tokenMap.get(tokenSignature) == null) {
              // get delegation tokens from howl server and store them into the "job"
              // These will be used in the HowlOutputCommitter to publish partitions to
              // howl
              String tokenStrForm = client.getDelegationTokenWithSignature(ugi.getUserName(),
                  tokenSignature);
              Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
              t.decodeFromUrlString(tokenStrForm);
              tokenMap.put(tokenSignature, t);
            }
            job.getCredentials().addToken(new Text(ugi.getUserName() + tokenSignature),
                tokenMap.get(tokenSignature));
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            job.getConfiguration().set(HOWL_KEY_TOKEN_SIGNATURE, tokenSignature);
          }
        }
      } catch(Exception e) {
        if( e instanceof HowlException ) {
          throw (HowlException) e;
        } else {
          throw new HowlException(ErrorType.ERROR_SET_OUTPUT, e);
        }
      } finally {
        if( client != null ) {
          client.close();
        }
      }
    }
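
The token-per-signature caching above is the heart of the method. Below is a condensed sketch of just that idea, reusing the snippet's own members (tokenMap, getDelegationTokenWithSignature) and assuming the same Hadoop security classes are available.

    // Condensed sketch of the token caching above: one delegation token per
    // unique output signature, so that multi-store Pig jobs do not cancel
    // each other's credentials. tokenMap and getDelegationTokenWithSignature
    // are the same members used in the full method.
    private static void addTokenForSignature(Job job, HiveMetaStoreClient client,
        String tokenSignature) throws Exception {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      if (tokenMap.get(tokenSignature) == null) {
        // Fetch the token's string form from the metastore and decode it.
        String tokenStrForm = client.getDelegationTokenWithSignature(
            ugi.getUserName(), tokenSignature);
        Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
        t.decodeFromUrlString(tokenStrForm);
        tokenMap.put(tokenSignature, t);
      }
      // The credential alias includes the signature so each store finds its
      // own token; the committer later selects and cancels the matching one.
      job.getCredentials().addToken(new Text(ugi.getUserName() + tokenSignature),
          tokenMap.get(tokenSignature));
    }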

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

          }
        }

      }

      return new HiveMetaStoreClient(hiveConf);
    }

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  }

  public void testAddPartition() throws Exception {
    Configuration conf = new Configuration();
    HiveConf hiveConf = new HiveConf(conf, TestAddPartition.class);
    HiveMetaStoreClient client = null;

    try {
      client = new HiveMetaStoreClient(hiveConf);

      String dbName = "testdb";
      String tableName = "tablename";

      Table tbl = new Table();
      tbl.setTableName(tableName);
      tbl.setDbName(dbName);
      tbl.setParameters(new HashMap<String, String>());

      StorageDescriptor sd = new StorageDescriptor();
      sd.setSerdeInfo(new SerDeInfo());
      sd.getSerdeInfo().setName(tbl.getTableName());
      sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());

      List<FieldSchema> fss = new ArrayList<FieldSchema>();
      fss.add(new FieldSchema("name", Constants.STRING_TYPE_NAME, ""));
      sd.setCols(fss);
      tbl.setSd(sd);

      tbl.setPartitionKeys(new ArrayList<FieldSchema>());
      tbl.getPartitionKeys().add(
          new FieldSchema(PART1_NAME, Constants.STRING_TYPE_NAME, ""));
      tbl.getPartitionKeys().add(
          new FieldSchema(PART2_NAME, Constants.STRING_TYPE_NAME, ""));

      client.dropTable(dbName, tableName);
      client.dropDatabase(dbName);

      client.createDatabase(dbName, "newloc");
      client.createTable(tbl);

      tbl = client.getTable(dbName, tableName);

      List<String> partValues = new ArrayList<String>();
      partValues.add("value1");
      partValues.add("value2");

      Map<String, String> part1 = new HashMap<String, String>();
      part1.put(PART1_NAME, "value1");
      part1.put(PART2_NAME, "value2");
     
      List<Map<String, String>> partitions = new ArrayList<Map<String, String>>();
      partitions.add(part1);
     
      // no partitions yet
      List<Partition> parts = client.listPartitions(dbName, tableName,
          (short) -1);
      assertTrue(parts.isEmpty());

      String partitionLocation = PART1_NAME + Path.SEPARATOR + PART2_NAME;
      // add the partitions
      for (Map<String,String> map : partitions) {
        AddPartitionDesc addPartition = new AddPartitionDesc(dbName,
            tableName, map, partitionLocation);
        Task<DDLWork> task = TaskFactory.get(new DDLWork(addPartition), hiveConf);
        task.initialize(hiveConf);
        assertEquals(0, task.execute());
      }

      // should have one
      parts = client.listPartitions(dbName, tableName, (short) -1);
      assertEquals(1, parts.size());
      Partition insertedPart = parts.get(0);
      assertEquals(tbl.getSd().getLocation() + Path.SEPARATOR + partitionLocation,
          insertedPart.getSd().getLocation());

      client.dropPartition(dbName, tableName, insertedPart.getValues());

      // add without location specified

      AddPartitionDesc addPartition = new AddPartitionDesc(dbName, tableName, part1, null);
      Task<DDLWork> task = TaskFactory.get(new DDLWork(addPartition), hiveConf);
      task.initialize(hiveConf);
      assertEquals(0, task.execute());
      parts = client.listPartitions(dbName, tableName, (short) -1);
      assertEquals(1, parts.size());

      // see that this fails properly
      addPartition = new AddPartitionDesc(dbName, "doesnotexist", part1, null);
      task = TaskFactory.get(new DDLWork(addPartition), hiveConf);
      task.initialize(hiveConf);
      assertEquals(1, task.execute());
    } finally {
      if (client != null) {
        client.close();
      }
    }
  }
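
The test above drives partition creation through DDLWork tasks; the metastore client can also add partitions directly. A minimal sketch against the same testdb/tablename table, assuming the client's appendPartition/dropPartition calls behave as in this Hive version:

      // Minimal sketch: add and list a partition directly through the client,
      // against the testdb/tablename table created in the test above.
      List<String> vals = new ArrayList<String>();
      vals.add("value1");
      vals.add("value2");
      // appendPartition creates the partition with a default, table-relative location.
      client.appendPartition("testdb", "tablename", vals);
      List<Partition> direct = client.listPartitions("testdb", "tablename", (short) -1);
      assertEquals(1, direct.size());
      // Clean up, deleting the partition's data directory as well.
      client.dropPartition("testdb", "tablename", vals, true);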

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

   * @throws HiveMetaException
   */
  private IMetaStoreClient createMetaStoreClient() throws MetaException {
    boolean useFileStore = conf.getBoolean("hive.metastore.usefilestore", false);
    if(!useFileStore) {
      return new HiveMetaStoreClient(this.conf);
    }
    return new MetaStoreClient(this.conf);
  }
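
Because both branches return an IMetaStoreClient, callers can stay agnostic of the backing store. A usage sketch from within the same class; the database name and table pattern are illustrative:

    // Usage sketch: code against the IMetaStoreClient interface so the
    // file-backed and Thrift-backed implementations stay interchangeable.
    IMetaStoreClient msc = createMetaStoreClient();
    try {
      // "default" and "*" are illustrative; this lists all tables in a database.
      List<String> tables = msc.getTables("default", "*");
      System.out.println("Tables: " + tables);
    } finally {
      msc.close();
    }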

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    clientWH = new Warehouse(hcatConf);
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }
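
The Warehouse built alongside the client resolves filesystem paths from the same configuration. A small sketch of what a test can do with it; logging the root is just a sanity check:

    // Sketch: the Warehouse helper resolves metastore paths from the same
    // conf the client uses; getWhRoot() returns the configured warehouse root.
    Path warehouseRoot = clientWH.getWhRoot();
    System.out.println("Test warehouse root: " + warehouseRoot);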

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");

    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString());
    try {
      hmsc = new HiveMetaStoreClient(hiveConf, null);
      initalizeTables();
    } catch (Throwable e) {
      LOG.error("Exception encountered while setting up testcase", e);
      throw new Exception(e);
    } finally {

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

  @Before
  public void setUp() throws Exception {
    if (driver == null) {
      setUpHiveConf();
      driver = new Driver(hiveConf);
      client = new HiveMetaStoreClient(hiveConf);
      SessionState.start(new CliSessionState(hiveConf));
    }
  }
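
With the Driver and client sharing one HiveConf, a test can issue HiveQL through the Driver and verify the result through the metastore client. A sketch; the table name is illustrative only:

  // Sketch: create a table via HiveQL, then read it back through the
  // metastore client. "example_tbl" is an illustrative name.
  @Test
  public void testDriverAndClientShareMetastore() throws Exception {
    driver.run("CREATE TABLE example_tbl (id INT, name STRING)");
    Table table = client.getTable("default", "example_tbl");
    assertEquals("example_tbl", table.getTableName());
    driver.run("DROP TABLE example_tbl");
  }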

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

        HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname,
        "false");
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }

Examples of org.apache.hadoop.hive.metastore.HiveMetaStoreClient

    }
  }

  private void cancelDelegationTokens(JobContext context) throws IOException{
    LOG.info("Cancelling deletgation token for the job.");
    HiveMetaStoreClient client = null;
    try {
      HiveConf hiveConf = HCatUtil
          .getHiveConf(context.getConfiguration());
      client = HCatUtil.getHiveClient(hiveConf);
      // Cancel the delegation tokens acquired for this job now that we are
      // done. Cancel only if the tokens were acquired by HCatOutputFormat,
      // not if they were supplied by Oozie; in the latter case the
      // HCAT_KEY_TOKEN_SIGNATURE property in the conf will not be set.
      String tokenStrForm = client.getTokenStrForm();
      if (tokenStrForm != null
          && context.getConfiguration().get(
              HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
        client.cancelDelegationToken(tokenStrForm);
      }
    } catch (MetaException e) {
      LOG.warn("MetaException while cancelling delegation token.", e);
    } catch (TException e) {
      LOG.warn("TException while cancelling delegation token.", e);
View Full Code Here