Examples of org.apache.hadoop.hive.metastore.Warehouse
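
The excerpts below share one pattern: construct a Warehouse from the active Hive configuration, then resolve filesystem paths for databases and tables. Here is a minimal sketch of that pattern, assuming a configured HiveConf; the class name WarehousePathExample and the table name "my_table" are hypothetical, and method signatures follow the (older) metastore API used in these excerpts.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.metadata.Hive;

public class WarehousePathExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Warehouse resolves locations under hive.metastore.warehouse.dir.
    Warehouse wh = new Warehouse(conf);

    Hive hive = Hive.get(conf);
    Database db = hive.getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME);

    // Root directory of the database under the warehouse.
    Path dbPath = wh.getDatabasePath(db);

    // Location a managed table named "my_table" would occupy in that database.
    Path tblPath = wh.getTablePath(db, "my_table");

    // Fully qualify a raw path against the warehouse filesystem.
    Path qualified = wh.getDnsPath(new Path("/user/hive/warehouse/my_table"));

    System.out.println(dbPath + " | " + tblPath + " | " + qualified);
  }
}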

  /**
   * Fetches the named table from the metastore and asserts that it matches
   * the expected definition.
   *
   * @param tbl the expected table definition
   * @param tableName the name of the table to fetch and validate
   * @throws MetaException
   */
  private void validateTable(Table tbl, String tableName) throws MetaException {
    Warehouse wh = new Warehouse(hiveConf);
    Table ft = null;
    try {
      ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
      assertNotNull("Unable to fetch table", ft);
      ft.checkValidity();
      assertEquals("Table names didn't match for table: " + tableName, tbl
          .getTableName(), ft.getTableName());
      assertEquals("Table owners didn't match for table: " + tableName, tbl
          .getOwner(), ft.getOwner());
      assertEquals("Table retention didn't match for table: " + tableName,
          tbl.getRetention(), ft.getRetention());
      assertEquals("Data location is not set correctly",
          wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
          ft.getDataLocation().toString());
      // now that URI and times are set correctly, set the original table's uri and times
      // and then compare the two tables
      tbl.setDataLocation(ft.getDataLocation());
      tbl.setCreateTime(ft.getTTable().getCreateTime());

Examples of org.apache.hadoop.hive.metastore.Warehouse

  private void authorize(String name, HiveSemanticAnalyzerHookContext cntxt, FsAction action, boolean isDBOp)
      throws MetaException, HiveException, HCatException {

    Warehouse wh = new Warehouse(cntxt.getConf());
    if (!isDBOp) {
      // Do validations for the table path.
      Table tbl;
      try {
        tbl = cntxt.getHive().getTable(name);
      } catch (InvalidTableException ite) {
        // The table itself doesn't exist in the metastore; nothing to validate.
        return;
      }
      Path path = tbl.getPath();
      if (path != null) {
        AuthUtils.authorize(wh.getDnsPath(path), action, cntxt.getConf());
      } else {
        // This happens when a table exists in the metastore for the given name
        // but has no path associated with it, so there is nothing to check.
        // In that case, skip the check and fall back to Hive's default behavior.
        return;
      }
    } else {
      // Otherwise, it's a DB operation.
      AuthUtils.authorize(wh.getDatabasePath(cntxt.getHive().getDatabase(name)), action, cntxt.getConf());
    }
  }
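
AuthUtils.authorize above takes a (Path, FsAction, Configuration) triple. As a rough stand-in for what such a path-level check involves, here is a minimal sketch using plain Hadoop APIs; PathAuthSketch and canPerform are hypothetical names, real checks also walk group membership, and this is not HCatalog's actual implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.UserGroupInformation;

public class PathAuthSketch {
  // Hypothetical stand-in for a path-level permission check.
  public static boolean canPerform(Path path, FsAction action, Configuration conf) throws Exception {
    FileSystem fs = path.getFileSystem(conf);
    FileStatus stat = fs.getFileStatus(path);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    if (user.equals(stat.getOwner())) {
      return stat.getPermission().getUserAction().implies(action);
    }
    // Group checks elided for brevity; fall through to the "other" bits.
    return stat.getPermission().getOtherAction().implies(action);
  }
}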

Examples of org.apache.hadoop.hive.metastore.Warehouse

  private void authorize(HiveSemanticAnalyzerHookContext context, String loc) throws SemanticException {

    Path tblDir;
    Configuration conf = context.getConf();
    try {
      Warehouse wh = new Warehouse(conf);
      if (loc == null || loc.isEmpty()) {
        Hive hive = context.getHive();
        tblDir = wh.getTablePath(hive.getDatabase(hive.getCurrentDatabase()), tableName).getParent();
      } else {
        tblDir = wh.getDnsPath(new Path(loc));
      }

      try {
        AuthUtils.authorize(tblDir, FsAction.WRITE, conf);
      } catch (HCatException e) {

Examples of org.apache.hadoop.hive.metastore.Warehouse

    StatsAggregator statsAggregator = null;

    try {
      // Stats setup:
      Warehouse wh = new Warehouse(conf);
      FileSystem fileSys;
      FileStatus[] fileStatus;

      if (!this.getWork().getNoStatsAggregator()) {
        String statsImplementationClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS);
        StatsFactory.setImplementation(statsImplementationClass, conf);
        // manufacture a StatsAggregator
        statsAggregator = StatsFactory.getStatsAggregator();
        if (!statsAggregator.connect(conf)) {
          throw new HiveException("StatsAggregator connect failed " + statsImplementationClass);
        }
      }

      TableStatistics tblStats = new TableStatistics();

      org.apache.hadoop.hive.metastore.api.Table tTable = table.getTTable();
      Map<String, String> parameters = tTable.getParameters();

      boolean tableStatsExist = this.existStats(parameters);

      for (String statType : supportedStats) {
        if (parameters.containsKey(statType)) {
          tblStats.setStat(statType, Long.parseLong(parameters.get(statType)));
        }
      }

      if (parameters.containsKey(StatsSetupConst.NUM_PARTITIONS)) {
        tblStats.setNumPartitions(Integer.parseInt(parameters.get(StatsSetupConst.NUM_PARTITIONS)));
      }

      List<Partition> partitions = getPartitionsList();
      boolean atomic = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_ATOMIC);

      if (partitions == null) {
        // non-partitioned tables:
        if (!tableStatsExist && atomic) {
          return 0;
        }
        Path tablePath = wh.getTablePath(db.getDatabase(table.getDbName()), table.getTableName());
        fileSys = tablePath.getFileSystem(conf);
        fileStatus = Utilities.getFileStatusRecurse(tablePath, 1, fileSys);

        tblStats.setStat(StatsSetupConst.NUM_FILES, fileStatus.length);
        long tableSize = 0L;
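
The excerpt cuts off while summing file sizes. A minimal sketch of that aggregation step, using plain FileSystem calls in place of Hive's Utilities.getFileStatusRecurse; TableSizeSketch and countFilesAndBytes are hypothetical names.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TableSizeSketch {
  // Hypothetical helper: count the files directly under a table directory and
  // sum their lengths, the two numbers the stats task records (NUM_FILES and
  // the running tableSize).
  public static long[] countFilesAndBytes(Path tablePath, Configuration conf) throws Exception {
    FileSystem fs = tablePath.getFileSystem(conf);
    long files = 0L;
    long bytes = 0L;
    for (FileStatus status : fs.listStatus(tablePath)) {
      if (!status.isDir()) {
        files++;
        bytes += status.getLen();
      }
    }
    return new long[] { files, bytes };
  }
}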

Examples of org.apache.hadoop.hive.metastore.Warehouse

    }
    catch (Exception e) {
      assertTrue(e instanceof ExitException);
      assertEquals(((ExitException) e).getStatus(), 0);
    }
    Warehouse wh = new Warehouse(conf);
    Path dfsPath = wh.getTablePath(Hive.get(conf).getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME), "testNoPartTbl");
    FileSystem fs = dfsPath.getFileSystem(conf);
    assertEquals(fs.getFileStatus(dfsPath).getPermission(), FsPermission.valueOf("drwx-wx---"));

    pig.setBatchOn();
    pig.registerQuery("A = load 'build.xml' as (line:chararray);");
    pig.registerQuery("store A into 'testNoPartTbl' using " + HCatStorer.class.getName() + "();");
    pig.executeBatch();
    FileStatus[] status = fs.listStatus(dfsPath, hiddenFileFilter);

    assertEquals(status.length, 1);
    assertEquals(FsPermission.valueOf("drwx-wx---"), status[0].getPermission());

    try {
      HCatCli.main(new String[]{"-e", "create table testPartTbl (line string) partitioned by (a string) stored as RCFILE", "-p", "rwx-wx--x"});
    }
    catch (Exception e) {
      assertTrue(e instanceof ExitException);
      assertEquals(((ExitException) e).getStatus(), 0);
    }

    dfsPath = wh.getTablePath(Hive.get(conf).getDatabase(MetaStoreUtils.DEFAULT_DATABASE_NAME), "testPartTbl");
    assertEquals(fs.getFileStatus(dfsPath).getPermission(), FsPermission.valueOf("drwx-wx--x"));

    pig.setBatchOn();
    pig.registerQuery("A = load 'build.xml' as (line:chararray);");
    pig.registerQuery("store A into 'testPartTbl' using " + HCatStorer.class.getName() + "('a=part');");

Examples of org.apache.hadoop.hive.metastore.Warehouse

        return 0;
      }
      else {
        try {
          Hive db = Hive.get();
          Path dbPath = new Warehouse(conf).getDatabasePath(db.getDatabase(dbName));
          FileSystem fs = dbPath.getFileSystem(conf);
          if (perms != null) {
            fs.setPermission(dbPath, perms);
          }
          if (null != grp) {

Examples of org.apache.hadoop.hive.metastore.Warehouse

        assertTrue("Unable to create table: " + tableName, false);
      }

      // get table
      Table ft = null;
      Warehouse wh = new Warehouse(hiveConf);
      try {
        ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
        ft.checkValidity();
        assertEquals("Table names didn't match for table: " + tableName, tbl
            .getTableName(), ft.getTableName());
        assertEquals("Table owners didn't match for table: " + tableName, tbl
            .getOwner(), ft.getOwner());
        assertEquals("Table retention didn't match for table: " + tableName,
            tbl.getRetention(), ft.getRetention());
        assertEquals("Data location is not set correctly",
            wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
            ft.getDataLocation().toString());
        // now that URI is set correctly, set the original table's uri and then
        // compare the two tables
        tbl.setDataLocation(ft.getDataLocation());
        assertTrue("Tables doesn't match: " + tableName, ft.getTTable().equals(

Examples of org.apache.hadoop.hive.metastore.Warehouse

      } catch (HiveException e) {
        System.err.println(StringUtils.stringifyException(e));
        assertTrue("Unable to create table: " + tableName, false);
      }
      // get table
      Warehouse wh = new Warehouse(hiveConf);
      Table ft = null;
      try {
        ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
        assertNotNull("Unable to fetch table", ft);
        ft.checkValidity();
        assertEquals("Table names didn't match for table: " + tableName, tbl
            .getTableName(), ft.getTableName());
        assertEquals("Table owners didn't match for table: " + tableName, tbl
            .getOwner(), ft.getOwner());
        assertEquals("Table retention didn't match for table: " + tableName,
            tbl.getRetention(), ft.getRetention());
        assertEquals("Data location is not set correctly",
            wh.getTablePath(hm.getDatabase(DEFAULT_DATABASE_NAME), tableName).toString(),
            ft.getDataLocation().toString());
        // now that URI is set correctly, set the original table's uri and then
        // compare the two tables
        tbl.setDataLocation(ft.getDataLocation());
        assertTrue("Tables  doesn't match: " + tableName, ft.getTTable()

Examples of org.apache.hadoop.hive.metastore.Warehouse

    hcatConf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
    URI fsuri = cluster.getFileSystem().getUri();
    Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), "/user/hive/warehouse");
    hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
    hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
    wh = new Warehouse(hcatConf);
    SessionState.start(new CliSessionState(hcatConf));

    hcatDriver = new HCatDriver();
  }

Examples of org.apache.hadoop.hive.metastore.Warehouse

        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            if (db.getPartition(table, addPartitionDesc.getPartSpec(), false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS
                      .getMsg(partSpecToString(addPartitionDesc.getPartSpec())));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        outputs.add(new WriteEntity(table));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
        Table table = new Table(dbname, tblDesc.getTableName());
        conf.set("import.destination.dir",
            wh.getTablePath(db.getDatabase(db.getCurrentDatabase()),
                tblDesc.getTableName()).toString());
        if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            t.addDependentTask(
                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
          }
        } else {
          LOG.debug("adding dependent CopyWork/MoveWork for table");
          if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
            LOG.debug("Importing in place, no emptiness check, no copying/loading");
            Path dataPath = new Path(fromURI.toString(), "data");
            tblDesc.setLocation(dataPath.toString());
          } else {
            Path tablePath = null;
            if (tblDesc.getLocation() != null) {
              tablePath = new Path(tblDesc.getLocation());
            } else {
              tablePath = wh.getTablePath(db.getDatabase(db.getCurrentDatabase()), tblDesc.getTableName());
            }
            checkTargetLocationEmpty(fs, tablePath);
            t.addDependentTask(loadTable(fromURI, table));
          }
        }