Package org.apache.hadoop.hive.metastore

Examples of org.apache.hadoop.hive.metastore.Warehouse

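Warehouse is the Hive metastore's helper for mapping databases, tables, and partitions to storage locations under the configured warehouse root, and for filesystem housekeeping such as deleting directories. The sketch below shows the pattern the snippets on this page share: build a Warehouse from a Hive configuration, resolve a table path, and handle the checked MetaException. It is a minimal sketch; the warehouse directory and table name are assumptions for illustration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.MetaStoreUtils;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class WarehouseExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Assumption: point the warehouse root at a local directory for the demo.
    conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, "/tmp/hive/warehouse");
    try {
      // The constructor reads the warehouse root from the configuration and
      // throws MetaException if it cannot be resolved.
      Warehouse wh = new Warehouse(conf);
      // Resolve the default data location of a (hypothetical) table in the
      // default database, as the first test snippet below does.
      Path p = wh.getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, "example_table");
      System.out.println("default table path: " + p);
    } catch (MetaException e) {
      // The snippets below rewrap this as HiveException or SemanticException.
      throw new RuntimeException(e);
    }
  }
}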

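// Test fragment: after creating a table, fetch it back from the metastore and
// verify its fields; Warehouse.getDefaultTablePath supplies the expected data location.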
        assertTrue("Unable to create table: " + tableName, false);
      }

      // get table
      Table ft = null;
      Warehouse wh = new Warehouse(hiveConf);
      try {
        ft = hm.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
        ft.checkValidity();
        assertEquals("Table names didn't match for table: " + tableName, tbl.getName(), ft.getName());
        assertEquals("Table owners didn't match for table: " + tableName, tbl.getOwner(), ft.getOwner());
        assertEquals("Table retention didn't match for table: " + tableName, tbl.getRetention(), ft.getRetention());
        assertEquals("Data location is not set correctly", wh.getDefaultTablePath(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName).toString(), ft.getDataLocation().toString());
        // now that the URI is verified, copy it onto the original table and compare the two
        tbl.setDataLocation(ft.getDataLocation());
        assertTrue("Tables don't match: " + tableName, ft.getTTable().equals(tbl.getTTable()));
        assertEquals("Serde is not set correctly", tbl.getDeserializer().getClass().getName(), ft.getDeserializer().getClass().getName());
        assertEquals("SerializationLib is not set correctly", tbl.getSerializationLib(), LazySimpleSerDe.class.getName());


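// Import fragment: check whether the destination table already exists, then either
// add partitions/load data into it or create it; Warehouse.getTablePath resolves
// the destination directory in both branches.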
        conf.set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
          addPartitionDesc.setTableName(tblDesc.getTableName());
        }
      }
      Warehouse wh = new Warehouse(conf);
      try {
        Table table = db.getTable(tblDesc.getTableName());
        checkTable(table, tblDesc);
        LOG.debug("table " + tblDesc.getTableName()
            + " exists: metadata checked");
        tableExists = true;
        conf.set("import.destination.dir", table.getDataLocation().toString());
        if (table.isPartitioned()) {
          LOG.debug("table partitioned");
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            if (db.getPartition(table, addPartitionDesc.getPartSpec(), false) == null) {
              rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
            } else {
              throw new SemanticException(
                  ErrorMsg.PARTITION_EXISTS
                      .getMsg(partSpecToString(addPartitionDesc.getPartSpec())));
            }
          }
        } else {
          LOG.debug("table non-partitioned");
          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        outputs.add(new WriteEntity(table));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
        Table table = new Table(dbname, tblDesc.getTableName());
        conf.set("import.destination.dir",
            wh.getTablePath(db.getDatabase(db.getCurrentDatabase()),
                tblDesc.getTableName()).toString());
        if ((tblDesc.getPartCols() != null) && (tblDesc.getPartCols().size() != 0)) {
          for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            t.addDependentTask(
                addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc));
          }
        } else {
          LOG.debug("adding dependent CopyWork/MoveWork for table");
          if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
            LOG.debug("Importing in place, no emptiness check, no copying/loading");
            Path dataPath = new Path(fromURI.toString(), "data");
            tblDesc.setLocation(dataPath.toString());
          } else {
            Path tablePath = null;
            if (tblDesc.getLocation() != null) {
              tablePath = new Path(tblDesc.getLocation());
            } else {
              tablePath = wh.getTablePath(db.getDatabase(db.getCurrentDatabase()), tblDesc.getTableName());
            }
            checkTargetLocationEmpty(fs, tablePath);
            t.addDependentTask(loadTable(fromURI, table));
          }
        }

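// Analyzer fragment: validate that the target database exists, then use
// Warehouse.getTablePath to compute where the new table's data will land.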
              dumpTable = db.newTable(qb.getTableDesc().getTableName());
              if (!db.databaseExists(dumpTable.getDbName())) {
                throw new SemanticException("ERROR: The database " + dumpTable.getDbName()
                    + " does not exist.");
              }
              Warehouse wh = new Warehouse(conf);
              targetPath = wh.getTablePath(db.getDatabase(dumpTable.getDbName()), dumpTable
                  .getTableName());
            } catch (HiveException e) {
              throw new SemanticException(e);
            } catch (MetaException e) {
              throw new SemanticException(e);

    }
  }

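  // Helper: delete a directory through Warehouse.deleteDir, rewrapping the
  // checked MetaException as HiveException for callers.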
  private void deleteDir(Path dir) throws HiveException {
    try {
      Warehouse wh = new Warehouse(conf);
      wh.deleteDir(dir, true);
    } catch (MetaException e) {
      throw new HiveException(e);
    }
  }

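// Test setup fragment: point the default filesystem and the metastore warehouse
// path at the mini-cluster, then construct the Warehouse used by the tests.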
    hcatConf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
    URI fsuri = cluster.getFileSystem().getUri();
    Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), "/user/hive/warehouse");
    hcatConf.set(HiveConf.ConfVars.HADOOPFS.varname, fsuri.toString());
    hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString());
    wh = new Warehouse(hcatConf);
    SessionState.start(new CliSessionState(hcatConf));

    hcatDriver = new HCatDriver();
  }

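// Test setup fragment: configure HCatalog's semantic-analyzer hook, disable
// pre/post-execution hooks, then create the client-side Warehouse and metastore client.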
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    clientWH = new Warehouse(hcatConf);
    msc = new HiveMetaStoreClient(hcatConf, null);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  }

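// Configurable hook fragment: build the Warehouse when the configuration is
// injected, rewrapping the checked MetaException as an unchecked exception.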
  @Override
  public void setConf(Configuration conf) {
    super.setConf(conf);
    try {
      this.wh = new Warehouse(conf);
    } catch (MetaException ex) {
      throw new RuntimeException(ex);
    }
  }

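// Test setup fragment: resolve the warehouse path and its filesystem, construct
// the Warehouse, and clean up leftover metastore state from earlier runs.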
    String username = ShimLoader.getHadoopShims().getShortUserName(ugi);
   
    whPath = new Path(whDir);
    whFs = whPath.getFileSystem(conf);
   
    wh = new Warehouse(conf);
    hive = Hive.get(conf);
   
    // clean up any leftover state in the HMS
    HcatTestUtils.cleanupHMS(hive, wh, perm700);
   
