Package org.apache.hadoop.hive.metastore.api

Examples of org.apache.hadoop.hive.metastore.api.Index


    public Index get_index_by_name(final String dbName, final String tblName,
        final String indexName) throws MetaException, NoSuchObjectException,
        TException {

      startFunction("get_index_by_name", ": db=" + dbName + " tbl="
          + tblName + " index=" + indexName);

      Index ret = null;

      try {
        ret = executeWithRetry(new Command<Index>() {
          @Override
          public Index run(RawStore ms) throws Exception {
            return get_index_by_name_core(ms, dbName, tblName, indexName);
          }
        });
      // ... (exception handling and the endFunction("get_index_by_name") call elided) ...


    }

    private Index get_index_by_name_core(final RawStore ms, final String db_name,
        final String tbl_name, final String index_name)
        throws MetaException, NoSuchObjectException, TException {
      Index index = ms.getIndex(db_name, tbl_name, index_name);

      if (index == null) {
        throw new NoSuchObjectException(db_name + "." + tbl_name
            + " index=" + index_name + " not found");
      }
      return index;
    }
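
A minimal client-side sketch that reaches this handler through the thrift API; the metastore connection details and the "default"/"sales"/"sales_idx" names are assumptions:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Index;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

public class GetIndexExample {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Resolves to get_index_by_name() on the metastore side.
      Index idx = client.getIndex("default", "sales", "sales_idx");
      System.out.println("handler: " + idx.getIndexHandlerClass());
    } catch (NoSuchObjectException e) {
      // Raised by get_index_by_name_core() when ms.getIndex() returns null.
      System.out.println("no such index");
    } finally {
      client.close();
    }
  }
}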

    // we want to signal an error if the index doesn't exist and we're
    // configured not to ignore this
    boolean throwException =
      !ifExists && !HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT);
    if (throwException) {
      try {
        Index idx = db.getIndex(tableName, indexName); // existence check only
      } catch (HiveException e) {
        throw new SemanticException(ErrorMsg.INVALID_INDEX.getMsg(indexName));
      }
    }
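
ConfVars.DROPIGNORESNONEXISTENT maps to hive.exec.drop.ignorenonexistent, which defaults to true, so a DROP INDEX on a missing index is normally ignored. A small sketch, assuming a plain local HiveConf, of disabling the flag so the SemanticException above is raised instead:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class DropFlagExample {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // With the flag off, DROP INDEX (without IF EXISTS) on a missing index
    // fails with ErrorMsg.INVALID_INDEX instead of being silently ignored.
    conf.setBoolVar(ConfVars.DROPIGNORESNONEXISTENT, false);
    System.out.println(HiveConf.getBoolVar(conf, ConfVars.DROPIGNORESNONEXISTENT));
  }
}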

  private List<Task<?>> getIndexBuilderMapRed(String baseTableName, String indexName,
      HashMap<String, String> partSpec) throws SemanticException {
    try {
      String dbName = db.getCurrentDatabase();
      Index index = db.getIndex(dbName, baseTableName, indexName);
      Table indexTbl = db.getTable(dbName, index.getIndexTableName());
      String baseTblName = index.getOrigTableName();
      Table baseTbl = db.getTable(dbName, baseTblName);

      String handlerCls = index.getIndexHandlerClass();
      HiveIndexHandler handler = HiveUtils.getIndexHandler(conf, handlerCls);

      List<Partition> indexTblPartitions = null;
      List<Partition> baseTblPartitions = null;
      if(indexTbl != null) {
      // ... (partition handling and task generation elided) ...
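
A sketch of the handler lookup the method above performs, here with Hive's built-in CompactIndexHandler named explicitly; the standalone HiveConf setup is an assumption:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.index.HiveIndexHandler;
import org.apache.hadoop.hive.ql.metadata.HiveUtils;

public class HandlerLookupExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    HiveIndexHandler handler = HiveUtils.getIndexHandler(conf,
        "org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler");
    // Compact indexes keep their data in a separate index table.
    System.out.println(handler.usesIndexTable());
  }
}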

  @Override
  public void generateIndexQuery(List<Index> indexes, ExprNodeDesc predicate,
    ParseContext pctx, HiveIndexQueryContext queryContext) {

    Index index = indexes.get(0);
    DecomposedPredicate decomposedPredicate = decomposePredicate(predicate, index,
                                                                  queryContext.getQueryPartitions());

    if (decomposedPredicate == null) {
      queryContext.setQueryTasks(null);
      return; // abort if we couldn't pull out anything from the predicate
    }

    // pass residual predicate back out for further processing
    queryContext.setResidualPredicate(decomposedPredicate.residualPredicate);
    // setup TableScanOperator to change input format for original query
    queryContext.setIndexInputFormat(HiveCompactIndexInputFormat.class.getName());

    // Build reentrant QL for index query
    StringBuilder qlCommand = new StringBuilder("INSERT OVERWRITE DIRECTORY ");

    String tmpFile = pctx.getContext().getMRTmpFileURI();
    queryContext.setIndexIntermediateFile(tmpFile);
    qlCommand.append( "\"" + tmpFile + "\" ");            // QL includes " around file name
    qlCommand.append("SELECT `_bucketname` ,  `_offsets` FROM ");
    qlCommand.append(HiveUtils.unparseIdentifier(index.getIndexTableName()));
    qlCommand.append(" WHERE ");

    String predicateString = decomposedPredicate.pushedPredicate.getExprString();
    qlCommand.append(predicateString);
    // ... (remainder of the method elided) ...
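
For a hypothetical index table default__sales_sales_idx__ and pushed predicate (dt = '2011-01-01'), the builder above produces a reentrant command of this shape (the directory is whatever getMRTmpFileURI() returned):

  INSERT OVERWRITE DIRECTORY "/tmp/hive-user/<mr-tmp>/-mr-10002"
  SELECT `_bucketname` ,  `_offsets` FROM `default__sales_sales_idx__`
  WHERE (dt = '2011-01-01')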

  @Override
  public Index getIndex(String dbName, String origTableName, String indexName)
      throws MetaException {
    openTransaction();
    MIndex mIndex = this.getMIndex(dbName, origTableName, indexName);
    Index ret = convertToIndex(mIndex);
    commitTransaction();
    return ret;
  }
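
As written, an exception from getMIndex() would leave the transaction open. Other ObjectStore methods guard against this with a rollback in a finally block; a sketch of that pattern, with the method name getIndexGuarded() made up:

  public Index getIndexGuarded(String dbName, String origTableName,
      String indexName) throws MetaException {
    boolean commited = false;
    Index ret = null;
    try {
      openTransaction();
      ret = convertToIndex(getMIndex(dbName, origTableName, indexName));
      commited = commitTransaction();
    } finally {
      if (!commited) {
        rollbackTransaction();
      }
    }
    return ret;
  }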

  private Index convertToIndex(MIndex mIndex) throws MetaException {
    if(mIndex == null) {
      return null;
    }

    return new Index(
        mIndex.getIndexName(),
        mIndex.getIndexHandlerClass(),
        mIndex.getOrigTable().getDatabase().getName(),
        mIndex.getOrigTable().getTableName(),
        mIndex.getCreateTime(),
        // ... (remaining constructor arguments elided) ...

  public void createIndex(String tableName, String indexName,
      String indexHandlerClass, List<String> indexedCols, String indexTblName,
      boolean deferredRebuild, String inputFormat, String outputFormat,
      String serde, String storageHandler, String location,
      Map<String, String> idxProps, Map<String, String> tblProps,
      Map<String, String> serdeProps, String collItemDelim, String fieldDelim,
      String fieldEscape, String lineDelim, String mapKeyDelim,
      String indexComment) throws HiveException {

    try {
      String dbName = getCurrentDatabase();
      Index old_index = null;
      try {
        old_index = getIndex(dbName, tableName, indexName);
      } catch (Exception e) {
        // an index that does not exist yet is the expected case; ignore
      }
      if (old_index != null) {
        throw new HiveException("Index " + indexName + " already exists on table "
            + tableName + ", db=" + dbName);
      }

      org.apache.hadoop.hive.metastore.api.Table baseTbl = getMSC().getTable(dbName, tableName);
      if (TableType.VIRTUAL_VIEW.toString().equals(baseTbl.getTableType())) {
        throw new HiveException("tableName=" + tableName
            + " is a VIRTUAL VIEW. Index on VIRTUAL VIEW is not supported.");
      }

      if (indexTblName == null) {
        indexTblName = MetaStoreUtils.getIndexTableName(dbName, tableName, indexName);
      } else {
        org.apache.hadoop.hive.metastore.api.Table temp = null;
        try {
          temp = getMSC().getTable(dbName, indexTblName);
        } catch (Exception e) {
          // table not found means the proposed index table name is free
        }
        if (temp != null) {
          throw new HiveException("Table name " + indexTblName + " already exists. Choose another name.");
        }
      }

      org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().deepCopy();
      SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
      if (serde != null) {
        serdeInfo.setSerializationLib(serde);
      } else if (storageHandler == null) {
        serdeInfo.setSerializationLib(
            org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
      } else {
        HiveStorageHandler sh = HiveUtils.getStorageHandler(getConf(), storageHandler);
        serdeInfo.setSerializationLib(sh.getSerDeClass().getName());
      }

      if (fieldDelim != null) {
        serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim);
        serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim);
      }
      if (fieldEscape != null) {
        serdeInfo.getParameters().put(ESCAPE_CHAR, fieldEscape);
      }
      if (collItemDelim != null) {
        serdeInfo.getParameters().put(COLLECTION_DELIM, collItemDelim);
      }
      if (mapKeyDelim != null) {
        serdeInfo.getParameters().put(MAPKEY_DELIM, mapKeyDelim);
      }
      if (lineDelim != null) {
        serdeInfo.getParameters().put(LINE_DELIM, lineDelim);
      }

      if (serdeProps != null) {
        serdeInfo.getParameters().putAll(serdeProps);
      }

      storageDescriptor.setLocation(null);
      if (location != null) {
        storageDescriptor.setLocation(location);
      }
      storageDescriptor.setInputFormat(inputFormat);
      storageDescriptor.setOutputFormat(outputFormat);

      Map<String, String> params = new HashMap<String,String>();

      List<FieldSchema> indexTblCols = new ArrayList<FieldSchema>();
      List<Order> sortCols = new ArrayList<Order>();
      storageDescriptor.setBucketCols(null);
      int k = 0;
      for (FieldSchema col : storageDescriptor.getCols()) {
        if (indexedCols.contains(col.getName())) {
          indexTblCols.add(col);
          sortCols.add(new Order(col.getName(), 1));
          k++;
        }
      }
      if (k != indexedCols.size()) {
        throw new RuntimeException(
            "Check the index columns: they should all appear in the table being indexed.");
      }

      storageDescriptor.setCols(indexTblCols);
      storageDescriptor.setSortCols(sortCols);

      int time = (int) (System.currentTimeMillis() / 1000);
      org.apache.hadoop.hive.metastore.api.Table tt = null;
      HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass);

      if (indexHandler.usesIndexTable()) {
        tt = new org.apache.hadoop.hive.ql.metadata.Table(dbName, indexTblName).getTTable();
        List<FieldSchema> partKeys = baseTbl.getPartitionKeys();
        tt.setPartitionKeys(partKeys);
        tt.setTableType(TableType.INDEX_TABLE.toString());
        if (tblProps != null) {
          for (Entry<String, String> prop : tblProps.entrySet()) {
            tt.putToParameters(prop.getKey(), prop.getValue());
          }
        }
      }

      if(!deferredRebuild) {
        throw new RuntimeException("Please specify deferred rebuild using \" WITH DEFERRED REBUILD \".");
      }

      Index indexDesc = new Index(indexName, indexHandlerClass, dbName, tableName, time, time, indexTblName,
          storageDescriptor, params, deferredRebuild);
      indexDesc.getParameters().put("comment", indexComment);

      if (idxProps != null) {
        indexDesc.getParameters().putAll(idxProps);
      }

      indexHandler.analyzeIndexDefinition(baseTbl, indexDesc, tt);

      this.getMSC().createIndex(indexDesc, tt);
      // ... (remainder of the method elided) ...
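
In practice this method is reached through DDL rather than called directly. A hypothetical statement that satisfies the deferred-rebuild check above, using the built-in compact index handler:

  CREATE INDEX sales_idx ON TABLE sales (dt)
  AS 'org.apache.hadoop.hive.ql.index.compact.CompactIndexHandler'
  WITH DEFERRED REBUILD;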

    canApplyCtx.setBaseTableName(baseTableName);
    canApplyCtx.populateRewriteVars(topOp);

    Map<Index, Set<String>> indexTableMap = getIndexToKeysMap(indexes.get(baseTable));
    Iterator<Index> indexMapItr = indexTableMap.keySet().iterator();
    Index index = null;
    while(indexMapItr.hasNext()){
      //we rewrite the original query using the first valid index encountered
      //this can be changed if we have a better mechanism to
      //decide which index will produce a better rewrite
      index = indexMapItr.next();
      canApply = canApplyCtx.isIndexUsableForQueryBranchRewrite(index,
          indexTableMap.get(index));
      if(canApply){
        canApply = checkIfAllRewriteCriteriaIsMet(canApplyCtx);
        //break here if any valid index is found to apply rewrite
        if(canApply){
          //check if aggregation function is set.
          //If not, set it using the only indexed column
          if(canApplyCtx.getAggFunction() == null){
            //strip off the surrounding brackets of the key set's toString(): [...]
            String aggregationFunction = indexTableMap.get(index).toString();
            aggregationFunction = aggregationFunction.substring(1,
                aggregationFunction.length() - 1);
            canApplyCtx.setAggFunction("_count_of_" + aggregationFunction);
          }
        }
        break;
      }
    }
    indexTableName = index.getIndexTableName();

    if(canApply && topOps.containsValue(topOp)) {
      Iterator<String> topOpNamesItr = topOps.keySet().iterator();
      while(topOpNamesItr.hasNext()){
        String topOpName = topOpNamesItr.next();
      // ... (remainder of the method elided) ...
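
A worked sketch of the aggregation-column naming above: for a hypothetical single index key dt, the key set prints as [dt], the surrounding brackets are stripped, and the aggregate column becomes _count_of_dt:

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;

public class AggNameExample {
  public static void main(String[] args) {
    Set<String> keys = new LinkedHashSet<String>(Collections.singleton("dt"));
    String s = keys.toString();            // "[dt]"
    s = s.substring(1, s.length() - 1);    // "dt"
    System.out.println("_count_of_" + s);  // "_count_of_dt"
  }
}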

  /**
   * Maps each index table to the set of its key column names.
   * @param indexTables
   * @return a map from each Index to its key column names
   * @throws SemanticException
   */
  Map<Index, Set<String>> getIndexToKeysMap(List<Index> indexTables) throws SemanticException {
    Index index = null;
    Hive hiveInstance = hiveDb;
    Map<Index, Set<String>> indexToKeysMap = new LinkedHashMap<Index, Set<String>>();
    for (int idxCtr = 0; idxCtr < indexTables.size(); idxCtr++) {
      final Set<String> indexKeyNames = new LinkedHashSet<String>();
      index = indexTables.get(idxCtr);
      // get the index key columns
      StorageDescriptor sd = index.getSd();
      List<FieldSchema> idxColList = sd.getCols();
      for (FieldSchema fieldSchema : idxColList) {
        indexKeyNames.add(fieldSchema.getName());
      }
      assert indexKeyNames.size() == 1;
      // Check that the index schema is as expected. This code block should
      // catch problems of this rewrite breaking when the AggregateIndexHandler
      // index is changed.
      List<String> idxTblColNames = new ArrayList<String>();
      try {
        Table idxTbl = hiveInstance.getTable(index.getDbName(),
            index.getIndexTableName());
        for (FieldSchema idxTblCol : idxTbl.getCols()) {
          idxTblColNames.add(idxTblCol.getName());
        }
      } catch (HiveException e) {
        LOG.error("Got exception while locating index table, " +
        // ... (rest of the error handling elided) ...
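
A compact sketch of the key-column extraction the loop above performs; the Index instance is assumed to come from the metastore, for example via Hive.getIndexes():

import java.util.LinkedHashSet;
import java.util.Set;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Index;

public class IndexKeysExample {
  static Set<String> keyColumnNames(Index index) {
    // The index key columns are the columns of the index's storage descriptor.
    Set<String> names = new LinkedHashSet<String>();
    for (FieldSchema fs : index.getSd().getCols()) {
      names.add(fs.getName());
    }
    return names;
  }
}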
