Examples of BucketCol


Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

      // If this select expression refers to one of the input's bucketing columns, record the
      // column's new internal name and position as an alias on the corresponding BucketCol.
      if (newBucketCols != null) {
        int bucketIndex = indexOfColName(bucketCols, columnExpr.getColumn());
        if (bucketIndex != -1) {
          if (newBucketCols[bucketIndex] == null) {
            newBucketCols[bucketIndex] = new BucketCol();
          }
          newBucketCols[bucketIndex].addAlias(
              colInfos.get(colInfosIndex).getInternalName(), colInfosIndex);
        }
      }
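
The snippet above builds up a BucketCol incrementally: it starts empty and accumulates an (internal name, position) alias for each output column that still carries the original bucketing column. Below is a minimal standalone sketch of that pattern, using only the no-argument constructor, addAlias, and getIndexes calls that appear in these examples; the column names and positions are invented for illustration.

import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;

public class BucketColAliasSketch {
  public static void main(String[] args) {
    BucketCol col = new BucketCol();
    // The bucketing column surfaces as "_col0" at position 0 of the new schema.
    col.addAlias("_col0", 0);
    // The same column is also selected under a second internal name at position 3.
    col.addAlias("_col3", 3);
    System.out.println(col.getIndexes()); // expected: [0, 3]
  }
}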

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

  private static List<BucketCol> getNewBucketCols(List<BucketCol> bucketCols,
      List<ColumnInfo> colInfos) {

    List<BucketCol> newBucketCols = new ArrayList<BucketCol>(bucketCols.size());
    for (int i = 0; i < bucketCols.size(); i++) {
      BucketCol bucketCol = new BucketCol();
      for (Integer index : bucketCols.get(i).getIndexes()) {
        // The only time this condition should be false is in the case of dynamic partitioning,
        // where the data is bucketed on a dynamic partition column and the FileSinkOperator is
        // being processed.  In that case the dynamic partition columns do not appear in
        // colInfos and, due to the limitations of dynamic partitioning, they appear at the
        // end of the input schema.  Since the order of the columns hasn't changed and no new
        // columns have been added or removed, it is safe to assume that their indexes are
        // greater than or equal to colInfos.size().
        if (index < colInfos.size()) {
          bucketCol.addAlias(colInfos.get(index).getInternalName(), index);
        } else {
          return null;
        }
      }
      newBucketCols.add(bucketCol);
    }
    return newBucketCols;
  }
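
The same remapping can be phrased against a plain list of internal column names. The helper below is hypothetical (not part of Hive); it follows the same index-based logic as getNewBucketCols above and returns null when an index falls outside the new schema, which is the dynamic-partitioning case described in the comment.

import java.util.List;
import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;

public class RemapSketch {
  // Hypothetical helper: remap a BucketCol onto a new output schema, given only the
  // internal column names of that schema, keeping the original positions.
  static BucketCol remap(BucketCol original, List<String> newInternalNames) {
    BucketCol remapped = new BucketCol();
    for (Integer index : original.getIndexes()) {
      if (index >= newInternalNames.size()) {
        return null; // the column is no longer present in the output schema
      }
      remapped.addAlias(newInternalNames.get(index), index);
    }
    return remapped;
  }
}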

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

            // This value column carries the same expression as the key column, so the
            // ReduceSink output is bucketed and sorted on it.
            if (new ExprNodeDescEqualityWrapper(rop.getConf().getValueCols().get(valueIndex)).
                equals(new ExprNodeDescEqualityWrapper(rop.getConf().getKeyCols().get(
                    keyIndex)))) {

              String colName = rop.getSchema().getSignature().get(valueIndex).getInternalName();
              bucketCols.add(new BucketCol(colName, keyIndex));
              sortCols.add(new SortCol(colName, keyIndex, sortOrder.charAt(keyIndex)));
              break;
            }
          }
        }
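
Once a value column is found to match a key column, the bucket and sort metadata are recorded as (name, index) pairs. A minimal sketch of that step, assuming the two-argument BucketCol and three-argument SortCol constructors shown above; the name, position, and order character are placeholders (Hive sort order strings use '+' for ascending and '-' for descending).

import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;

public class KeyMatchSketch {
  public static void main(String[] args) {
    String colName = "_col1"; // internal name of the matching column in the output schema
    int keyIndex = 0;         // position of the column within the reduce key columns
    char order = '+';         // ascending, taken from the operator's sort order string
    BucketCol bucketCol = new BucketCol(colName, keyIndex);
    SortCol sortCol = new SortCol(colName, keyIndex, order);
    System.out.println(bucketCol.getIndexes()); // expected: [0]
  }
}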

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

        // Look for the current partition column among the ReduceSink's value columns; if it is
        // forwarded as a value, the output remains bucketed on it.
        boolean valueColFound = false;
        for (int j = 0; j < rop.getConf().getValueCols().size(); j++) {
          if (new ExprNodeDescEqualityWrapper(rop.getConf().getValueCols().get(j)).equals(
              new ExprNodeDescEqualityWrapper(rop.getConf().getPartitionCols().get(i)))) {

            bucketCols.add(new BucketCol(
                rop.getSchema().getSignature().get(j).getInternalName(), j));
            valueColFound = true;
            break;
          }
        }

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

      List<SortCol> sortCols = new ArrayList<SortCol>();
      assert rop.getConf().getKeyCols().size() <= rop.getSchema().getSignature().size();
      // Group by operators select the key cols, so no need to find them in the values
      for (int i = 0; i < rop.getConf().getKeyCols().size(); i++) {
        String colName = rop.getSchema().getSignature().get(i).getInternalName();
        bucketCols.add(new BucketCol(colName, i));
        sortCols.add(new SortCol(colName, i, sortOrder.charAt(i)));
      }
      bctx.setBucketedCols(rop, bucketCols);
      bctx.setSortedCols(rop, sortCols);
    }
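
In the group-by case the key columns come first in the output schema, so each key column becomes both a bucket column and a sort column at the same position before the lists are registered on the BucketingSortingCtx. A minimal sketch of building those parallel lists, with invented internal names and sort order string (the operator and context wiring is omitted):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol;
import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol;

public class GroupByColsSketch {
  public static void main(String[] args) {
    List<String> keyColNames = Arrays.asList("_col0", "_col1"); // internal names of the key columns
    String sortOrder = "++";                                    // one order character per key column
    List<BucketCol> bucketCols = new ArrayList<BucketCol>();
    List<SortCol> sortCols = new ArrayList<SortCol>();
    for (int i = 0; i < keyColNames.size(); i++) {
      bucketCols.add(new BucketCol(keyColNames.get(i), i));
      sortCols.add(new SortCol(keyColNames.get(i), i, sortOrder.charAt(i)));
    }
    System.out.println(bucketCols.size() + " bucket cols, " + sortCols.size() + " sort cols");
  }
}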

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

      }
      int index = ExprNodeDescUtils.indexOf(partitionCol, outputValues);
      if (index < 0) {
        return Collections.emptyList();
      }
      bucketCols.add(new BucketCol(((ExprNodeColumnDesc) partitionCol).getColumn(), index));
    }
    // If the partition columns can't all be found in the values then the data is not bucketed
    return bucketCols;
  }

Examples of org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol

            // The key column is also forwarded as a value; record its output name and position
            // as an alias on the bucket and sort columns at this key position.
            String vname = outputValNames.get(vindex);
            if (newBucketCols[keyIndex] != null) {
              newBucketCols[keyIndex].addAlias(vname, vindex);
              newSortCols[keyIndex].addAlias(vname, vindex);
            } else {
              newBucketCols[keyIndex] = new BucketCol(vname, vindex);
              newSortCols[keyIndex] = new SortCol(vname, vindex, sortOrder.charAt(keyIndex));
            }
          }
        }
      }
