Package org.apache.hadoop.hive.ql.exec

Examples of org.apache.hadoop.hive.ql.exec.JoinOperator$IntermediateObject


    }

    // Type checking and implicit type conversion for join keys
    genJoinOperatorTypeCheck(joinSrcOp, srcOps);

    JoinOperator joinOp = (JoinOperator) genJoinOperatorChildren(joinTree,
      joinSrcOp, srcOps, omitOpts);
    joinContext.put(joinOp, joinTree);

    Operator op = joinOp;
    for (ASTNode condn : joinTree.getPostJoinFilters()) {
View Full Code Here
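
The loop that begins at the end of this fragment layers any post-join filter
conditions on top of the new join operator. A minimal sketch of that pattern,
assuming the snippet continues roughly as below; genFilterPlan is an assumed
helper name, since the fragment is cut off here:

    // Sketch only: genFilterPlan stands in for whatever helper the full
    // source calls; the fragment above is truncated before the loop body.
    Operator op = joinOp;
    for (ASTNode condn : joinTree.getPostJoinFilters()) {
      // Each post-join filter is stacked on the current top of the plan,
      // so the predicate applies to the join output.
      op = genFilterPlan(qb, condn, op);
    }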


    }

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) {
      JoinOperator op = (JoinOperator)nd;
      TableAccessCtx tableAccessCtx = (TableAccessCtx)procCtx;
      Map<String, List<String>> tableToKeysMap = new HashMap<String, List<String>>();

      List<Operator<? extends OperatorDesc>> parentOps = op.getParentOperators();

      // Get the key column names for each side of the join,
      // and check if the keys are all constants
      // or columns (not expressions). If yes, proceed.
      QBJoinTree joinTree = pGraphContext.getJoinContext().get(op);
View Full Code Here
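
The comment above requires every join key to be a plain column or a constant.
A minimal sketch of that classification using Hive's ExprNodeDesc subclasses;
the helper name is hypothetical, and this is only an assumption about how the
check is phrased in the full source:

    // Key expressions that are bare columns or literals qualify; anything
    // else (function calls, casts, arithmetic) is an expression and fails.
    private static boolean isSimpleKey(ExprNodeDesc expr) {
      return expr instanceof ExprNodeColumnDesc
          || expr instanceof ExprNodeConstantDesc;
    }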

    if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) {
      return null;
    }

    JoinOperator joinOp = (JoinOperator) nd;
    // if we have traits, and table info is present in the traits, we know the
    // exact number of buckets. Else choose the largest number of estimated
    // reducers from the parent operators.
    int numBuckets = -1;
    int estimatedBuckets = -1;
    if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) {
      for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
        if (parentOp.getOpTraits().getNumBuckets() > 0) {
          numBuckets = (numBuckets < parentOp.getOpTraits().getNumBuckets()) ?
              parentOp.getOpTraits().getNumBuckets() : numBuckets;
        }
View Full Code Here
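
The running maximum in the loop above can be written more compactly. An
equivalent sketch using only the accessors already shown:

    int numBuckets = -1;
    for (Operator<? extends OperatorDesc> parentOp : joinOp.getParentOperators()) {
      int parentBuckets = parentOp.getOpTraits().getNumBuckets();
      if (parentBuckets > 0) {
        // Keep the largest known bucket count across all parents.
        numBuckets = Math.max(numBuckets, parentBuckets);
      }
    }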

   * The Node Processor for Column Pruning on Join Operators.
   */
  public static class ColumnPrunerJoinProc implements NodeProcessor {
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx ctx,
        Object... nodeOutputs) throws SemanticException {
      JoinOperator op = (JoinOperator) nd;
      pruneJoinOperator(ctx, op, op.getConf(), op.getColumnExprMap(), null,
          false);
      return null;
    }
View Full Code Here
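
Processors like ColumnPrunerJoinProc are not called directly; they are bound
to operator-name patterns and driven by a graph walker. A sketch of the usual
wiring with the rule classes from org.apache.hadoop.hive.ql.lib; cppCtx stands
in for the column pruner's NodeProcessorCtx:

    // Bind the processor to JOIN nodes and walk the plan. The "%" wildcard
    // matches any operator identifier suffix (e.g. JOIN_5).
    Map<Rule, NodeProcessor> rules = new LinkedHashMap<Rule, NodeProcessor>();
    rules.put(new RuleRegExp("R1", JoinOperator.getOperatorName() + "%"),
        new ColumnPrunerJoinProc());
    Dispatcher disp = new DefaultRuleDispatcher(null, rules, cppCtx);
    GraphWalker walker = new DefaultGraphWalker(disp);
    List<Node> topNodes = new ArrayList<Node>(pGraphContext.getTopOps().values());
    walker.startWalking(topNodes, null);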

      Set<Map.Entry<JoinOperator, QBJoinTree>> joinCtx = pGraphContext.getJoinContext().entrySet();
      Iterator<Map.Entry<JoinOperator, QBJoinTree>> joinCtxIter = joinCtx.iterator();
      while (joinCtxIter.hasNext()) {
        Map.Entry<JoinOperator, QBJoinTree> joinEntry = joinCtxIter.next();
        JoinOperator joinOp = joinEntry.getKey();
        QBJoinTree qbJoin = joinEntry.getValue();
        int mapJoinPos = mapSideJoin(joinOp, qbJoin);
        if (mapJoinPos >= 0) {
          MapJoinOperator mapJoinOp = generateMapJoinOperator(pactx, joinOp, qbJoin, mapJoinPos);
          listMapJoinOps.add(mapJoinOp);
View Full Code Here
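
Since this loop only reads the map and removes nothing while iterating, the
explicit Iterator is unnecessary; an equivalent enhanced-for version:

    for (Map.Entry<JoinOperator, QBJoinTree> joinEntry
        : pGraphContext.getJoinContext().entrySet()) {
      JoinOperator joinOp = joinEntry.getKey();
      QBJoinTree qbJoin = joinEntry.getValue();
      int mapJoinPos = mapSideJoin(joinOp, qbJoin);
      if (mapJoinPos >= 0) {
        // Convert the common join to a map join at the position that
        // mapSideJoin() selected.
        listMapJoinOps.add(generateMapJoinOperator(pactx, joinOp, qbJoin, mapJoinPos));
      }
    }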

      // The graph walker should never call this processor for leaves,
      // so the stack always holds at least the current node.
      assert(!stack.isEmpty());

      // LineageCtx
      LineageCtx lCtx = (LineageCtx) procCtx;
      JoinOperator op = (JoinOperator)nd;
      JoinDesc jd = op.getConf();

      // The input operator to the join is always a reduce sink operator
      ReduceSinkOperator inpOp = (ReduceSinkOperator)getParent(stack);
      ReduceSinkDesc rd = inpOp.getConf();
      int tag = rd.getTag();

      // Iterate over the outputs of the join operator and merge the
      // dependencies of the columns that correspond to the tag.
      int cnt = 0;
      List<ExprNodeDesc> exprs = jd.getExprs().get((byte)tag);
      for (ColumnInfo ci : op.getSchema().getSignature()) {
        if (jd.getReversedExprs().get(ci.getInternalName()) != tag) {
          continue;
        }

        // Otherwise look up the expression corresponding to this ci
View Full Code Here
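
The tag is how a multi-input join tells its inputs apart: each parent
ReduceSink carries one, JoinDesc.getExprs() maps a tag to that input's value
expressions, and getReversedExprs() maps an output column's internal name back
to its tag. The counter cnt pairs each surviving output column with its source
expression; the pairing below is inferred, since the fragment is truncated:

      // For the i-th ColumnInfo attributed to our tag, the matching source
      // expression is exprs.get(i); cnt tracks i across loop iterations.
      ExprNodeDesc expr = exprs.get(cnt++);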

    // pRS-pJOIN-cRS
    @Override
    public Object process(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx)
        throws SemanticException {
      JoinOperator pJoin =
          CorrelationUtilities.findPossibleParent(cRS, JoinOperator.class, dedupCtx.trustScript());
      if (pJoin != null && merge(cRS, pJoin, dedupCtx.minReducer())) {
        pJoin.getConf().setFixedAsSorted(true);
        CorrelationUtilities.replaceReduceSinkWithSelectOperator(
            cRS, dedupCtx.getPctx(), dedupCtx);
        return true;
      }
      return false;
View Full Code Here

    @Override
    public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
        ReduceSinkDeduplicateProcCtx dedupCtx)
        throws SemanticException {
      Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
      JoinOperator pJoin =
          CorrelationUtilities.findPossibleParent(
              start, JoinOperator.class, dedupCtx.trustScript());
      if (pJoin != null && merge(cRS, pJoin, dedupCtx.minReducer())) {
        pJoin.getConf().setFixedAsSorted(true);
        CorrelationUtilities.removeReduceSinkForGroupBy(
            cRS, cGBY, dedupCtx.getPctx(), dedupCtx);
        return true;
      }
      return false;
View Full Code Here
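
Both processors above perform the same core rewrite: locate the JoinOperator
feeding a redundant child ReduceSink, mark the join's output as fixed-as-sorted,
and drop the extra shuffle. In the pRS/pJOIN/cRS naming of the earlier comment,
the two rewrites look like this:

    // First processor:   pRS* -> pJOIN -> ... -> cRS  -> child
    //          becomes:   pRS* -> pJOIN -> ... -> SEL  -> child
    //   (cRS is replaced by a pass-through SelectOperator)
    //
    // Second processor:  pRS* -> pJOIN -> ... -> cRS -> cGBY -> child
    //          becomes:   pRS* -> pJOIN -> ... -> cGBY -> child
    //   (cRS is removed; the join output already satisfies the group-by's
    //    partitioning and sort order)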

          LOG.info("Operator " + current.getIdentifier() + " " +
              current.getName() + " is correlated");
          Operator<? extends OperatorDesc> childOperator =
              CorrelationUtilities.getSingleChild(current, true);
          if (childOperator instanceof JoinOperator) {
            JoinOperator joinOp = (JoinOperator) childOperator;
            JoinCondDesc[] joinConds = joinOp.getConf().getConds();
            List<Operator<? extends OperatorDesc>> rsOps = joinOp.getParentOperators();
            LinkedHashSet<ReduceSinkOperator> correlatedRsOps =
                new LinkedHashSet<ReduceSinkOperator>();
            analyzeReduceSinkOperatorsOfJoinOperator(joinConds, rsOps, current, correlatedRsOps);
            correlatedReduceSinkOperators.addAll(correlatedRsOps);
          } else {
View Full Code Here

    @Override
    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {

      BucketingSortingCtx bctx = (BucketingSortingCtx)procCtx;
      JoinOperator jop = (JoinOperator)nd;
      List<ColumnInfo> colInfos = jop.getSchema().getSignature();
      Byte[] order = jop.getConf().getTagOrder();

      BucketCol[] newBucketCols = null;
      SortCol[] newSortCols = null;

      for (int i = 0; i < jop.getParentOperators().size(); i++) {

        Operator<? extends OperatorDesc> parent = jop.getParentOperators().get(i);

        // The caller of this method should guarantee this
        assert(parent instanceof ReduceSinkOperator);

        ReduceSinkOperator rop = (ReduceSinkOperator)jop.getParentOperators().get(i);

        String sortOrder = rop.getConf().getOrder();
        List<BucketCol> bucketCols = new ArrayList<BucketCol>();
        List<SortCol> sortCols = new ArrayList<SortCol>();
        // Go through the Reduce keys and find the matching column(s) in the reduce values
        for (int keyIndex = 0; keyIndex < rop.getConf().getKeyCols().size(); keyIndex++) {
          for (int valueIndex = 0; valueIndex < rop.getConf().getValueCols().size();
              valueIndex++) {

            if (new ExprNodeDescEqualityWrapper(rop.getConf().getValueCols().get(valueIndex)).
                equals(new ExprNodeDescEqualityWrapper(rop.getConf().getKeyCols().get(
                    keyIndex)))) {

              String colName = rop.getSchema().getSignature().get(valueIndex).getInternalName();
              bucketCols.add(new BucketCol(colName, keyIndex));
              sortCols.add(new SortCol(colName, keyIndex, sortOrder.charAt(keyIndex)));
              break;
            }
          }
        }

        if (bucketCols.isEmpty()) {
          assert(sortCols.isEmpty());
          continue;
        }

        if (newBucketCols == null) {
          assert(newSortCols == null);
          // The number of join keys is equal to the number of keys in every reducer, although
          // not every key may map to a value in the reducer
          newBucketCols = new BucketCol[rop.getConf().getKeyCols().size()];
          newSortCols = new SortCol[rop.getConf().getKeyCols().size()];
        } else {
          assert(newSortCols != null);
        }

        byte tag = (byte)rop.getConf().getTag();
        List<ExprNodeDesc> exprs = jop.getConf().getExprs().get(tag);

        int colInfosOffset = 0;
        int orderValue = order[tag];
        // The join outputs the columns of its reduce sinks in tag order,
        // so compute the offset at which this input's columns start
        for (byte orderIndex = 0; orderIndex < order.length; orderIndex++) {
          if (order[orderIndex] < orderValue) {
            colInfosOffset += jop.getConf().getExprs().get(orderIndex).size();
          }
        }

        findBucketingSortingColumns(exprs, colInfos, bucketCols, sortCols, newBucketCols,
            newSortCols, colInfosOffset);
View Full Code Here
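
A small worked example of the offset computation above, with hypothetical
sizes: a three-way join whose inputs emit 2, 3, and 1 columns, in tag order
{0, 1, 2}.

    // Hypothetical numbers for illustration only.
    int[] exprSizes = {2, 3, 1};   // output column count per join input
    byte[] tagOrder = {0, 1, 2};   // plays the role of order[] above
    byte tag = 2;                  // positioning the third input

    int colInfosOffset = 0;
    for (byte i = 0; i < tagOrder.length; i++) {
      if (tagOrder[i] < tagOrder[tag]) {
        colInfosOffset += exprSizes[i];  // columns emitted before ours
      }
    }
    // colInfosOffset == 5: this input's first column is colInfos.get(5)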
