Examples of VectorizationContext


Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

  boolean validateExprNodeDesc(ExprNodeDesc desc, VectorExpressionDescriptor.Mode mode) {
    if (!validateExprNodeDescRecursive(desc)) {
      return false;
    }
    try {
      VectorizationContext vc = new ValidatorVectorizationContext();
      if (vc.getVectorExpression(desc, mode) == null) {
        // TODO: this cannot happen - VectorizationContext throws in such cases.
        return false;
      }
    } catch (Exception e) {
      if (LOG.isDebugEnabled()) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

      if (!isVirtualColumn(c)) {
        cmap.put(c.getInternalName(), columnCount++);
      }
    }

    VectorizationContext vc =  new VectorizationContext(cmap, columnCount);
    return vc;
  }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    public Map<String, Map<Integer, String>> getScratchColumnVectorTypes() {
      Map<String, Map<Integer, String>> scratchColumnVectorTypes =
          new HashMap<String, Map<Integer, String>>();
      for (String onefile : scratchColumnContext.keySet()) {
        VectorizationContext vc = scratchColumnContext.get(onefile);
        Map<Integer, String> cmap = vc.getOutputColumnTypeMap();
        scratchColumnVectorTypes.put(onefile, cmap);
      }
      return scratchColumnVectorTypes;
    }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    public Map<String, Map<String, Integer>> getScratchColumnMap() {
      Map<String, Map<String, Integer>> scratchColumnMap =
          new HashMap<String, Map<String, Integer>>();
      for(String oneFile: scratchColumnContext.keySet()) {
        VectorizationContext vc = scratchColumnContext.get(oneFile);
        Map<String, Integer> cmap = vc.getColumnMap();
        scratchColumnMap.put(oneFile, cmap);
      }
      return scratchColumnMap;
    }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
        Object... nodeOutputs) throws SemanticException {

      Operator<? extends OperatorDesc> op = (Operator<? extends OperatorDesc>) nd;

      VectorizationContext vContext = null;

      if (op instanceof TableScanOperator) {
        vContext = getVectorizationContext((TableScanOperator) op, physicalContext);
        for (String onefile : mWork.getPathToAliases().keySet()) {
          List<String> aliases = mWork.getPathToAliases().get(onefile);
          for (String alias : aliases) {
            Operator<? extends OperatorDesc> opRoot = mWork.getAliasToWork().get(alias);
            if (op == opRoot) {
              // The same vectorization context is copied multiple times into
              // the MapWork scratch columnMap
              // Each partition gets a copy
              //
              vContext.setFileKey(onefile);
              scratchColumnContext.put(onefile, vContext);
              break;
            }
          }
        }
        vContextsByTSOp.put(op, vContext);
      } else {
        assert stack.size() > 1;
        // Walk down the stack of operators until we found one willing to give us a context.
        // At the bottom will be the TS operator, guaranteed to have a context
        int i= stack.size()-2;
        while (vContext == null) {
          Operator<? extends OperatorDesc> opParent = (Operator<? extends OperatorDesc>) stack.get(i);
          vContext = vContextsByTSOp.get(opParent);
          --i;
        }
      }

      assert vContext != null;

      if ((op.getType().equals(OperatorType.REDUCESINK) || op.getType().equals(OperatorType.FILESINK)) &&
          op.getParentOperators().get(0).getType().equals(OperatorType.GROUPBY)) {
        // No need to vectorize
        if (!opsDone.contains(op)) {
          opsDone.add(op);
        }
      } else {
        try {
          if (!opsDone.contains(op)) {
            Operator<? extends OperatorDesc> vectorOp =
                vectorizeOperator(op, vContext);
            opsDone.add(op);
            if (vectorOp != op) {
              opsDone.add(vectorOp);
            }
            if (vectorOp instanceof VectorizationContextRegion) {
              VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp;
              VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext();
              vContextsByTSOp.put(op, vOutContext);
              scratchColumnContext.put(vOutContext.getFileKey(), vOutContext);
            }
          }
        } catch (HiveException e) {
          throw new SemanticException(e);
        }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    columnMap.put("col1", 0);
    columnMap.put("col2", 1);
    columnMap.put("col3", 2);

    //Generate vectorized expression
    vContext = new VectorizationContext(columnMap, 3);
  }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

  boolean validateExprNodeDesc(ExprNodeDesc desc, VectorExpressionDescriptor.Mode mode) {
    if (!validateExprNodeDescRecursive(desc)) {
      return false;
    }
    try {
      VectorizationContext vc = new ValidatorVectorizationContext();
      if (vc.getVectorExpression(desc, mode) == null) {
        // TODO: this cannot happen - VectorizationContext throws in such cases.
        LOG.info("getVectorExpression returned null");
        return false;
      }
    } catch (Exception e) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters())) {
      return false;
    }
    // See if we can vectorize the aggregation.
    try {
      VectorizationContext vc = new ValidatorVectorizationContext();
      if (vc.getAggregatorExpression(aggDesc, isReduce) == null) {
        // TODO: this cannot happen - VectorizationContext throws in such cases.
        LOG.info("getAggregatorExpression returned null");
        return false;
      }
    } catch (Exception e) {
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

    }
    return true;
  }

  private boolean aggregatorsOutputIsPrimitive(AggregationDesc aggDesc, boolean isReduce) {
    VectorizationContext vc = new ValidatorVectorizationContext();
    VectorAggregateExpression vectorAggrExpr;
    try {
        vectorAggrExpr = vc.getAggregatorExpression(aggDesc, isReduce);
    } catch (Exception e) {
      // We should have already attempted to vectorize in validateAggregationDesc.
      LOG.info("Vectorization of aggreation should have succeeded ", e);
      return false;
    }
View Full Code Here

Examples of org.apache.hadoop.hive.ql.exec.vector.VectorizationContext

      PhysicalContext pctx) {
    RowSchema rs = op.getSchema();

    // Add all non-virtual columns to make a vectorization context for
    // the TableScan operator.
    VectorizationContext vContext = new VectorizationContext();
    for (ColumnInfo c : rs.getSignature()) {
      // Earlier, validation code should have eliminated virtual columns usage (HIVE-5560).
      if (!isVirtualColumn(c)) {
        vContext.addInitialColumn(c.getInternalName());
      }
    }
    vContext.finishedAddingInitialColumns();
    return vContext;
  }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.