Package org.apache.hadoop.hive.ql.exec

Examples of org.apache.hadoop.hive.ql.exec.HashTableSinkOperator$HashTableSinkObjectCtx


    // generate empty hashtable for this (byte)tag
    String tmpURI = this.getWork().getTmpFileURI();

    String fileName = work.getBucketFileName(bigBucketFileName);

    HashTableSinkOperator htso = (HashTableSinkOperator)childOp;
    String tmpURIPath = Utilities.generatePath(tmpURI, htso.getConf().getDumpFilePrefix(),
        tag, fileName);
    console.printInfo(Utilities.now() + "\tDump the hashtable into file: " + tmpURIPath);
    Path path = new Path(tmpURIPath);
    FileSystem fs = path.getFileSystem(job);
    ObjectOutputStream out = new ObjectOutputStream(fs.create(path));
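
A minimal standalone sketch of the write pattern in the excerpt above: open an ObjectOutputStream on the path's FileSystem and serialize an empty table. The real operator writes Hive's own hashtable container rather than a plain java.util.HashMap, and the class and method names below are hypothetical; only the FileSystem/ObjectOutputStream calls are taken from the excerpt.

    import java.io.IOException;
    import java.io.ObjectOutputStream;
    import java.util.HashMap;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HashTableDumpSketch {
      // Serialize an empty map to the given path, mirroring fs.create + ObjectOutputStream above.
      public static void dumpEmptyHashTable(Configuration conf, Path dumpPath) throws IOException {
        FileSystem fs = dumpPath.getFileSystem(conf);     // resolve the FS from the path's scheme
        ObjectOutputStream out = new ObjectOutputStream(fs.create(dumpPath));
        try {
          out.writeObject(new HashMap<Object, Object>()); // stand-in for Hive's hashtable container
        } finally {
          out.close();                                    // flush and close the underlying HDFS stream
        }
      }
    }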


      } catch (Exception e) {
        e.printStackTrace();
      }

      HashTableSinkDesc hashTableSinkDesc = new HashTableSinkDesc(mapJoinOp.getConf());
      HashTableSinkOperator hashTableSinkOp = (HashTableSinkOperator) OperatorFactory
          .get(hashTableSinkDesc);

      // set hashtable memory usage
      float hashtableMemoryUsage;
      if (context.isFollowedByGroupBy()) {
        hashtableMemoryUsage = context.getParseCtx().getConf().getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
      } else {
        hashtableMemoryUsage = context.getParseCtx().getConf().getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
      }
      hashTableSinkOp.getConf().setHashtableMemoryUsage(hashtableMemoryUsage);

      // get the last operator for processing big tables
      int bigTable = mapJoinOp.getConf().getPosBigTable();
      Byte[] order = mapJoinOp.getConf().getTagOrder();
      int bigTableAlias = (int) order[bigTable];

      // the parent ops for hashTableSinkOp
      List<Operator<? extends OperatorDesc>> smallTablesParentOp =
        new ArrayList<Operator<? extends OperatorDesc>>();
      List<Operator<? extends OperatorDesc>> dummyOperators =
        new ArrayList<Operator<? extends OperatorDesc>>();
      // get all parents
      List<Operator<? extends OperatorDesc>> parentsOp = mapJoinOp.getParentOperators();
      for (int i = 0; i < parentsOp.size(); i++) {
        if (i == bigTableAlias) {
          smallTablesParentOp.add(null);
          continue;
        }
        Operator<? extends OperatorDesc> parent = parentsOp.get(i);
        // let hashtable Op be the child of this parent
        parent.replaceChild(mapJoinOp, hashTableSinkOp);
        // keep the parent id correct
        smallTablesParentOp.add(parent);

        // create a new operator: HashTableDummyOperator, which shares the table desc
        HashTableDummyDesc desc = new HashTableDummyDesc();
        HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(desc);
        TableDesc tbl;

        if (parent.getSchema() == null) {
          if (parent instanceof TableScanOperator) {
            tbl = ((TableScanOperator) parent).getTableDesc();
          } else {
            throw new SemanticException();
          }
        } else {
          // get parent schema
          RowSchema rowSchema = parent.getSchema();
          tbl = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(
              rowSchema, ""));
        }
        dummyOp.getConf().setTbl(tbl);
        // let the dummy op be the parent of mapjoin op
        mapJoinOp.replaceParent(parent, dummyOp);
        List<Operator<? extends OperatorDesc>> dummyChildren =
          new ArrayList<Operator<? extends OperatorDesc>>();
        dummyChildren.add(mapJoinOp);
        dummyOp.setChildOperators(dummyChildren);
        // add this dummy op to the dummy operator list
        dummyOperators.add(dummyOp);
      }
      hashTableSinkOp.setParentOperators(smallTablesParentOp);
      for (Operator<? extends OperatorDesc> op : dummyOperators) {
        context.addDummyParentOp(op);
      }
      return null;
    }

      } catch (Exception e) {
        e.printStackTrace();
      }

      HashTableSinkDesc hashTableSinkDesc = new HashTableSinkDesc(mapJoinOp.getConf());
      HashTableSinkOperator hashTableSinkOp = (HashTableSinkOperator) OperatorFactory
          .get(hashTableSinkDesc);

      // set hashtable memory usage
      float hashtableMemoryUsage;
      if (context.isFollowedByGroupBy()) {
        hashtableMemoryUsage = context.getParseCtx().getConf().getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
      } else {
        hashtableMemoryUsage = context.getParseCtx().getConf().getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
      }
      hashTableSinkOp.getConf().setHashtableMemoryUsage(hashtableMemoryUsage);

      // get the last operator for processing big tables
      int bigTable = mapJoinOp.getConf().getPosBigTable();
      Byte[] order = mapJoinOp.getConf().getTagOrder();
      int bigTableAlias = (int) order[bigTable];

      // the parent ops for hashTableSinkOp
      List<Operator<? extends Serializable>> smallTablesParentOp = new ArrayList<Operator<? extends Serializable>>();
      List<Operator<? extends Serializable>> dummyOperators = new ArrayList<Operator<? extends Serializable>>();
      // get all parents
      List<Operator<? extends Serializable>> parentsOp = mapJoinOp.getParentOperators();
      for (int i = 0; i < parentsOp.size(); i++) {
        if (i == bigTableAlias) {
          smallTablesParentOp.add(null);
          continue;
        }
        Operator<? extends Serializable> parent = parentsOp.get(i);
        // let hashtable Op be the child of this parent
        parent.replaceChild(mapJoinOp, hashTableSinkOp);
        // keep the parent id correct
        smallTablesParentOp.add(parent);

        // create a new operator: HashTableDummyOperator, which shares the table desc
        HashTableDummyDesc desc = new HashTableDummyDesc();
        HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(desc);
        TableDesc tbl;

        if (parent.getSchema() == null) {
          if (parent instanceof TableScanOperator) {
            tbl = ((TableScanOperator) parent).getTableDesc();
          } else {
            throw new SemanticException();
          }
        } else {
          // get parent schema
          RowSchema rowSchema = parent.getSchema();
          tbl = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(
              rowSchema, ""));
        }
        dummyOp.getConf().setTbl(tbl);
        // let the dummy op be the parent of mapjoin op
        mapJoinOp.replaceParent(parent, dummyOp);
        List<Operator<? extends Serializable>> dummyChildren = new ArrayList<Operator<? extends Serializable>>();
        dummyChildren.add(mapJoinOp);
        dummyOp.setChildOperators(dummyChildren);
        // add this dummy op to the dummy operator list
        dummyOperators.add(dummyOp);
      }
      hashTableSinkOp.setParentOperators(smallTablesParentOp);
      for (Operator<? extends Serializable> op : dummyOperators) {
        context.addDummyParentOp(op);
      }
      return null;
    }
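
Both variants above (one typed with OperatorDesc, the other with Serializable, apparently from an older Hive release) perform the same plan rewrite: each small-table parent of the MapJoinOperator is re-pointed at the new HashTableSinkOperator, and a HashTableDummyOperator is spliced in as the map-join's parent in its place. The following is a simplified, self-contained model of that replaceChild/replaceParent rewiring; the Node class and operator names are hypothetical stand-ins for Hive's Operator hierarchy.

    import java.util.ArrayList;
    import java.util.List;

    class Node {
      final String name;
      final List<Node> parents = new ArrayList<Node>();
      final List<Node> children = new ArrayList<Node>();
      Node(String name) { this.name = name; }

      // Swap one child reference for another, keeping its slot in the list.
      void replaceChild(Node oldChild, Node newChild) {
        children.set(children.indexOf(oldChild), newChild);
      }

      // Swap one parent reference for another, keeping its slot in the list.
      void replaceParent(Node oldParent, Node newParent) {
        parents.set(parents.indexOf(oldParent), newParent);
      }
    }

    public class RewireSketch {
      public static void main(String[] args) {
        Node smallTableScan = new Node("TS[small]");
        Node mapJoin = new Node("MAPJOIN");
        smallTableScan.children.add(mapJoin);
        mapJoin.parents.add(smallTableScan);

        Node hashTableSink = new Node("HASHTABLESINK");
        Node dummy = new Node("HASHTABLEDUMMY");

        // the small-table branch now feeds the hashtable sink ...
        smallTableScan.replaceChild(mapJoin, hashTableSink);
        hashTableSink.parents.add(smallTableScan);
        // ... while the dummy operator takes its place as the map-join's parent
        mapJoin.replaceParent(smallTableScan, dummy);
        dummy.children.add(mapJoin);

        System.out.println(mapJoin.parents.get(0).name);          // HASHTABLEDUMMY
        System.out.println(smallTableScan.children.get(0).name);  // HASHTABLESINK
      }
    }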

      return;
    }
    JobConf job = new JobConf(hconf);
    MapredLocalTask localTask = new MapredLocalTask(localWork, job, false);

    HashTableSinkOperator sink = new TemporaryHashSinkOperator(desc);
    sink.setParentOperators(new ArrayList<Operator<? extends OperatorDesc>>(directWorks));

    for (Operator<?> operator : directWorks) {
      if (operator != null) {
        operator.setChildOperators(Arrays.<Operator<? extends OperatorDesc>>asList(sink));
      }
    }
    localTask.setExecContext(context);
    localTask.startForward(inputFileName);

    MapJoinTableContainer[] tables = sink.getMapJoinTables();
    for (int i = 0; i < sink.getNumParent(); i++) {
      if (sink.getParentOperators().get(i) != null) {
        mapJoinTables[i] = tables[i];
      }
    }

    Arrays.fill(tables, null);

    // generate empty hashtable for this (byte)tag
    Path tmpPath = this.getWork().getTmpPath();

    String fileName = work.getBucketFileName(bigBucketFileName);

    HashTableSinkOperator htso = (HashTableSinkOperator)childOp;
    Path path = Utilities.generatePath(tmpPath, htso.getConf().getDumpFilePrefix(),
        tag, fileName);
    console.printInfo(Utilities.now() + "\tDump the hashtable into file: " + path);
    FileSystem fs = path.getFileSystem(job);
    ObjectOutputStream out = new ObjectOutputStream(fs.create(path));
    try {
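
Loading such a dump back is the mirror image of the write path. A hedged sketch of that read side, again using raw Object deserialization rather than Hive's actual container classes (the class and method names are hypothetical; only fs.open and ObjectInputStream are standard API):

    import java.io.IOException;
    import java.io.ObjectInputStream;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HashTableLoadSketch {
      // Deserialize whatever object the dump side wrote to the given path.
      public static Object loadDump(Configuration conf, Path dumpPath)
          throws IOException, ClassNotFoundException {
        FileSystem fs = dumpPath.getFileSystem(conf);
        ObjectInputStream in = new ObjectInputStream(fs.open(dumpPath)); // FSDataInputStream under the hood
        try {
          return in.readObject();
        } finally {
          in.close();
        }
      }
    }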

      mapJoinOp.getConf().resetOrder();

      HiveConf conf = context.getParseCtx().getConf();

      HashTableSinkDesc hashTableSinkDesc = new HashTableSinkDesc(mapJoinOp.getConf());
      HashTableSinkOperator hashTableSinkOp = (HashTableSinkOperator) OperatorFactory
          .get(hashTableSinkDesc);

      // set hashtable memory usage
      float hashtableMemoryUsage;
      if (context.isFollowedByGroupBy()) {
        hashtableMemoryUsage = conf.getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
      } else {
        hashtableMemoryUsage = conf.getFloatVar(
            HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
      }
      mapJoinOp.getConf().setHashTableMemoryUsage(hashtableMemoryUsage);
      LOG.info("Setting max memory usage to " + hashtableMemoryUsage + " for table sink "
          + (context.isFollowedByGroupBy() ? "" : "not") + " followed by group by");
      hashTableSinkOp.getConf().setHashtableMemoryUsage(hashtableMemoryUsage);

      // get the last operator for processing big tables
      int bigTable = mapJoinOp.getConf().getPosBigTable();
      Byte[] orders = mapJoinOp.getConf().getTagOrder();

      // todo: support tez/vectorization
      boolean useNontaged = conf.getBoolVar(
          HiveConf.ConfVars.HIVECONVERTJOINUSENONSTAGED) &&
          conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") &&
          !conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);

      // the parent ops for hashTableSinkOp
      List<Operator<? extends OperatorDesc>> smallTablesParentOp =
        new ArrayList<Operator<? extends OperatorDesc>>();
      List<Operator<? extends OperatorDesc>> dummyOperators =
        new ArrayList<Operator<? extends OperatorDesc>>();
      List<Operator<? extends OperatorDesc>> directOperators =
          new ArrayList<Operator<? extends OperatorDesc>>();
      // get all parents
      List<Operator<? extends OperatorDesc>> parentsOp = mapJoinOp.getParentOperators();
      for (int i = 0; i < parentsOp.size(); i++) {
        if (i == bigTable) {
          smallTablesParentOp.add(null);
          directOperators.add(null);
          continue;
        }
        Operator<? extends OperatorDesc> parent = parentsOp.get(i);
        boolean directFetchable = useNontaged &&
            (parent instanceof TableScanOperator || parent instanceof MapJoinOperator);
        if (directFetchable) {
          // no filter, no projection. no need to stage
          smallTablesParentOp.add(null);
          directOperators.add(parent);
          hashTableSinkDesc.getKeys().put(orders[i], null);
          hashTableSinkDesc.getExprs().put(orders[i], null);
          hashTableSinkDesc.getFilters().put(orders[i], null);
        } else {
          // keep the parent id correct
          smallTablesParentOp.add(parent);
          directOperators.add(null);
        }
        // let hashtable Op be the child of this parent
        parent.replaceChild(mapJoinOp, hashTableSinkOp);
        if (directFetchable) {
          parent.setChildOperators(null);
        }

        // create a new operator: HashTableDummyOperator, which shares the table desc
        HashTableDummyDesc desc = new HashTableDummyDesc();
        HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(desc);
        TableDesc tbl;

        if (parent.getSchema() == null) {
          if (parent instanceof TableScanOperator) {
            tbl = ((TableScanOperator) parent).getTableDesc();
          } else {
            throw new SemanticException("Expected parent operator of type TableScanOperator." +
              "Found " + parent.getClass().getName() + " instead.");
          }
        } else {
          // get parent schema
          RowSchema rowSchema = parent.getSchema();
          tbl = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(
              rowSchema, ""));
        }
        dummyOp.getConf().setTbl(tbl);
        // let the dummy op be the parent of mapjoin op
        mapJoinOp.replaceParent(parent, dummyOp);
        List<Operator<? extends OperatorDesc>> dummyChildren =
          new ArrayList<Operator<? extends OperatorDesc>>();
        dummyChildren.add(mapJoinOp);
        dummyOp.setChildOperators(dummyChildren);
        // add this dummy op to the dummy operator list
        dummyOperators.add(dummyOp);
      }
      hashTableSinkOp.setParentOperators(smallTablesParentOp);
      for (Operator<? extends OperatorDesc> op : dummyOperators) {
        context.addDummyParentOp(op);
      }
      if (hasAnyDirectFetch(directOperators)) {
        context.addDirectWorks(mapJoinOp, directOperators);
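
hasAnyDirectFetch is called but not shown in this excerpt. Given how directOperators is populated above (null for staged parents, the parent operator itself for direct-fetchable ones), a plausible reading, stated only as an assumption, is that it reports whether any non-null entry exists:

    import java.util.List;

    public class DirectFetchSketch {
      // Assumed behaviour: true iff at least one small-table parent was recorded
      // for direct (non-staged) fetching, i.e. at least one non-null entry.
      public static boolean hasAnyDirectFetch(List<?> directOperators) {
        for (Object op : directOperators) {
          if (op != null) {
            return true;
          }
        }
        return false;
      }
    }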

      mapJoinDesc.setHashTableMemoryUsage(hashtableMemoryUsage);
      LOG.info("Setting max memory usage to " + hashtableMemoryUsage + " for table sink "
          + (context.isFollowedByGroupBy() ? "" : "not") + " followed by group by");

      HashTableSinkDesc hashTableSinkDesc = new HashTableSinkDesc(mapJoinDesc);
      HashTableSinkOperator hashTableSinkOp = (HashTableSinkOperator) OperatorFactory
          .get(hashTableSinkDesc);

      // get the last operator for processing big tables
      int bigTable = mapJoinDesc.getPosBigTable();

      // todo: support tez/vectorization
      boolean useNontaged = conf.getBoolVar(
          HiveConf.ConfVars.HIVECONVERTJOINUSENONSTAGED) &&
          conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") &&
          !conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);

      // the parent ops for hashTableSinkOp
      List<Operator<? extends OperatorDesc>> smallTablesParentOp =
        new ArrayList<Operator<? extends OperatorDesc>>();
      List<Operator<? extends OperatorDesc>> dummyOperators =
        new ArrayList<Operator<? extends OperatorDesc>>();
      List<Operator<? extends OperatorDesc>> directOperators =
          new ArrayList<Operator<? extends OperatorDesc>>();
      // get all parents
      List<Operator<? extends OperatorDesc>> parentsOp = mapJoinOp.getParentOperators();
      for (byte i = 0; i < parentsOp.size(); i++) {
        if (i == bigTable) {
          smallTablesParentOp.add(null);
          directOperators.add(null);
          continue;
        }
        Operator<? extends OperatorDesc> parent = parentsOp.get(i);
        boolean directFetchable = useNontaged &&
            (parent instanceof TableScanOperator || parent instanceof MapJoinOperator);
        if (directFetchable) {
          // no filter, no projection. no need to stage
          smallTablesParentOp.add(null);
          directOperators.add(parent);
          hashTableSinkDesc.getKeys().put(i, null);
          hashTableSinkDesc.getExprs().put(i, null);
          hashTableSinkDesc.getFilters().put(i, null);
        } else {
          // keep the parent id correct
          smallTablesParentOp.add(parent);
          directOperators.add(null);
          int[] valueIndex = mapJoinDesc.getValueIndex(i);
          if (valueIndex != null) {
            // remove values in key exprs
            // schema for value is already fixed in MapJoinProcessor#convertJoinOpMapJoinOp
            List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
            List<ExprNodeDesc> values = hashTableSinkDesc.getExprs().get(i);
            for (int index = 0; index < values.size(); index++) {
              if (valueIndex[index] < 0) {
                newValues.add(values.get(index));
              }
            }
            hashTableSinkDesc.getExprs().put(i, newValues);
          }
        }
        // let hashtable Op be the child of this parent
        parent.replaceChild(mapJoinOp, hashTableSinkOp);
        if (directFetchable) {
          parent.setChildOperators(null);
        }

        // create a new operator: HashTableDummyOperator, which shares the table desc
        HashTableDummyDesc desc = new HashTableDummyDesc();
        HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(desc);
        TableDesc tbl;

        if (parent.getSchema() == null) {
          if (parent instanceof TableScanOperator) {
            tbl = ((TableScanOperator) parent).getTableDesc();
          } else {
            throw new SemanticException("Expected parent operator of type TableScanOperator." +
              "Found " + parent.getClass().getName() + " instead.");
          }
        } else {
          // get parent schema
          RowSchema rowSchema = parent.getSchema();
          tbl = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(
              rowSchema, ""));
        }
        dummyOp.getConf().setTbl(tbl);
        // let the dummy op be the parent of mapjoin op
        mapJoinOp.replaceParent(parent, dummyOp);
        List<Operator<? extends OperatorDesc>> dummyChildren =
          new ArrayList<Operator<? extends OperatorDesc>>();
        dummyChildren.add(mapJoinOp);
        dummyOp.setChildOperators(dummyChildren);
        // add this dummy op to the dummy operator list
        dummyOperators.add(dummyOp);
      }
      hashTableSinkOp.setParentOperators(smallTablesParentOp);
      for (Operator<? extends OperatorDesc> op : dummyOperators) {
        context.addDummyParentOp(op);
      }
      if (hasAnyDirectFetch(directOperators)) {
        context.addDirectWorks(mapJoinOp, directOperators);
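
Per the comment "remove values in key exprs", the valueIndex loop above keeps only the value expressions whose index entry is negative; a non-negative entry apparently marks a column that is already produced by a key expression. A small self-contained illustration of that filter on plain strings (the data below is hypothetical):

    import java.util.ArrayList;
    import java.util.List;

    public class ValueIndexFilterSketch {
      // Keep only the entries whose valueIndex slot is negative, mirroring the loop above.
      public static List<String> filterByValueIndex(List<String> values, int[] valueIndex) {
        List<String> newValues = new ArrayList<String>();
        for (int i = 0; i < values.size(); i++) {
          if (valueIndex[i] < 0) {
            newValues.add(values.get(i));
          }
        }
        return newValues;
      }

      public static void main(String[] args) {
        List<String> values = new ArrayList<String>();
        values.add("col0");   // hypothetical: also appears among the key expressions
        values.add("col1");
        values.add("col2");
        int[] valueIndex = {0, -1, -1};  // non-negative => redundant with a key column
        System.out.println(filterByValueIndex(values, valueIndex));  // prints [col1, col2]
      }
    }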
