Package org.apache.hadoop.hive.ql.exec.persistence

Examples of org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer$ReusableGetAdaptor
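None of the fragments below call ReusableGetAdaptor directly; they build, persist, and load the MapJoinTableContainer instances that the adaptor reads. The adaptor is the probe-side handle: created once per small table via createGetter and reused for every big-table row. A minimal sketch of that pattern, assuming the Hive 0.13-era interface (createGetter, setFromRow, getCurrentRows); the class name ProbeSketch, the method probeAll, and the parameter names are hypothetical:

    import java.util.List;
    import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
    import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer;
    import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
    import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.ReusableGetAdaptor;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    public class ProbeSketch {
      // Reuse one adaptor across all big-table rows so the key buffers are
      // recycled rather than reallocated on every lookup.
      static void probeAll(MapJoinTableContainer table, List<Object> bigTableRows,
          List<ExprNodeEvaluator> keyFields, List<ObjectInspector> keyInspectors)
          throws HiveException {
        ReusableGetAdaptor adaptor = table.createGetter(null); // null: no sample key from the loader
        for (Object row : bigTableRows) {
          adaptor.setFromRow(row, keyFields, keyInspectors);   // evaluate and hash the join key
          MapJoinRowContainer matches = adaptor.getCurrentRows(); // null when the key is absent
          if (matches != null) {
            // forward the join of `row` with each matched small-table row here
          }
        }
      }
    }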


    // Build side of the map join: compute the small-table value row for this
    // alias (possibly empty), then add it to the in-memory hashtable.
    if ((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
      value = JoinUtil.computeMapJoinValues(row, joinValues[alias],
        joinValuesObjectInspectors[alias], joinFilters[alias], joinFilterObjectInspectors[alias],
        filterMaps == null ? null : filterMaps[alias]);
    }
    MapJoinTableContainer tableContainer = mapJoinTables[alias];
    MapJoinRowContainer rowContainer = tableContainer.get(key);
    if (rowContainer == null) {
      // First row for this key: allocate a real container only when the row
      // carries values; otherwise share the EMPTY_ROW_CONTAINER sentinel.
      if (value.length != 0) {
        rowContainer = new MapJoinRowContainer();
        rowContainer.add(value);
      } else {
        rowContainer = EMPTY_ROW_CONTAINER;
      }
      rowNumber++;
      // Periodically check that the growing hashtable still fits in memory.
      if (rowNumber > hashTableScale && rowNumber % hashTableScale == 0) {
        memoryExhaustionHandler.checkMemoryStatus(tableContainer.size(), rowNumber);
      }
      tableContainer.put(key, rowContainer);
    } else if (rowContainer == EMPTY_ROW_CONTAINER) {
      // Copy-on-write: never mutate the shared sentinel.
      rowContainer = rowContainer.copy();
      rowContainer.add(value);
      tableContainer.put(key, rowContainer);
    } else {
      rowContainer.add(value);
    }
  }
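The EMPTY_ROW_CONTAINER branch above is a copy-on-write trick: all keys whose rows carry no values share one sentinel container, and a real container is allocated only if a value-bearing row for the same key shows up later. A standalone illustration of the same pattern in plain Java collections (not the Hive API):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class SentinelDemo {
      // Shared sentinel for keys that have only empty rows so far.
      static final List<Object[]> EMPTY = new ArrayList<Object[]>();

      public static void main(String[] args) {
        Map<String, List<Object[]>> table = new HashMap<String, List<Object[]>>();
        put(table, "k1", new Object[0]);           // no values -> key points at the sentinel
        put(table, "k1", new Object[] { "v" });    // upgrade: sentinel copied to a real list
        System.out.println(table.get("k1").size()); // prints 1
      }

      static void put(Map<String, List<Object[]>> table, String key, Object[] value) {
        List<Object[]> rows = table.get(key);
        if (rows == null) {
          if (value.length != 0) {
            rows = new ArrayList<Object[]>();
            rows.add(value);
          } else {
            rows = EMPTY; // one instance shared across all value-less keys
          }
          table.put(key, rows);
        } else if (rows == EMPTY) {
          rows = new ArrayList<Object[]>(rows); // copy-on-write upgrade
          rows.add(value);
          table.put(key, rows);
        } else {
          rows.add(value);
        }
      }
    }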


        // get tmp file URI
        String tmpURI = this.getExecContext().getLocalWork().getTmpFileURI();
        LOG.info("Temp URI for side table: " + tmpURI);
        for (byte tag = 0; tag < mapJoinTables.length; tag++) {
          // get the hashtable for this alias; skip aliases that built no table
          MapJoinTableContainer tableContainer = mapJoinTables[tag];
          if (tableContainer == null) {
            continue;
          }
          // get current input file name
          String bigBucketFileName = getExecContext().getCurrentBigBucketFile();
          String fileName = getExecContext().getLocalWork().getBucketFileName(bigBucketFileName);
          // build the tmp URI path; it will be an HDFS path when not in local mode
          String dumpFilePrefix = conf.getDumpFilePrefix();
          String tmpURIPath = Utilities.generatePath(tmpURI, dumpFilePrefix, tag, fileName);
          console.printInfo(Utilities.now() + "\tDump the side-table into file: " + tmpURIPath);
          // open the dump file and serialize the hashtable into it
          Path path = new Path(tmpURIPath);
          FileSystem fs = path.getFileSystem(hconf);
          ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(fs.create(path), 4096));
          try {
            mapJoinTableSerdes[tag].persist(out, tableContainer);
          } finally {
            out.close();
          }
          tableContainer.clear();
          console.printInfo(Utilities.now() + "\tUploaded 1 file to: " + tmpURIPath);
        }
      }
      super.closeOp(abort);
    } catch (Exception e) {
      // wrap and rethrow so callers see a HiveException
      throw new HiveException(e);
    }
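The dump above has a matching read path on the task side that turns the persisted file back into a hashtable. A minimal sketch, assuming MapJoinTableContainerSerDe.load(ObjectInputStream) is the counterpart of the persist(...) call shown; the helper name loadSideTable is hypothetical:

    import java.io.BufferedInputStream;
    import java.io.ObjectInputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer;
    import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainerSerDe;

    public class LoadSketch {
      // Reads one persisted side-table back into memory, mirroring the dump loop above.
      static MapJoinTableContainer loadSideTable(Configuration hconf, Path path,
          MapJoinTableContainerSerDe serde) throws Exception {
        FileSystem fs = path.getFileSystem(hconf);
        ObjectInputStream in = new ObjectInputStream(new BufferedInputStream(fs.open(path), 4096));
        try {
          return serde.load(in); // deserializes keys and row containers via the configured serdes
        } finally {
          in.close();
        }
      }
    }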

      LogicalInput input = tezContext.getInput(inputName);

      try {
        KeyValueReader kvReader = (KeyValueReader) input.getReader();

        // Plain path: a HashMapWrapper keyed by deserialized MapJoinKeys (contrast
        // with the optimized MapJoinBytesTableContainer used in a later example).
        MapJoinTableContainer tableContainer = new HashMapWrapper(hashTableThreshold,
            hashTableLoadFactor);

        // simply read all the kv pairs into the hashtable.

        while (kvReader.next()) {
          // We pass key in as reference, to find out quickly if optimized keys can be used.
          // However, we do not reuse the object since we are putting them into the hashmap.
          lastKey = MapJoinKey.read(output, lastKey, mapJoinTableSerdes[pos].getKeyContext(),
              (Writable)kvReader.getCurrentKey(), false);

          LazyFlatRowContainer values = (LazyFlatRowContainer)tableContainer.get(lastKey);
          if (values == null) {
            values = new LazyFlatRowContainer();
            tableContainer.put(lastKey, values);
          }
          values.add(mapJoinTableSerdes[pos].getValueContext(),
              (BytesWritable)kvReader.getCurrentValue(), useLazyRows);
        }

    // Variant of the first example: values are added through
    // MapJoinEagerRowContainer.addRow rather than MapJoinRowContainer.add.
    if ((hasFilter(alias) && filterMaps[alias].length > 0) || joinValues[alias].size() > 0) {
      value = JoinUtil.computeMapJoinValues(row, joinValues[alias],
        joinValuesObjectInspectors[alias], joinFilters[alias], joinFilterObjectInspectors[alias],
        filterMaps == null ? null : filterMaps[alias]);
    }
    MapJoinTableContainer tableContainer = mapJoinTables[alias];
    MapJoinRowContainer rowContainer = tableContainer.get(key);
    if (rowContainer == null) {
      if (value.length != 0) {
        rowContainer = new MapJoinEagerRowContainer();
        rowContainer.addRow(value);
      } else {
        rowContainer = EMPTY_ROW_CONTAINER;
      }
      rowNumber++;
      if (rowNumber > hashTableScale && rowNumber % hashTableScale == 0) {
        memoryExhaustionHandler.checkMemoryStatus(tableContainer.size(), rowNumber);
      }
      tableContainer.put(key, rowContainer);
    } else if (rowContainer == EMPTY_ROW_CONTAINER) {
      rowContainer = rowContainer.copy();
      rowContainer.addRow(value);
      tableContainer.put(key, rowContainer);
    } else {
      rowContainer.addRow(value);
    }
  }

    // get tmp file URI
    Path tmpURI = getExecContext().getLocalWork().getTmpPath();
    LOG.info("Temp URI for side table: " + tmpURI);
    for (byte tag = 0; tag < mapJoinTables.length; tag++) {
      // get the hashtable for this alias; skip aliases that built no table
      MapJoinTableContainer tableContainer = mapJoinTables[tag];
      if (tableContainer == null) {
        continue;
      }
      // get current input file name
      String bigBucketFileName = getExecContext().getCurrentBigBucketFile();
      String fileName = getExecContext().getLocalWork().getBucketFileName(bigBucketFileName);
      // build the tmp path; it will be an HDFS path when not in local mode
      String dumpFilePrefix = conf.getDumpFilePrefix();
      Path path = Utilities.generatePath(tmpURI, dumpFilePrefix, tag, fileName);
      console.printInfo(Utilities.now() + "\tDump the side-table into file: " + path);
      // open the dump file and serialize the hashtable into it
      FileSystem fs = path.getFileSystem(hconf);
      ObjectOutputStream out = new ObjectOutputStream(new BufferedOutputStream(fs.create(path), 4096));
      try {
        mapJoinTableSerdes[tag].persist(out, tableContainer);
      } finally {
        out.close();
      }
      tableContainer.clear();
      FileStatus status = fs.getFileStatus(path);
      console.printInfo(Utilities.now() + "\tUploaded 1 file to: " + path +
          " (" + status.getLen() + " bytes)");
    }
  }

        isFirstKey = false;
        Long keyCountObj = parentKeyCounts.get(pos);
        long keyCount = (keyCountObj == null) ? -1 : keyCountObj.longValue();
        // Pick the table implementation: the optimized bytes-based container when
        // enabled, otherwise a plain HashMapWrapper; keyCount (if known) pre-sizes it.
        MapJoinTableContainer tableContainer = useOptimizedTables
            ? new MapJoinBytesTableContainer(hconf, valCtx, keyCount, memUsage)
            : new HashMapWrapper(hconf, keyCount);

        while (kvReader.next()) {
          rowCount++;
          lastKey = tableContainer.putRow(keyCtx, (Writable)kvReader.getCurrentKey(),
              valCtx, (Writable)kvReader.getCurrentValue());
        }

        // seal() ends the load phase; the table is read-only from here on.
        tableContainer.seal();
        mapJoinTables[pos] = tableContainer;
      } catch (IOException e) {
        throw new HiveException(e);
      } catch (SerDeException e) {
        throw new HiveException(e);
      }
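Once seal() has been called, the container stops accepting writes; every subsequent lookup from the big-table stream goes through a ReusableGetAdaptor obtained from createGetter, as in the probe sketch at the top of this page.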

