Package org.apache.hadoop.hbase.mapred

Examples of org.apache.hadoop.hbase.mapred.TableSplit
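A TableSplit in the old (mapred) API describes one slice of an HBase table handed to a map task: the table name, the first and last row keys of the slice, and the region server that hosts it. Before the snippets below, here is a minimal, hypothetical sketch of constructing one directly and reading its row range back; the table name, row keys and host are made-up values, and the byte[]-based constructor shown is the older form of the class (in real jobs the splits are normally produced by the input format's getSplits()).

import org.apache.hadoop.hbase.mapred.TableSplit;
import org.apache.hadoop.hbase.util.Bytes;

public class TableSplitSketch {
  public static void main(String[] args) {
    // Hypothetical values; real splits follow region boundaries.
    TableSplit split = new TableSplit(
        Bytes.toBytes("my_table"),          // table the split belongs to
        Bytes.toBytes("row-00000"),         // first row of the slice
        Bytes.toBytes("row-49999"),         // end row (used as the scan's stop row in the snippets below)
        "regionserver-1.example.com");      // location hint used for task scheduling

    System.out.println(Bytes.toString(split.getStartRow()));  // row-00000
    System.out.println(Bytes.toString(split.getEndRow()));    // row-49999
    System.out.println(split.getRegionLocation());            // regionserver-1.example.com
  }
}

The examples that follow show how real code builds and consumes these splits.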


    throws IOException {
    // Rebuild the HCatalog job information that was serialized into the JobConf.
    String jobString = job.get(HCatConstants.HCAT_KEY_JOB_INFO);
    InputJobInfo inputJobInfo = (InputJobInfo) HCatUtil.deserialize(jobString);

    String tableName = job.get(TableInputFormat.INPUT_TABLE);
    // The generic InputSplit handed in by the framework is an HBase TableSplit.
    TableSplit tSplit = (TableSplit) split;
    HbaseSnapshotRecordReader recordReader = new HbaseSnapshotRecordReader(inputJobInfo, job);
    inputFormat.setConf(job);
    Scan inputScan = inputFormat.getScan();
    // TODO: Make the caching configurable by the user
    inputScan.setCaching(200);
    inputScan.setCacheBlocks(false);
    // Clone the configured Scan and restrict it to this split's row range.
    Scan sc = new Scan(inputScan);
    sc.setStartRow(tSplit.getStartRow());
    sc.setStopRow(tSplit.getEndRow());
    recordReader.setScan(sc);
    recordReader.setHTable(new HTable(job, tableName));
    recordReader.init();
    return recordReader;
  }


  private InputSplit[] convertSplits(List<org.apache.hadoop.mapreduce.InputSplit> splits) {
    InputSplit[] converted = new InputSplit[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
      // Each split produced by the new-API (mapreduce) TableInputFormat is rewrapped
      // as an old-API (mapred) TableSplit, preserving the table name, row range and
      // region location so data locality is kept.
      org.apache.hadoop.hbase.mapreduce.TableSplit tableSplit =
        (org.apache.hadoop.hbase.mapreduce.TableSplit) splits.get(i);
      TableSplit newTableSplit = new TableSplit(tableSplit.getTableName(),
        tableSplit.getStartRow(),
        tableSplit.getEndRow(), tableSplit.getRegionLocation());
      converted[i] = newTableSplit;
    }
    return converted;
  }
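The conversion above only copies the four fields the old-API constructor needs; once converted, the split is an ordinary Writable that the framework serializes when shipping work to tasks. Below is a minimal sketch of that round trip, assuming nothing beyond the no-arg constructor and the Writable methods every mapred InputSplit provides (the class and method names are hypothetical).

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.mapred.TableSplit;

public class TableSplitRoundTrip {
  // Serializes a split and reads it back into a fresh instance, the same way the
  // framework rehydrates splits on the task side.
  public static TableSplit roundTrip(TableSplit original) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    original.write(new DataOutputStream(buffer));

    TableSplit copy = new TableSplit();   // empty split, as in the HBaseSplit() example below
    copy.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
    return copy;
  }
}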

  // The HBase split wrapped by this FileSplit-based class.
  private TableSplit split;

  public HBaseSplit() {
    // No-arg constructor used by the Writable machinery; the empty TableSplit
    // is filled in later when the split is deserialized.
    super((Path) null, 0, 0, (String[]) null);
    hbaseColumnMapping = "";
    split = new TableSplit();
  }
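Because the wrapper above must itself be a Writable, its serialization methods typically delegate to the embedded TableSplit. The following is a hedged sketch of what that delegation usually looks like, not the actual source of the class: the field names mirror the snippet, the method bodies are assumptions, and java.io.DataInput, DataOutput and IOException are assumed to be imported in the enclosing class.

  @Override
  public void readFields(DataInput in) throws IOException {
    super.readFields(in);              // FileSplit fields (path, offset, length)
    hbaseColumnMapping = in.readUTF(); // column mapping string declared elsewhere in the class
    split.readFields(in);              // the wrapped TableSplit deserializes itself
  }

  @Override
  public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeUTF(hbaseColumnMapping);
    split.write(out);
  }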

  /**
   * Builds a record reader for the given split.
   *
   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
   *      JobConf, Reporter)
   */
  public RecordReader<TypedBytesWritable, TypedBytesWritable> getRecordReader(
      InputSplit split, JobConf job, Reporter reporter)
      throws IOException {
    TableSplit tSplit = (TableSplit) split;
    TypedBytesTableRecordReader trr = this.tableRecordReader;
    // If no table record reader was provided, use the default.
    if (trr == null) {
      trr = new TypedBytesTableRecordReader();
    }
    // Bound the reader by the split's row range and point it at the table,
    // columns and row filter configured on this input format.
    trr.setStartRow(tSplit.getStartRow());
    trr.setEndRow(tSplit.getEndRow());
    trr.setHTable(this.table);
    trr.setInputColumns(this.inputColumns);
    trr.setRowFilter(this.rowFilter);
    trr.init();
    return trr;
  }
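Once returned, the reader is driven through the standard old-API RecordReader contract. A hedged fragment (not from the listing) of the consuming loop, assuming inputFormat, split and job are in scope and the enclosing method can throw IOException:

    RecordReader<TypedBytesWritable, TypedBytesWritable> reader =
        inputFormat.getRecordReader(split, job, Reporter.NULL);
    TypedBytesWritable key = reader.createKey();
    TypedBytesWritable value = reader.createValue();
    // Iterate over every row that falls inside the TableSplit's row range.
    while (reader.next(key, value)) {
      // process one row
    }
    reader.close();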