Package org.apache.hadoop.hbase.mapreduce

Examples of org.apache.hadoop.hbase.mapreduce.TableSplit
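
TableSplit is the InputSplit implementation that HBase's TableInputFormat hands to each map task: it describes one region of a table, carrying the table name, the region's start and end rows, and the region's location. The examples below show how different projects create, filter, compare, and unpack these splits. As a minimal sketch (not taken from the examples below, assuming the older byte[]-based TableSplit API that they use, with SplitAwareMapper as a hypothetical class name), a mapper can read the boundaries of its current split like this:

    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.mapreduce.TableSplit;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.NullWritable;

    public class SplitAwareMapper extends TableMapper<NullWritable, NullWritable> {
        @Override
        protected void setup(Context context) {
            // Hypothetical sketch: each map task receives a TableSplit describing
            // one region of the scanned table.
            TableSplit split = (TableSplit) context.getInputSplit();
            System.out.println("table=" + Bytes.toString(split.getTableName())
                    + " start=" + Bytes.toStringBinary(split.getStartRow())
                    + " end=" + Bytes.toStringBinary(split.getEndRow())
                    + " location=" + split.getRegionLocation());
        }
    }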


    // Excerpt: a custom getSplits() that drops regions whose key range falls outside
    // the configured comparison bounds (lt_/gt_/gte_/lte_).
    public List<InputSplit> getSplits(org.apache.hadoop.mapreduce.JobContext context)
    throws IOException {
        List<InputSplit> splits = super.getSplits(context);
        ListIterator<InputSplit> splitIter = splits.listIterator();
        while (splitIter.hasNext()) {
            TableSplit split = (TableSplit) splitIter.next();
            byte[] startKey = split.getStartRow();
            byte[] endKey = split.getEndRow();
            // Skip if the region doesn't satisfy configured options.
            if ((skipRegion(CompareOp.LESS, startKey, lt_)) ||
                    (skipRegion(CompareOp.GREATER, endKey, gt_)) ||
                    (skipRegion(CompareOp.GREATER, endKey, gte_)) ||
                    (skipRegion(CompareOp.LESS_OR_EQUAL, startKey, lte_))) {
                // Region falls outside the configured range: drop the split.
                splitIter.remove();
            }
        }
        return splits;
    }


    // Excerpt: wrapping a TableSplit in a WritableComparable so it can be
    // serialized and compared.
    @Override
    public WritableComparable<InputSplit> getSplitComparable(InputSplit split)
            throws IOException {
        return new WritableComparable<InputSplit>() {
            TableSplit tsplit = new TableSplit();

            @Override
            public void readFields(DataInput in) throws IOException {
                tsplit.readFields(in);
            }

            @Override
            public void write(DataOutput out) throws IOException {
                tsplit.write(out);
            }

            @Override
            public int compareTo(InputSplit split) {
                return tsplit.compareTo((TableSplit) split);
            }
        };
    }

    // Excerpt, apparently from a getRecordReader(InputSplit, JobConf, Reporter)
    // implementation (old mapred API): the wrapping HBaseSplit is unwrapped to get
    // at the underlying TableSplit and the HBase table/column configuration.
    InputSplit split,
    JobConf jobConf,
    final Reporter reporter) throws IOException {

    HBaseSplit hbaseSplit = (HBaseSplit) split;
    TableSplit tableSplit = hbaseSplit.getSplit();
    String hbaseTableName = jobConf.get(HBaseSerDe.HBASE_TABLE_NAME);
    setHTable(new HTable(HBaseConfiguration.create(jobConf), Bytes.toBytes(hbaseTableName)));
    String hbaseColumnsMapping = jobConf.get(HBaseSerDe.HBASE_COLUMNS_MAPPING);
    List<Integer> readColIDs = ColumnProjectionUtils.getReadColumnIDs(jobConf);
    List<ColumnMapping> columnsMapping = null;

      // Excerpt: once the comparison operator is validated, the original split is
      // replaced by a new TableSplit narrowed to the computed startRow/stopRow.
      } else {
        throw new IOException(comparisonOp + " is not a supported comparison operator");
      }
    }
    if (tableSplit != null) {
      tableSplit = new TableSplit(
        tableSplit.getTableName(),
        startRow,
        stopRow,
        tableSplit.getRegionLocation());
    }

        // Excerpt from a VerifyReplication-style mapper: a scanner on the peer cluster
        // is bounded by the current row and the TableSplit's end row.
        scan.setTimeRange(startTime, endTime);
        if (versions >= 0) {
          scan.setMaxVersions(versions);
        }

        final TableSplit tableSplit = (TableSplit)(context.getInputSplit());
        HConnectionManager.execute(new HConnectable<Void>(conf) {
          @Override
          public Void connect(HConnection conn) throws IOException {
            String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
            Configuration peerConf = HBaseConfiguration.create(conf);
            ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);

            TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
            // TODO: This HTable doesn't get closed. Fix!
            Table replicatedTable = new HTable(peerConf, tableName);
            scan.setStartRow(value.getRow());
            scan.setStopRow(tableSplit.getEndRow());
            replicatedScanner = replicatedTable.getScanner(scan);
            return null;
          }
        });
        currentCompareRowInPeerTable = replicatedScanner.next();

            InterruptedException {

        // Excerpt from a mapper's map() method: the TableSplit supplies the table
        // name that is passed to the indexer along with each Result.
        context.progress();
        context.getCounter(HBaseIndexerCounters.INPUT_ROWS).increment(1L);
        try {
            TableSplit tableSplit;
            if (context.getInputSplit() instanceof TableSplit) {
                tableSplit = (TableSplit) context.getInputSplit();
                indexer.indexRowData(ImmutableList.<RowData>of(new ResultWrappingRowData(result,
                        tableSplit.getTableName())));
            } else {
                throw new IOException("Input split not of type " + TableSplit.class + " but " +
                        context.getInputSplit().getClass());
            }

                // Excerpt from a getSplits() loop: one TableSplit is created per region,
                // with the split's row range clipped to the scan's startRow/stopRow.
                        keys.getFirst()[i] : startRow;
                byte[] splitStop = (stopRow.length == 0 ||
                        Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) &&
                        keys.getSecond()[i].length > 0 ?
                        keys.getSecond()[i] : stopRow;
                InputSplit split = new TableSplit(table.getTableName(),
                        splitStart, splitStop, regionLocation);
                splits.add(split);
                if (log.isDebugEnabled()) {
                    log.debug("getSplits: split -> " + (count++) + " -> " + split);
                }

        // Build RecordScan
        RecordScan scan = getScan(repository);

        // Change the start/stop record IDs on the scan to the current split
        TableSplit split = (TableSplit)inputSplit;
        scan.setRawStartRecordId(split.getStartRow());
        scan.setRawStopRecordId(split.getEndRow());

        RecordScanner scanner = null;
        try {
            String hbaseTableName = Bytes.toString(split.getTableName());
            String repositoryTableName = RepoAndTableUtil.extractLilyTableName(repositoryName, hbaseTableName);
            scanner = lilyClient.getRepository(repositoryName).getTable(repositoryTableName).getScanner(scan);
        } catch (RepositoryException e) {
            Closer.close(lilyClient);
            throw new IOException("Error setting up RecordScanner", e);

        // Build RecordScan (as in the previous example, but here the table is read
        // through an IdRecordScanner obtained via getScannerWithIds)
        RecordScan scan = getScan(repository);

        // Change the start/stop record IDs on the scan to the current split
        TableSplit split = (TableSplit)inputSplit;
        scan.setRawStartRecordId(split.getStartRow());
        scan.setRawStopRecordId(split.getEndRow());

        IdRecordScanner scanner = null;
        try {
            String hbaseTableName = Bytes.toString(split.getTableName());
            String repositoryTableName = RepoAndTableUtil.extractLilyTableName(repositoryName, hbaseTableName);
            scanner = lilyClient.getRepository(repositoryName).getTable(repositoryTableName).getScannerWithIds(scan);
        } catch (RepositoryException e) {
            Closer.close(lilyClient);
            throw new IOException("Error setting up RecordScanner", e);
