Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HTable$ClientScanner
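The snippets below are drawn from the pre-0.20 HBase test suite, where HTable.obtainScanner() hands back the package-private HTable.ClientScanner through the HScannerInterface. For orientation, here is a minimal, self-contained sketch of that scan pattern; the class name, table name, and "contents:" column are placeholders, not taken from the snippets.

import java.io.IOException;
import java.util.TreeMap;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTable;
import org.apache.hadoop.io.Text;

public class ClientScannerExample {
  // Placeholder column; the tests below use their own column constants.
  private static final Text[] COLUMNS = { new Text("contents:") };

  public static void main(String[] args) throws IOException {
    HBaseConfiguration conf = new HBaseConfiguration();
    HTable table = new HTable(conf, new Text("test_table"));
    // obtainScanner returns HTable's ClientScanner behind the
    // HScannerInterface; always close it when done.
    HScannerInterface scanner =
        table.obtainScanner(COLUMNS, HConstants.EMPTY_START_ROW);
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        System.out.println(key.getRow() + ": " + results.size() + " column(s)");
        results.clear();
      }
    } finally {
      scanner.close();
      table.close();
    }
  }
}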


    int numFound = doScan(scanner, printValues);
    Assert.assertEquals(NUM_ROWS, numFound);
  }

  private void scanTableWithRowFilter(final String tableName, final boolean printValues) throws IOException {
    HTable table = new HTable(conf, new Text(tableName));
    Map<Text, byte[]> columnMap = new HashMap<Text, byte[]>();
    columnMap.put(TEXT_COLUMN1, VALUE);
    // Null row-key regex matches every row; require TEXT_COLUMN1 to equal VALUE.
    RegExpRowFilter filter = new RegExpRowFilter(null, columnMap);
    HScannerInterface scanner = table.obtainScanner(columns, HConstants.EMPTY_START_ROW, filter);
    int numFound = doScan(scanner, printValues);
    Assert.assertEquals(NUM_ROWS, numFound);
  }
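Both scan helpers above delegate to a doScan method that this page never shows. The following is a plausible reconstruction of that missing helper, not the original source; it assumes the class's LOG field and the java.util.Map import alongside the imports already in use above.

  // Assumed shape of the doScan helper: iterate the scanner, count rows,
  // optionally print each cell, and always close the scanner.
  private int doScan(final HScannerInterface scanner, final boolean printValues)
      throws IOException {
    int numFound = 0;
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        if (printValues) {
          for (Map.Entry<Text, byte[]> e : results.entrySet()) {
            LOG.info("row: " + key.getRow() + ", column: " + e.getKey()
                + ", value: " + new Text(e.getValue()));
          }
        }
        numFound++;
        results.clear();
      }
    } finally {
      scanner.close();
    }
    return numFound;
  }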


    // Create a table.
    HBaseAdmin admin = new HBaseAdmin(this.conf);
    admin.createTable(desc);

    // insert some data into the test table
    HTable table = new HTable(conf, new Text(SINGLE_REGION_TABLE_NAME));

    try {
      for(int i = 0; i < values.length; i++) {
        long lockid = table.startUpdate(new Text("row_"
            + String.format("%1$05d", i)));

        try {
          table.put(lockid, TEXT_INPUT_COLUMN, values[i]);
          table.commit(lockid, System.currentTimeMillis());
          lockid = -1;
        } finally {
          // Release the row lock if the commit did not happen.
          if (lockid != -1) {
            table.abort(lockid);
          }
        }
      }
      }

      LOG.info("Print table contents before map/reduce for " +
        SINGLE_REGION_TABLE_NAME);
      scanTable(SINGLE_REGION_TABLE_NAME, true);

      @SuppressWarnings("deprecation")
      MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

      try {
        JobConf jobConf = new JobConf(conf, TestTableMapReduce.class);
        jobConf.setJobName("process column contents");
        jobConf.setNumMapTasks(1);
        jobConf.setNumReduceTasks(1);

        TableMap.initJob(SINGLE_REGION_TABLE_NAME, INPUT_COLUMN,
            ProcessContentsMapper.class, jobConf);

        TableReduce.initJob(SINGLE_REGION_TABLE_NAME,
            IdentityTableReduce.class, jobConf);
        LOG.info("Started " + SINGLE_REGION_TABLE_NAME);
        JobClient.runJob(jobConf);

        LOG.info("Print table contents after map/reduce for " +
          SINGLE_REGION_TABLE_NAME);
        scanTable(SINGLE_REGION_TABLE_NAME, true);

        // verify map-reduce results
        verify(SINGLE_REGION_TABLE_NAME);
      } finally {
        mrCluster.shutdown();
      }
    } finally {
      table.close();
    }
  }

    // Populate a table into multiple regions
    makeMultiRegionTable(conf, hCluster, fs, MULTI_REGION_TABLE_NAME,
        INPUT_COLUMN);
   
    // Verify table indeed has multiple regions
    HTable table = new HTable(conf, new Text(MULTI_REGION_TABLE_NAME));
    try {
      Text[] startKeys = table.getStartKeys();
      assertTrue(startKeys.length > 1);

      @SuppressWarnings("deprecation")
      MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

      try {
        JobConf jobConf = new JobConf(conf, TestTableMapReduce.class);
        jobConf.setJobName("process column contents");
        jobConf.setNumMapTasks(2);
        jobConf.setNumReduceTasks(1);

        TableMap.initJob(MULTI_REGION_TABLE_NAME, INPUT_COLUMN,
            ProcessContentsMapper.class, jobConf);

        TableReduce.initJob(MULTI_REGION_TABLE_NAME,
            IdentityTableReduce.class, jobConf);
        LOG.info("Started " + MULTI_REGION_TABLE_NAME);
        JobClient.runJob(jobConf);

        // verify map-reduce results
        verify(MULTI_REGION_TABLE_NAME);
      } finally {
        mrCluster.shutdown();
      }
    } finally {
      table.close();
    }
  }
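A side note on the hard-coded task counts above: getStartKeys() returns one start key per region, so a test that wants one map task per region could derive the count instead of fixing it at 2. A sketch of that variant, which is an assumption and not what the original test does:

        // Hypothetical variant: size the map phase to the region count.
        Text[] regionStartKeys = table.getStartKeys();
        jobConf.setNumMapTasks(regionStartKeys.length);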

    }
  }

  private void scanTable(String tableName, boolean printValues)
  throws IOException {
    HTable table = new HTable(conf, new Text(tableName));
   
    HScannerInterface scanner =
      table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
   
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
     

    }
  }

  @SuppressWarnings("null")
  private void verify(String tableName) throws IOException {
    HTable table = new HTable(conf, new Text(tableName));
    boolean verified = false;
    long pause = conf.getLong("hbase.client.pause", 5 * 1000);
    int numRetries = conf.getInt("hbase.client.retries.number", 5);
    for (int i = 0; i < numRetries; i++) {
      try {
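The verify() body is truncated at the try. In the test this page excerpts, each attempt rescans the table, treats missing rows as a transient failure, and sleeps pause milliseconds between retries. A reconstruction along those lines follows; verifyAttempt is a hypothetical helper that scans the table and asserts its contents.

      try {
        // Hypothetical helper: scan the table and assert expected contents.
        verifyAttempt(table);
        verified = true;
        break;
      } catch (NullPointerException e) {
        // Rows may not all be visible yet; log and retry after a pause.
        LOG.debug("Verification attempt #" + i + " failed");
      }
      try {
        Thread.sleep(pause);
      } catch (InterruptedException e) {
        // Ignore and retry.
      }
    }
    assertTrue(verified);
  }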

    // Create a table.
    HBaseAdmin admin = new HBaseAdmin(this.conf);
    admin.createTable(desc);

    // insert some data into the test table
    HTable table = new HTable(conf, new Text(TABLE_NAME));

    for (int i = 0; i < NUM_ROWS; i++) {
      long id = table.startUpdate(new Text("row_" + String.format("%1$05d", i)));

      table.put(id, TEXT_COLUMN1, VALUE);
      table.put(id, TEXT_COLUMN2, String.format("%1$05d", i).getBytes());
      table.commit(id);
    }

    // LOG.info("Print table contents using scanner before map/reduce for " + TABLE_NAME);
    // scanTable(TABLE_NAME, false);
    // LOG.info("Print table contents using scanner+filter before map/reduce for " + TABLE_NAME);
    // scanTableWithRowFilter(TABLE_NAME, false);

    // Do some identity write operations on one column of the data.
    for (int n = 0; n < NUM_REWRITES; n++) {
      for (int i = 0; i < NUM_ROWS; i++) {
        long id = table.startUpdate(new Text("row_" + String.format("%1$05d", i)));

        table.put(id, TEXT_COLUMN2, String.format("%1$05d", i).getBytes());
        table.commit(id);
      }
    }

    // Wait for the flush to happen
    LOG.info("Waiting for flushes to complete");
    Thread.sleep(5 * 1000);
    LOG.info("Done. No flush should happen after this");

    // Do another round to populate the memcache
    for (int i = 0; i < NUM_ROWS; i++) {
      long id = table.startUpdate(new Text("row_" + String.format("%1$05d", i)));

      table.put(id, TEXT_COLUMN2, String.format("%1$05d", i).getBytes());
      table.commit(id);
    }

    LOG.info("Print table contents using scanner after map/reduce for " + TABLE_NAME);
    scanTable(TABLE_NAME, true);
    LOG.info("Print table contents using scanner+filter after map/reduce for " + TABLE_NAME);

      // Populate a table into multiple regions
      makeMultiRegionTable(conf, hCluster, this.fs, TABLE_NAME, INPUT_COLUMN);

      // Verify table indeed has multiple regions
      HTable table = new HTable(conf, new Text(TABLE_NAME));
      Text[] startKeys = table.getStartKeys();
      assertTrue(startKeys.length > 1);
    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfsCluster);
      throw e;
    }

    return c.toString();
  }

  private void scanTable(boolean printResults)
  throws IOException {
    HTable table = new HTable(conf, new Text(TABLE_NAME));
    HScannerInterface scanner = table.obtainScanner(columns,
        HConstants.EMPTY_START_ROW);
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {

        searcher = new MultiSearcher(searchers);
      } else {
        throw new IOException("no index directory found");
      }

      HTable table = new HTable(conf, new Text(TABLE_NAME));
      scanner = table.obtainScanner(columns, HConstants.EMPTY_START_ROW);

      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();

      IndexConfiguration indexConf = new IndexConfiguration();

    LOG.info("Print table contents using scanner+filter after map/reduce for " + TABLE_NAME);
    scanTableWithRowFilter(TABLE_NAME, true);
  }

  private void scanTable(final String tableName, final boolean printValues) throws IOException {
    HTable table = new HTable(conf, new Text(tableName));

    HScannerInterface scanner = table.obtainScanner(columns, HConstants.EMPTY_START_ROW);
    int numFound = doScan(scanner, printValues);
    Assert.assertEquals(NUM_ROWS, numFound);
  }
