Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Table

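The excerpts below are drawn from HBase's own tests and tools, and they obtain their Table through the older new HTable(conf, tableName) constructor. For orientation, here is a minimal, self-contained sketch of the same kind of write against the HBase 1.0+ ConnectionFactory API; the table, family, and qualifier names are placeholders, not taken from the excerpts:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // A Connection is heavyweight and thread-safe; Table instances are
    // lightweight handles that should be closed after use.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put); // sent immediately; no explicit flush needed here
    }
  }
}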

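Expecting an error from a coprocessor endpoint: with retries capped at one, the stub call should fail fast and surface a ServiceException.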
  @Test
  public void testCoprocessorError() throws Exception {
    Configuration configuration = new Configuration(util.getConfiguration());
    // Make it not retry forever
    configuration.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
    Table table = new HTable(configuration, TEST_TABLE);

    try {
      CoprocessorRpcChannel protocol = table.coprocessorService(ROWS[0]);

      TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface service =
          TestRpcServiceProtos.TestProtobufRpcProto.newBlockingStub(protocol);

      service.error(null, TestProtos.EmptyRequestProto.getDefaultInstance());
      fail("Should have thrown an exception");
    } catch (ServiceException e) {
      // expected: the endpoint call is supposed to fail
    } finally {
      table.close();
    }
  }


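Excerpt from a command-line scan tool: it builds a Scan with optional start/stop rows ("-s"/"-e") and prints up to a configurable limit ("-l") of rows from a ResultScanner.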
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(getClass().getSimpleName(), options);
        System.exit(-1);
      }

      Table table = new HTable(getConf(), getTableName(getConf()));

      Scan scan = new Scan();
      scan.setBatch(10000);

      if (cmd.hasOption("s"))
        scan.setStartRow(Bytes.toBytesBinary(cmd.getOptionValue("s")));

      if (cmd.hasOption("e"))
        scan.setStopRow(Bytes.toBytesBinary(cmd.getOptionValue("e")));

      int limit = cmd.hasOption("l") ? Integer.parseInt(cmd.getOptionValue("l")) : 100;

      ResultScanner scanner = table.getScanner(scan);

      CINode node = new CINode();
      Result result = scanner.next();
      int count = 0;
      while (result != null && count++ < limit) {
        node = getCINode(result, node);
        System.out.printf("%s:%s:%012d:%s\n", Bytes.toStringBinary(node.key),
            Bytes.toStringBinary(node.prev), node.count, node.client);
        result = scanner.next();
      }
      scanner.close();
      table.close();

      return 0;
    }
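As written, the loop above leaks the ResultScanner if getCINode or printf throws. A sketch of the same loop using try-with-resources (ResultScanner implements Closeable; the other names are as in the excerpt):

      try (ResultScanner scanner = table.getScanner(scan)) {
        CINode node = new CINode();
        int count = 0;
        for (Result result = scanner.next(); result != null && count++ < limit;
            result = scanner.next()) {
          node = getCINode(result, node);
          System.out.printf("%s:%s:%012d:%s\n", Bytes.toStringBinary(node.key),
              Bytes.toStringBinary(node.prev), node.count, node.client);
        }
      } // scanner closed even if an iteration throws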

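Excerpt from a command-line delete tool: removes the row whose key is given as the first argument.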
      byte[] val = Bytes.toBytesBinary(args[0]);

      org.apache.hadoop.hbase.client.Delete delete
        = new org.apache.hadoop.hbase.client.Delete(val);

      Table table = new HTable(getConf(), getTableName(getConf()));

      table.delete(delete);
      table.flushCommits();
      table.close();

      System.out.println("Delete successful");
      return 0;
    }
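Note that in the classic HTable client only puts go through the write buffer; delete(Delete) is sent to the server immediately, so the flushCommits() call above is effectively a no-op. In later client versions write buffering moved to BufferedMutator and flushCommits() was removed.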

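Random-walk query loop: pick a random start key (or use "-s" for a specific one), find the start node, then follow prev pointers and time each lookup.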
      Random rand = new Random();
      boolean isSpecificStart = cmd.hasOption('s');
      byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null;
      int logEvery = cmd.hasOption('l') ? Integer.parseInt(cmd.getOptionValue('l')) : 1;

      Table table = new HTable(getConf(), getTableName(getConf()));
      long numQueries = 0;
      // If isSpecificStart is set, walk only the one list that starts at that node.
      // Note that for a circular (or P-shaped) list the walk never terminates, just
      // as in a normal run without a startKey.
      while (numQueries < maxQueries && (numQueries == 0 || !isSpecificStart)) {
        if (!isSpecificStart) {
          startKey = new byte[ROWKEY_LENGTH];
          rand.nextBytes(startKey);
        }
        CINode node = findStartNode(table, startKey);
        if (node == null && isSpecificStart) {
          System.err.printf("Start node not found: %s \n", Bytes.toStringBinary(startKey));
        }
        numQueries++;
        while (node != null && node.prev.length != NO_KEY.length && numQueries < maxQueries) {
          byte[] prev = node.prev;
          long t1 = System.currentTimeMillis();
          node = getNode(prev, table, node);
          long t2 = System.currentTimeMillis();
          if (numQueries % logEvery == 0) {
            System.out.printf("CQ %d: %d %s \n", numQueries, t2 - t1, Bytes.toStringBinary(prev));
          }
          numQueries++;
          if (node == null) {
            System.err.printf("UNDEFINED NODE %s \n", Bytes.toStringBinary(prev));
          } else if (node.prev.length == NO_KEY.length) {
            System.err.printf("TERMINATING NODE %s \n", Bytes.toStringBinary(node.key));
          }
        }
      }

      table.close();
      return 0;
    }

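Excerpt from a RegionObserver compaction test: two rows are written and flushed, a coprocessor-observed compaction runs, and the test verifies that the coprocessor suppressed all data.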
    // (table descriptor setup elided in this excerpt)

    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(desc);

    Table table = new HTable(conf, desc.getTableName());

    // put a row and flush it to disk
    Put put = new Put(ROW);
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();

    HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName());
    List<HRegion> regions = rs.getOnlineRegions(desc.getTableName());
    assertEquals("More than 1 region serving test table with 1 row", 1, regions.size());
    HRegion region = regions.get(0);
    admin.flushRegion(region.getRegionName());
    CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region)
        .getCompactionStateChangeLatch();

    // put another row and flush that too
    put = new Put(Bytes.toBytes("anotherrow"));
    put.add(A, A, A);
    table.put(put);
    table.flushCommits();
    admin.flushRegion(region.getRegionName());

    // run a compaction, which normally would get rid of the data
    // wait for the compaction checker to complete
    latch.await();
    // check both rows to ensure that they aren't there
    Get get = new Get(ROW);
    Result r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());

    get = new Get(Bytes.toBytes("anotherrow"));
    r = table.get(get);
    assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());

    table.close();
    UTIL.shutdownMiniCluster();
  }

  /**
   * Do a single put that is bypassed by a RegionObserver.
   * @throws Exception
   */
  @Test
  public void testSimple() throws Exception {
    Table t = new HTable(util.getConfiguration(), tableName);
    Put p = new Put(row1);
    p.add(test,dummy,dummy);
    // before HBASE-4331, this would throw an exception
    t.put(p);
    checkRowAndDelete(t,row1,0);
    t.close();
  }

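Multi-put variant of the bypass test: each batch mixes puts that the RegionObserver bypasses with puts it lets through; checkRowAndDelete asserts the expected cell count per row.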
  public void testMulti() throws Exception {
    //ensure that server time increments every time we do an operation, otherwise
    //previous deletes will eclipse successive puts having the same timestamp
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());

    Table t = new HTable(util.getConfiguration(), tableName);
    List<Put> puts = new ArrayList<Put>();
    Put p = new Put(row1);
    p.add(dummy,dummy,dummy);
    puts.add(p);
    p = new Put(row2);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row3);
    p.add(test,dummy,dummy);
    puts.add(p);
    // before HBASE-4331, this would throw an exception
    t.put(puts);
    checkRowAndDelete(t,row1,1);
    checkRowAndDelete(t,row2,0);
    checkRowAndDelete(t,row3,0);

    puts.clear();
    p = new Put(row1);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row2);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row3);
    p.add(test,dummy,dummy);
    puts.add(p);
    // before HBASE-4331, this would throw an exception
    t.put(puts);
    checkRowAndDelete(t,row1,0);
    checkRowAndDelete(t,row2,0);
    checkRowAndDelete(t,row3,0);

    puts.clear();
    p = new Put(row1);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row2);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row3);
    p.add(dummy,dummy,dummy);
    puts.add(p);
    // this worked fine even before HBASE-4331
    t.put(puts);
    checkRowAndDelete(t,row1,0);
    checkRowAndDelete(t,row2,0);
    checkRowAndDelete(t,row3,1);

    puts.clear();
    p = new Put(row1);
    p.add(dummy,dummy,dummy);
    puts.add(p);
    p = new Put(row2);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row3);
    p.add(dummy,dummy,dummy);
    puts.add(p);
    // this worked fine even before HBASE-4331
    t.put(puts);
    checkRowAndDelete(t,row1,1);
    checkRowAndDelete(t,row2,0);
    checkRowAndDelete(t,row3,1);

    puts.clear();
    p = new Put(row1);
    p.add(test,dummy,dummy);
    puts.add(p);
    p = new Put(row2);
    p.add(dummy,dummy,dummy);
    puts.add(p);
    p = new Put(row3);
    p.add(test,dummy,dummy);
    puts.add(p);
    // before HBASE-4331, this would throw an exception
    t.put(puts);
    checkRowAndDelete(t,row1,0);
    checkRowAndDelete(t,row2,1);
    checkRowAndDelete(t,row3,0);
    t.close();

    EnvironmentEdgeManager.reset();
  }

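Driver side of a coprocessor that mirrors writes: a put to the primary table should be copied into the other table by the coprocessor shown further below.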
    Admin admin = UTIL.getHBaseAdmin();
    admin.createTable(primary);
    admin.createTable(other);

    Table table = new HTable(UTIL.getConfiguration(), "primary");
    Put p = new Put(new byte[] { 'a' });
    p.add(family, null, new byte[] { 'a' });
    table.put(p);
    table.flushCommits();
    table.close();

    Table target = new HTable(UTIL.getConfiguration(), otherTable);
    assertTrue("Didn't complete update to target table!", completeCheck[0]);
    assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
    target.close();
  }

  /** Tests that the meta location is saved for secondary regions */
  @Test(timeout = 60000)
  public void testRegionReplicaUpdatesMetaLocation() throws Exception {
    openRegion(hriSecondary);
    Table meta = null;
    try {
      meta = new HTable(HTU.getConfiguration(), TableName.META_TABLE_NAME);
      TestMetaTableAccessor.assertMetaLocation(meta, hriPrimary.getRegionName(),
        getRS().getServerName(), -1, 1, false);
    } finally {
      if (meta != null) meta.close();
      closeRegion(hriSecondary);
    }
  }

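The coprocessor itself: prePut opens the second table through the coprocessor environment, forwards the same Put, and records completion.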
  public static class SendToOtherTableCoprocessor extends BaseRegionObserver {

    @Override
    public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
        final WALEdit edit, final Durability durability) throws IOException {
      Table table = e.getEnvironment().getTable(otherTable);
      table.put(put);
      table.flushCommits();
      completed[0] = true;
      table.close();
    }
  }
