Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HBaseTestingUtility$SeenRowTracker
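The excerpts below are windows into larger HBase test files, so some begin or end mid-method. All of them revolve around HBaseTestingUtility, the harness that boots an in-process ZooKeeper, HDFS and HBase cluster for tests. As a point of reference, here is a minimal, hedged sketch of that lifecycle; the class, table and column-family names are illustrative and taken from no excerpt below:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    htu.startMiniCluster();                // in-process ZK + HDFS + HBase
    try {
      // createTable returns a live client handle to the new table
      HTable table = htu.createTable(Bytes.toBytes("demo"), Bytes.toBytes("f"));
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
      table.close();
    } finally {
      htu.shutdownMiniCluster();           // always tear the cluster down
    }
  }
}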


      this.region = null;
    }
  }

  @Test public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);

    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;

    try {
      cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
      byte[][] families = { fam1, fam2 };
      HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

      //Setting up region
      byte[] row = Bytes.toBytes("row1");
      byte[] col = Bytes.toBytes("col1");

      Put put = new Put(row);
      put.add(fam1, col, 1, Bytes.toBytes("test1"));
      put.add(fam2, col, 1, Bytes.toBytes("test2"));
      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().
          getRegions(Bytes.toBytes(this.getName())).get(0);
      firstRegion.flushcache();
      HDFSBlocksDistribution blocksDistribution1 =
          firstRegion.getHDFSBlocksDistribution();

      // given that the replication factor is set to 2 and we have 2 HFiles,
      // we will have a total of 4 block replicas on 3 datanodes; thus at
      // least one host must hold replicas of both HFiles. That host's
      // weight will equal the unique-blocks total weight.
      long uniqueBlocksWeight1 =
          blocksDistribution1.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      assertTrue(uniqueBlocksWeight1 == topHostWeight);

      // compute the same value via the static method; it should match.
      // the static method is what the load balancer and other components use.
      HDFSBlocksDistribution blocksDistribution2 =
        HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
        firstRegion.getTableDesc(),
        firstRegion.getRegionInfo().getEncodedName());
      long uniqueBlocksWeight2 =
        blocksDistribution2.getUniqueBlocksTotalWeight();

      assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

      ht.close();
    } finally {
      if (cluster != null) {
        htu.shutdownMiniCluster();
      }
    }
  }
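Why the assertions above hold: each flushed HFile here is tiny and fits in a single block (the block size was dropped to 1024 bytes), and dfs.replication is 2, so the 2 HFiles yield 2 × 2 = 4 block replicas spread over 3 datanodes. By the pigeonhole principle some host carries at least two replicas, and since HDFS never places two replicas of the same block on one host, that host holds a block from each HFile; its weight (the byte count of the blocks it serves) therefore equals the unique-blocks total weight.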


  @Test
  public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
    Stoppable stop = new StoppableImplementation();
    // need a local util so we don't break the rest of the test, which runs on
    // the local FS and gets hosed once we start using a minicluster.
    HBaseTestingUtility localUtil = new HBaseTestingUtility();
    Configuration conf = localUtil.getConfiguration();
    final Path testDir = UTIL.getDataTestDir();
    final FileSystem fs = UTIL.getTestFileSystem();
    LOG.debug("Writing test data to: " + testDir);
    String confKey = "hbase.test.cleaner.delegates";
    conf.set(confKey, AlwaysDelete.class.getName());
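The excerpt is cut off here, but the AlwaysDelete delegate it registers is small. A minimal sketch, assuming the FileStatus-based isFileDeletable contract of this HBase line (not a verbatim copy of the test's inner class):

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;

// Approves every candidate file, so what the test actually exercises is the
// cleaner chore's directory traversal, not any retention policy.
public class AlwaysDelete extends BaseHFileCleanerDelegate {
  @Override
  public boolean isFileDeletable(FileStatus fStat) {
    return true;
  }
}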

  // This "overrides" the parent static method;
  // make sure the two stay in sync.
  @BeforeClass
  public static void setupCluster() throws Exception {
    useSecureHBaseOverride = true;
    util = new HBaseTestingUtility();
    // setup configuration
    SecureTestUtil.enableSecurity(util.getConfiguration());
    util.startMiniCluster();

    // Wait for the ACL table to become available
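The excerpt stops at the wait. A hedged sketch of such a wait, assuming the byte[] ACL_TABLE_NAME constant of org.apache.hadoop.hbase.security.access.AccessControlLists in this HBase line (later lines turned it into a TableName):

    // block until the _acl_ table exists and is assigned before any test runs
    util.waitTableAvailable(AccessControlLists.ACL_TABLE_NAME, 30000);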

  private static byte[] PRIVATE_COL = Bytes.toBytes("private");
  private static byte[] PUBLIC_COL = Bytes.toBytes("public");

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    Configuration conf = TEST_UTIL.getConfiguration();
    SecureTestUtil.enableSecurity(conf);
    String baseuser = User.getCurrent().getShortName();
    conf.set("hbase.superuser", conf.get("hbase.superuser", "") +
        String.format(",%s.hfs.0,%s.hfs.1,%s.hfs.2", baseuser, baseuser, baseuser));
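With String.format, the appended value is ,<baseuser>.hfs.0,<baseuser>.hfs.1,<baseuser>.hfs.2. Those names match the per-region-server test users that MiniHBaseCluster creates (<user>.hfs.<index>), so each mini-cluster region server keeps superuser rights once security is enabled.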

  public void perform() throws Exception {
    if (sleepTime > 0) {
      Thread.sleep(sleepTime);
    }

    HBaseTestingUtility util = context.getHaseIntegrationTestingUtility();
    HBaseAdmin admin = util.getHBaseAdmin();

    LOG.info("Performing action: Move random region of table " + tableName);
    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
    if (regions == null || regions.isEmpty()) {
      LOG.info("Table " + tableName + " doesn't have regions to move");
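The excerpt breaks off inside the empty-table guard. A hedged sketch of how the method might continue; a null destination lets the master pick the target server itself:

      return;
    }

    // pick one region at random and ask the master to move it
    HRegionInfo region = regions.get(RandomUtils.nextInt(regions.size()));
    LOG.debug("Moving region " + region.getRegionNameAsString());
    admin.move(region.getEncodedNameAsBytes(), null);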

    }
  }

  @Test
  public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);

    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;

    try {
      cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
      byte[][] families = { fam1, fam2 };
      HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

      // Setting up region
      byte[] row = Bytes.toBytes("row1");
      byte[] col = Bytes.toBytes("col1");

      Put put = new Put(row);
      put.add(fam1, col, 1, Bytes.toBytes("test1"));
      put.add(fam2, col, 1, Bytes.toBytes("test2"));
      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
          .get(0);
      firstRegion.flushcache();
      HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

      // given that the replication factor is set to 2 and we have 2 HFiles,
      // we will have a total of 4 block replicas on 3 datanodes; thus at
      // least one host must hold replicas of both HFiles. That host's
      // weight will equal the unique-blocks total weight.
      long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      assertTrue(uniqueBlocksWeight1 == topHostWeight);

      // compute the same value via the static method; it should match.
      // the static method is what the load balancer and other components use.
      HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
          htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
      long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

      assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

      ht.close();
    } finally {
      if (cluster != null) {
        htu.shutdownMiniCluster();
      }
    }
  }
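The static computeHDFSBlocksDistribution exists precisely so that components such as the load balancer can measure a region's locality without opening it. A hedged usage sketch; conf, tableDescriptor, regionInfo and hostname are placeholders rather than names from the test:

    // compute the distribution from filesystem metadata alone, then ask what
    // fraction of the region's bytes are local to a given host
    HDFSBlocksDistribution dist = HRegion.computeHDFSBlocksDistribution(
        conf, tableDescriptor, regionInfo);
    float locality = dist.getBlockLocalityIndex(hostname); // 1.0f == fully local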

  private static AuthenticationTokenSecretManager secretManager;
  private static ClusterId clusterId = new ClusterId();

  @BeforeClass
  public static void setupBeforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniZKCluster();
    // register token type for protocol
    SecurityInfo.addInfo(AuthenticationProtos.AuthenticationService.getDescriptor().getName(),
      new SecurityInfo("hbase.test.kerberos.principal",
        AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN));
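Roughly, SecurityInfo.addInfo maps the AuthenticationService RPC interface to the two things a client needs when security is on: the configuration key naming the server's Kerberos principal, and the token kind (HBASE_AUTH_TOKEN) to look up in the caller's credentials. That mapping is what lets this test authenticate with tokens minted by the in-process secret manager instead of a real KDC.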

    this.tableName = tableName;
  }

  @Override
  public void perform() throws Exception {
    HBaseTestingUtility util = context.getHaseIntegrationTestingUtility();
    HBaseAdmin admin = util.getHBaseAdmin();
    boolean major = RandomUtils.nextInt(100) < majorRatio;

    LOG.info("Performing action: Compact random region of table "
      + tableName + ", major=" + major);
    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
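The excerpt is cut off right after fetching the region list. A hedged sketch of the remainder, mirroring the sibling actions:

    if (regions == null || regions.isEmpty()) {
      LOG.info("Table " + tableName + " doesn't have regions to compact");
      return;
    }

    // choose a random region; issue a major or minor compaction per the coin flip
    HRegionInfo region = regions.get(RandomUtils.nextInt(regions.size()));
    if (major) {
      admin.majorCompact(region.getRegionName());
    } else {
      admin.compact(region.getRegionName());
    }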

    this.tableName = tableName;
  }

  @Override
  public void perform() throws Exception {
    HBaseTestingUtility util = context.getHaseIntegrationTestingUtility();
    HBaseAdmin admin = util.getHBaseAdmin();

    LOG.info("Performing action: Split random region of table " + tableName);
    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
    if (regions == null || regions.isEmpty()) {
      LOG.info("Table " + tableName + " doesn't have regions to split");
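Again truncated inside the guard; a hedged continuation, with the split point left to the region server:

      return;
    }

    HRegionInfo region = regions.get(RandomUtils.nextInt(regions.size()));
    LOG.debug("Splitting region " + region.getRegionNameAsString());
    admin.split(region.getRegionName());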

    this.tableName = tableName;
  }

  @Override
  public void perform() throws Exception {
    HBaseTestingUtility util = context.getHaseIntegrationTestingUtility();
    HBaseAdmin admin = util.getHBaseAdmin();

    LOG.info("Performing action: Flush random region of table " + tableName);
    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
    if (regions == null || regions.isEmpty()) {
      LOG.info("Table " + tableName + " doesn't have regions to flush");
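And the flush analogue, equally hedged:

      return;
    }

    HRegionInfo region = regions.get(RandomUtils.nextInt(regions.size()));
    LOG.debug("Flushing region " + region.getRegionNameAsString());
    admin.flush(region.getRegionName());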
