Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HBaseTestingUtility
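The fragments below are taken from HBase's own test classes and show common ways to drive HBaseTestingUtility. As a starting point, here is a minimal, self-contained sketch of the typical lifecycle: start a mini cluster once per test class, create a table, write and read a cell, and shut everything down. This is a sketch assuming a 0.94/0.98-era client API (createTable(byte[], byte[]), Put.add(family, qualifier, value)), matching the calls used in the fragments on this page; the class name is illustrative.

    import static org.junit.Assert.assertArrayEquals;

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    public class MiniClusterSketchTest {
      private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

      @BeforeClass
      public static void setUpCluster() throws Exception {
        // Starts in-process HDFS, ZooKeeper and HBase daemons.
        TEST_UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDownCluster() throws Exception {
        TEST_UTIL.shutdownMiniCluster();
      }

      @Test
      public void testPutAndGet() throws Exception {
        byte[] family = Bytes.toBytes("f");
        HTable table = TEST_UTIL.createTable(Bytes.toBytes("sketch"), family);
        try {
          Put put = new Put(Bytes.toBytes("row1"));
          put.add(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
          Result result = table.get(new Get(Bytes.toBytes("row1")));
          assertArrayEquals(Bytes.toBytes("v"), result.getValue(family, Bytes.toBytes("q")));
        } finally {
          table.close();
        }
      }
    }

The first fragment below goes further: it starts a multi-master, multi-region-server cluster, creates a region-in-transition znode while HBase is shut down, and then verifies that the region is reassigned after a restart.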


    // Create config to use for this cluster
    Configuration conf = HBaseConfiguration.create();

    // Start the cluster
    final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    log("Cluster started");

    TEST_UTIL.createTable(table, Bytes.toBytes("family"));
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    HRegionInfo hri = regionStates.getRegionsOfTable(table).get(0);
    ServerName serverName = regionStates.getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 200);

    // Find a destination server different from the one currently hosting the region
    ServerName dstName = null;
    for (ServerName tmpServer : master.getServerManager().getOnlineServers().keySet()) {
      if (!tmpServer.equals(serverName)) {
        dstName = tmpServer;
        break;
      }
    }
    assertNotNull("Expected at least one other online region server", dstName);
    // shutdown HBase cluster
    TEST_UTIL.shutdownMiniHBaseCluster();
    // create a RIT node in offline state
    ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
    ZKAssign.createNodeOffline(zkw, hri, dstName);
    Stat stat = new Stat();
    byte[] data =
        ZKAssign.getDataNoWatch(zkw, hri.getEncodedName(), stat);
    assertNotNull(data);
    RegionTransition rt = RegionTransition.parseFrom(data);
    assertEquals(EventType.M_ZK_REGION_OFFLINE, rt.getEventType());

    LOG.info(hri.getEncodedName() + " region is in offline state with source server=" + serverName
        + " and dst server=" + dstName);

    // start HBase cluster
    TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);

    while (true) {
      master = TEST_UTIL.getHBaseCluster().getMaster();
      if (master != null && master.isInitialized()) {
        ServerManager serverManager = master.getServerManager();
        if (!serverManager.areDeadServersInProgress()) {
          break;
        }
      }
      Thread.sleep(200);
    }

    // verify the region is assigned
    master = TEST_UTIL.getHBaseCluster().getMaster();
    master.getAssignmentManager().waitForAssignment(hri);
    regionStates = master.getAssignmentManager().getRegionStates();
    RegionState newState = regionStates.getRegionState(hri);
    assertTrue(newState.isOpened());
  }
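The hand-rolled polling loop above (checking isInitialized() and areDeadServersInProgress() every 200 ms) can be written more compactly on HBaseTestingUtility versions that ship the Waiter helper (roughly 0.96 and later). A hedged alternative sketch:

    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        HMaster m = TEST_UTIL.getHBaseCluster().getMaster();
        return m != null && m.isInitialized()
            && !m.getServerManager().areDeadServersInProgress();
      }
    });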


    final int NUM_MASTERS = 3;
    final int NUM_RS = 3;

    // Start the cluster
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

    // get all the master threads
    List<MasterThread> masterThreads = cluster.getMasterThreads();

    // wait for each to come online
    for (MasterThread mt : masterThreads) {
      assertTrue(mt.isAlive());
    }

    // verify only one is the active master and we have the right number of masters
    int numActive = 0;
    int activeIndex = -1;
    ServerName activeName = null;
    HMaster active = null;
    for (int i = 0; i < masterThreads.size(); i++) {
      if (masterThreads.get(i).getMaster().isActiveMaster()) {
        numActive++;
        activeIndex = i;
        active = masterThreads.get(activeIndex).getMaster();
        activeName = active.getServerName();
      }
    }
    assertEquals(1, numActive);
    assertEquals(NUM_MASTERS, masterThreads.size());
    LOG.info("Active master " + activeName);

    // Check that ClusterStatus reports the correct active and backup masters
    assertNotNull(active);
    ClusterStatus status = active.getClusterStatus();
    assertTrue(status.getMaster().equals(activeName));
    assertEquals(2, status.getBackupMastersSize());
    assertEquals(2, status.getBackupMasters().size());

    // attempt to stop one of the inactive masters
    int backupIndex = (activeIndex == 0 ? 1 : activeIndex - 1);
    HMaster master = cluster.getMaster(backupIndex);
    LOG.debug("\n\nStopping a backup master: " + master.getServerName() + "\n");
    cluster.stopMaster(backupIndex, false);
    cluster.waitOnMaster(backupIndex);

    // Verify there is still exactly one active master and it is the same one
    numActive = 0;
    for (int i = 0; i < masterThreads.size(); i++) {
      if (masterThreads.get(i).getMaster().isActiveMaster()) {
        assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName()));
        numActive++;
        activeIndex = i;
        active = masterThreads.get(activeIndex).getMaster();
      }
    }
    assertEquals(1, numActive);
    assertEquals(2, masterThreads.size());
    int rsCount = masterThreads.get(activeIndex).getMaster().getClusterStatus().getServersSize();
    LOG.info("Active master " + active.getServerName() + " managing " + rsCount +  " regions servers");
    assertEquals(3, rsCount);

    // Check that ClusterStatus reports the correct active and backup masters
    assertNotNull(active);
    status = active.getClusterStatus();
    assertTrue(status.getMaster().equals(activeName));
    assertEquals(1, status.getBackupMastersSize());
    assertEquals(1, status.getBackupMasters().size());

    // kill the active master
    LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
    cluster.stopMaster(activeIndex, false);
    cluster.waitOnMaster(activeIndex);

    // wait for an active master to show up and be ready
    assertTrue(cluster.waitForActiveAndReadyMaster());

    LOG.debug("\n\nVerifying backup master is now active\n");
    // should only have one master now
    assertEquals(1, masterThreads.size());

    // and it should be the active master
    active = masterThreads.get(0).getMaster();
    assertNotNull(active);
    status = active.getClusterStatus();
    ServerName mastername = status.getMaster();
    assertTrue(mastername.equals(active.getServerName()));
    assertTrue(active.isActiveMaster());
    assertEquals(0, status.getBackupMastersSize());
    assertEquals(0, status.getBackupMasters().size());
    int rss = status.getServersSize();
    LOG.info("Active master " + mastername.getServerName() + " managing " +
      rss +  " region servers");
    assertEquals(3, rss);

    // Stop the cluster
    TEST_UTIL.shutdownMiniCluster();
  }
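The startMiniCluster(NUM_MASTERS, NUM_RS) overload used above is still available in the 1.x line, but in HBase 2.2+ the multi-argument overloads are deprecated in favor of StartMiniClusterOption. A hedged sketch of the equivalent call for newer versions:

    // Assumes HBase 2.2+ where StartMiniClusterOption is available.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(NUM_MASTERS)
        .numRegionServers(NUM_RS)
        .build();
    TEST_UTIL.startMiniCluster(option);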


  @BeforeClass
  public static void setupCluster() throws Exception {
    util = new HBaseTestingUtility();
    util.startMiniCluster(1);
  }
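The matching teardown is not part of the fragment above, but the usual counterpart simply shuts the mini cluster down once the class finishes (the method name here is illustrative):

  @AfterClass
  public static void tearDownCluster() throws Exception {
    util.shutdownMiniCluster();
  }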

  static MiniDFSCluster dfsCluster;
  static MiniZooKeeperCluster zkCluster;

  @BeforeClass
  public static void setup() throws Exception {
    TEST_UTIL = new HBaseTestingUtility(HBaseConfiguration.create());
    dfsCluster = TEST_UTIL.startMiniDFSCluster(1);
    zkCluster = TEST_UTIL.startMiniZKCluster();
    originalConf = TEST_UTIL.getConfiguration();
  }
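Because this setup starts the DFS and ZooKeeper mini clusters separately from HBase, a corresponding teardown would also stop them separately. A hedged sketch (the method name is illustrative):

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniZKCluster();
    TEST_UTIL.shutdownMiniDFSCluster();
  }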

    // Make the failure test faster
    conf.setInt("zookeeper.recovery.retry", 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
    conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, (float) 100.0); // no load balancing
    conf.setInt("hbase.regionserver.wal.max.splitters", 3);
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.setDFSCluster(dfsCluster);
    TEST_UTIL.setZkCluster(zkCluster);
    TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");


    this.conf = inConf;
    conf.setLong("hbase.splitlog.max.resubmit", 0); // do not resubmit failed log-split tasks
    // Make the failure test faster
    conf.setInt("zookeeper.recovery.retry", 0);
    TEST_UTIL.shutdownMiniHBaseCluster();
    TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.setDFSCluster(dfsCluster);
    TEST_UTIL.setZkCluster(zkCluster);
    TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, num_rs);
    cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Waiting for active/ready master");

  //This "overrides" the parent static method
  //make sure they are in sync
  @BeforeClass
  public static void setupCluster() throws Exception {
    useSecureHBaseOverride = true;
    util = new HBaseTestingUtility();
    // setup configuration
    SecureTestUtil.enableSecurity(util.getConfiguration());
    util.startMiniCluster();

    // Wait for the ACL table to become available
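The fragment is cut off after this comment. One hedged way to perform that wait, assuming a 0.98-era security API where AccessControlLists.ACL_TABLE_NAME is a TableName and HBaseTestingUtility offers waitTableAvailable(byte[], long):

    // Assumption: adjust the constant and the overload to your HBase version.
    util.waitTableAvailable(AccessControlLists.ACL_TABLE_NAME.getName(), 30000);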

      this.region = null;
    }
  }

  @Test public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);


    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;

    try {
      cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
      byte [][] families = {fam1, fam2};
      HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

      // Write a couple of cells so the region has data to flush
      byte[] row = Bytes.toBytes("row1");
      byte[] col = Bytes.toBytes("col1");

      Put put = new Put(row);
      put.add(fam1, col, 1, Bytes.toBytes("test1"));
      put.add(fam2, col, 1, Bytes.toBytes("test2"));
      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().
          getRegions(Bytes.toBytes(this.getName())).get(0);
      firstRegion.flushcache();
      HDFSBlocksDistribution blocksDistribution1 =
          firstRegion.getHDFSBlocksDistribution();

      // Given the replication factor of 2 set above and the two HFiles we just
      // flushed, there are 4 block replicas in total spread over 3 datanodes;
      // thus at least one host must hold a replica from both HFiles, and that
      // host's weight will equal the unique-blocks total weight.
      long uniqueBlocksWeight1 =
          blocksDistribution1.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      assertEquals(uniqueBlocksWeight1, topHostWeight);
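      // (Not part of the original test.) HDFSBlocksDistribution also exposes a
      // per-host locality index -- the fraction of the total block weight held
      // by the given host -- which the balancer uses. A hedged extra check:
      float topHostLocality = blocksDistribution1.getBlockLocalityIndex(topHost);
      assertTrue(topHostLocality > 0.0f);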

      // Use the static method to compute the same value; it should match.
      // The static method is used by the load balancer and other components.
      HDFSBlocksDistribution blocksDistribution2 =
        HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
        firstRegion.getTableDesc(),
        firstRegion.getRegionInfo().getEncodedName());
      long uniqueBlocksWeight2 =
        blocksDistribution2.getUniqueBlocksTotalWeight();

      assertEquals(uniqueBlocksWeight1, uniqueBlocksWeight2);

      ht.close();
    } finally {
      if (cluster != null) {
        htu.shutdownMiniCluster();
      }
    }
  }

