Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.MiniHBaseCluster
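
Most of the examples below drive the MiniHBaseCluster through an HBaseTestingUtility. As a minimal, hedged sketch of that lifecycle (assuming the 0.96-era test API used throughout this page; not taken from any one source file):

  // Start a mini cluster, grab a handle to it, then tear everything down.
  private static void demoMiniClusterLifecycle() throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                  // one master, one region server by default
    MiniHBaseCluster cluster = util.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());
    util.shutdownMiniCluster();               // also stops the mini ZK and DFS clusters
  }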


  private static void startHBase() throws Exception {
    if (hbaseCluster != null) {
      LOG.error("MiniHBaseCluster already running");
      return;
    }
    hbaseCluster = new MiniHBaseCluster(conf, 1);
    // opening the META table ensures that the cluster is running
    new HTable(conf, HConstants.META_TABLE_NAME);
    LOG.info("started MiniHBaseCluster");
  }
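
The helper above only starts the cluster. A hedged teardown counterpart (a sketch, not from the original source; it assumes the same static hbaseCluster field and LOG):

  private static void stopHBase() {
    if (hbaseCluster == null) {
      return;
    }
    hbaseCluster.shutdown();   // shut down master and region servers
    hbaseCluster.join();       // wait for the cluster threads to exit
    hbaseCluster = null;
    LOG.info("stopped MiniHBaseCluster");
  }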


  /**
   * Test we reopen a region once closed.
   * @throws Exception
   */
  @Test (timeout=300000) public void testReOpenRegion()
  throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Number of region servers = " +
      cluster.getLiveRegionServerThreads().size());

    int rsIdx = 0;
    HRegionServer regionServer =
      TEST_UTIL.getHBaseCluster().getRegionServer(rsIdx);
    HRegionInfo hri = getNonMetaRegion(ProtobufUtil.getOnlineRegions(regionServer));
    LOG.debug("Asking RS to close region " + hri.getRegionNameAsString());

    LOG.info("Unassign " + hri.getRegionNameAsString());
    cluster.getMaster().assignmentManager.unassign(hri);

    while (!cluster.getMaster().assignmentManager.wasClosedHandlerCalled(hri)) {
      Threads.sleep(100);
    }

    while (!cluster.getMaster().assignmentManager.wasOpenedHandlerCalled(hri)) {
      Threads.sleep(100);
    }

    LOG.info("Done with testReOpenRegion");
  }
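
Several tests on this page call a getNonMetaRegion() helper that the listings do not show. A hedged sketch of what such a helper plausibly looks like (the name and parameter come from the call sites; the body is an assumption):

  // Pick any online region that is not hbase:meta.
  private static HRegionInfo getNonMetaRegion(final Collection<HRegionInfo> regions) {
    for (HRegionInfo hri : regions) {
      if (!hri.isMetaRegion()) {
        return hri;
      }
    }
    throw new IllegalStateException("No user region found among " + regions);
  }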

  /**
   * @throws Exception
   */
  @Test
  public void testRSAlreadyProcessingRegion() throws Exception {
    LOG.info("starting testRSAlreadyProcessingRegion");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

    HRegionServer hr0 =
        cluster.getLiveRegionServerThreads().get(0).getRegionServer();
    HRegionServer hr1 =
        cluster.getLiveRegionServerThreads().get(1).getRegionServer();
    HRegionInfo hri = getNonMetaRegion(ProtobufUtil.getOnlineRegions(hr0));

    // fake that hr1 is processing the region
    hr1.getRegionsInTransitionInRS().putIfAbsent(hri.getEncodedNameAsBytes(), true);

    // now ask the master to move the region to hr1; the move will fail
    TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(hr1.getServerName().toString()));

    // make sure the region never opened on hr1
    assertNull(hr1.getOnlineRegion(hri.getEncodedNameAsBytes()));

    // remove the block and reset the boolean
    hr1.getRegionsInTransitionInRS().remove(hri.getEncodedNameAsBytes());

    // now try moving a region when there is no region in transition.
    hri = getNonMetaRegion(ProtobufUtil.getOnlineRegions(hr1));

    TEST_UTIL.getHBaseAdmin().move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(hr0.getServerName().toString()));

    while (!cluster.getMaster().assignmentManager.wasOpenedHandlerCalled(hri)) {
      Threads.sleep(100);
    }

    // make sure the region has moved from the original RS
    assertNull(hr1.getOnlineRegion(hri.getEncodedNameAsBytes()));

  }

  @Test (timeout=300000) public void testCloseRegion()
  throws Exception {
    LOG.info("Running testCloseRegion");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Number of region servers = " + cluster.getLiveRegionServerThreads().size());

    int rsIdx = 0;
    HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(rsIdx);
    HRegionInfo hri = getNonMetaRegion(ProtobufUtil.getOnlineRegions(regionServer));
    LOG.debug("Asking RS to close region " + hri.getRegionNameAsString());

    cluster.getMaster().assignmentManager.unassign(hri);

    while (!cluster.getMaster().assignmentManager.wasClosedHandlerCalled(hri)) {
      Threads.sleep(100);
    }
    LOG.info("Done with testCloseRegion");
  }

    HBaseAdmin ha = new HBaseAdmin(conn);
    assertTrue(ha.tableExists(tableName));
    assertTrue(t.get(new Get(ROW)).isEmpty());

    // stop the master
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    cluster.stopMaster(0, false);
    cluster.waitOnMaster(0);

    // start up a new master
    cluster.startMaster();
    assertTrue(cluster.waitForActiveAndReadyMaster());

    // test that the same unmanaged connection works with a new
    // HBaseAdmin and can connect to the new master.
    HBaseAdmin newAdmin = new HBaseAdmin(conn);
    assertTrue(newAdmin.tableExists(tableName));
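
The conn used above is an "unmanaged" connection the caller created earlier and is responsible for closing. A minimal sketch of setting one up (assuming the HConnectionManager API of the same era):

    HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration());
    try {
      HBaseAdmin admin = new HBaseAdmin(conn);   // shares conn, does not own it
      assertTrue(admin.tableExists(tableName));
      admin.close();
    } finally {
      conn.close();   // an unmanaged connection must be closed by its creator
    }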

  }

  @Test
  public void testGlobalAuthorizationForNewRegisteredRS() throws Exception {
    LOG.debug("Test for global authorization for a new registered RegionServer.");
    MiniHBaseCluster hbaseCluster = TEST_UTIL.getHBaseCluster();

    // Since each RegionServer runs as a different user, add global
    // permissions for the new user.
    String currentUser = User.getCurrent().getShortName();
    String activeUserForNewRs = currentUser + ".hfs." +
      hbaseCluster.getLiveRegionServerThreads().size();
    SecureTestUtil.grantGlobal(TEST_UTIL, activeUserForNewRs,
      Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ,
        Permission.Action.WRITE);

    final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    admin.createTable(htd);

    // Starting a new RegionServer.
    JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster
        .startRegionServer();
    final HRegionServer newRs = newRsThread.getRegionServer();

    // Move region to the new RegionServer.
    final HTable table = new HTable(TEST_UTIL.getConfiguration(), TEST_TABLE2);
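
The listing is truncated here. Based on the preceding comment, the move presumably proceeds along these lines (a hedged sketch, not the original code; it reuses admin, newRs, and table from above):

    // Move the table's first region onto the newly started region server.
    HRegionInfo firstRegion = table.getRegionLocations().firstKey();
    admin.move(firstRegion.getEncodedNameAsBytes(),
        Bytes.toBytes(newRs.getServerName().getServerName()));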

    meta.close();
  }

  @Test(timeout=180000)
  public void testFixAssignmentsWhenMETAinTransition() throws Exception {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HBaseAdmin admin = null;
    try {
      admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
      admin.closeRegion(cluster.getServerHoldingMeta(),
          HRegionInfo.FIRST_META_REGIONINFO);
    } finally {
      if (admin != null) {
        admin.close();
      }
    }
    regionStates.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
    MetaRegionTracker.deleteMetaLocation(cluster.getMaster().getZooKeeper());
    assertFalse(regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO));
    HBaseFsck hbck = doFsck(conf, true);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION,
        ERROR_CODE.NULL_META_REGION });
    assertNoErrors(doFsck(conf, false));
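
The regionStates referenced above is not declared in this listing; the failover example further down this page obtains it from the master's AssignmentManager, and presumably this test does the same:

    RegionStates regionStates =
        cluster.getMaster().getAssignmentManager().getRegionStates();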

    try {
      setupTable(table);
      assertEquals(ROWKEYS.length, countRows());

      // Mess it up by creating an overlap
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      HMaster master = cluster.getMaster();
      HRegionInfo hriOverlap1 = createRegion(conf, tbl.getTableDescriptor(),
        Bytes.toBytes("A"), Bytes.toBytes("AB"));
      master.assignRegion(hriOverlap1);
      master.getAssignmentManager().waitForAssignment(hriOverlap1);
      HRegionInfo hriOverlap2 = createRegion(conf, tbl.getTableDescriptor(),
        Bytes.toBytes("AB"), Bytes.toBytes("B"));
      master.assignRegion(hriOverlap2);
      master.getAssignmentManager().waitForAssignment(hriOverlap2);

      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.DUPE_STARTKEYS,
        ERROR_CODE.DUPE_STARTKEYS, ERROR_CODE.OVERLAP_IN_REGION_CHAIN});
      assertEquals(3, hbck.getOverlapGroups(table).size());
      assertEquals(ROWKEYS.length, countRows());

      // mess around the overlapped regions, to trigger NotServingRegionException
      Multimap<byte[], HbckInfo> overlapGroups = hbck.getOverlapGroups(table);
      ServerName serverName = null;
      byte[] regionName = null;
      for (HbckInfo hbi: overlapGroups.values()) {
        if ("A".equals(Bytes.toString(hbi.getStartKey()))
            && "B".equals(Bytes.toString(hbi.getEndKey()))) {
          regionName = hbi.getRegionName();

          // get an RS not serving the region to force bad assignment info into META.
          int k = cluster.getServerWith(regionName);
          for (int i = 0; i < 3; i++) {
            if (i != k) {
              HRegionServer rs = cluster.getRegionServer(i);
              serverName = rs.getServerName();
              break;
            }
          }

          HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
          HBaseFsckRepair.closeRegionSilentlyAndWait(admin,
            cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
          admin.offline(regionName);
          break;
        }
      }

  public void testRegionShouldNotBeDeployed() throws Exception {
    TableName table =
        TableName.valueOf("tableRegionShouldNotBeDeployed");
    try {
      LOG.info("Starting testRegionShouldNotBeDeployed.");
      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
      assertTrue(cluster.waitForActiveAndReadyMaster());


      byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
          Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
      HTableDescriptor htdDisabled = new HTableDescriptor(table);
      htdDisabled.addFamily(new HColumnDescriptor(FAM));

      // Write the .tableinfo
      FSTableDescriptors fstd = new FSTableDescriptors(conf);
      fstd.createTableDescriptor(htdDisabled);
      List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
          TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

      // Let's just assign everything to first RS
      HRegionServer hrs = cluster.getRegionServer(0);

      // Create region files.
      TEST_UTIL.getHBaseAdmin().disableTable(table);
      TEST_UTIL.getHBaseAdmin().enableTable(table);

      // Disable the table and close its regions
      TEST_UTIL.getHBaseAdmin().disableTable(table);
      HRegionInfo region = disabledRegions.remove(0);
      byte[] regionName = region.getRegionName();

      // The region should not be assigned currently
      assertTrue(cluster.getServerWith(regionName) == -1);

      // Directly open a region on a region server.
      // If we went through the AM/ZK path, the region wouldn't be opened.
      // Even if it were opened, the AM would close it, which makes
      // this test flaky.
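
      // The listing breaks off here. A hedged sketch of the direct open,
      // reusing the ProtobufUtil helper the failover example below calls:
      ProtobufUtil.openRegion(hrs, region);
      while (cluster.getServerWith(regionName) == -1) {
        Threads.sleep(100);   // wait until some RS reports the region online
      }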

    Configuration conf = HBaseConfiguration.create();

    // Start the cluster
    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    log("Cluster started");

    // Create a ZKW to use in the test
    ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);

    // get all the master threads
    List<MasterThread> masterThreads = cluster.getMasterThreads();
    assertEquals(1, masterThreads.size());

    // only one master thread, let's wait for it to be initialized
    assertTrue(cluster.waitForActiveAndReadyMaster());
    HMaster master = masterThreads.get(0).getMaster();
    assertTrue(master.isActiveMaster());
    assertTrue(master.isInitialized());

    // disable load balancing on this master
    master.balanceSwitch(false);

    // create two tables in META, each with 10 regions
    byte [] FAMILY = Bytes.toBytes("family");
    byte [][] SPLIT_KEYS = new byte [][] {
        new byte[0], Bytes.toBytes("aaa"), Bytes.toBytes("bbb"),
        Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"),
        Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"),
        Bytes.toBytes("iii"), Bytes.toBytes("jjj")
    };

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(TableName.valueOf(enabledTable));
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);
    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
    // Write the .tableinfo
    fstd.createTableDescriptor(htdEnabled);

    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(), null, null);
    createRegion(hriEnabled, rootdir, conf, htdEnabled);

    List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);

    TableName disabledTable = TableName.valueOf("disabledTable");
    HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
    htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
    // Write the .tableinfo
    fstd.createTableDescriptor(htdDisabled);
    HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getTableName(), null, null);
    createRegion(hriDisabled, rootdir, conf, htdDisabled);
    List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
        TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);

    TableName tableWithMergingRegions = TableName.valueOf("tableWithMergingRegions");
    TEST_UTIL.createTable(tableWithMergingRegions, FAMILY, new byte [][] {Bytes.toBytes("m")});

    log("Regions in hbase:meta and namespace have been created");

    // at this point we expect only 4 regions to be assigned out
    // (the catalog and namespace regions, plus the 2 merging regions)
    assertEquals(4, cluster.countServedRegions());

    // Move merging regions to the same region server
    AssignmentManager am = master.getAssignmentManager();
    RegionStates regionStates = am.getRegionStates();
    List<HRegionInfo> mergingRegions = regionStates.getRegionsOfTable(tableWithMergingRegions);
    assertEquals(2, mergingRegions.size());
    HRegionInfo a = mergingRegions.get(0);
    HRegionInfo b = mergingRegions.get(1);
    HRegionInfo newRegion = RegionMergeTransaction.getMergedRegionInfo(a, b);
    ServerName mergingServer = regionStates.getRegionServerOfRegion(a);
    ServerName serverB = regionStates.getRegionServerOfRegion(b);
    if (!serverB.equals(mergingServer)) {
      RegionPlan plan = new RegionPlan(b, serverB, mergingServer);
      am.balance(plan);
      assertTrue(am.waitForAssignment(b));
    }

    // Let's just assign everything to first RS
    HRegionServer hrs = cluster.getRegionServer(0);
    ServerName serverName = hrs.getServerName();
    HRegionInfo closingRegion = enabledRegions.remove(0);
    // we'll need some regions to already be assigned out properly on live RS
    List<HRegionInfo> enabledAndAssignedRegions = new ArrayList<HRegionInfo>();
    enabledAndAssignedRegions.add(enabledRegions.remove(0));
    enabledAndAssignedRegions.add(enabledRegions.remove(0));
    enabledAndAssignedRegions.add(closingRegion);

    List<HRegionInfo> disabledAndAssignedRegions = new ArrayList<HRegionInfo>();
    disabledAndAssignedRegions.add(disabledRegions.remove(0));
    disabledAndAssignedRegions.add(disabledRegions.remove(0));

    // now actually assign them
    for (HRegionInfo hri : enabledAndAssignedRegions) {
      master.assignmentManager.regionPlans.put(hri.getEncodedName(),
          new RegionPlan(hri, null, serverName));
      master.assignRegion(hri);
    }
    for (HRegionInfo hri : disabledAndAssignedRegions) {
      master.assignmentManager.regionPlans.put(hri.getEncodedName(),
          new RegionPlan(hri, null, serverName));
      master.assignRegion(hri);
    }

    // wait for no more RIT
    log("Waiting for assignment to finish");
    ZKAssign.blockUntilNoRIT(zkw);
    log("Assignment completed");

    // Stop the master
    log("Aborting master");
    cluster.abortMaster(0);
    cluster.waitOnMaster(0);
    log("Master has aborted");

    /*
     * Now, let's start mocking up some weird states as described in the method
     * javadoc.
     */

    List<HRegionInfo> regionsThatShouldBeOnline = new ArrayList<HRegionInfo>();
    List<HRegionInfo> regionsThatShouldBeOffline = new ArrayList<HRegionInfo>();

    log("Beginning to mock scenarios");

    // Disable the disabledTable in ZK
    ZKTable zktable = new ZKTable(zkw);
    zktable.setDisabledTable(disabledTable);

    /*
     *  ZK = OFFLINE
     */

    // Region that should be assigned but is not and is in ZK as OFFLINE
    // Cause: This can happen if the master crashed after creating the znode but before sending the
    //  request to the region server
    HRegionInfo region = enabledRegions.remove(0);
    regionsThatShouldBeOnline.add(region);
    ZKAssign.createNodeOffline(zkw, region, serverName);

    /*
     * ZK = CLOSING
     */
    // Cause: Same as offline.
    regionsThatShouldBeOnline.add(closingRegion);
    ZKAssign.createNodeClosing(zkw, closingRegion, serverName);

    /*
     * ZK = CLOSED
     */

    // Region of enabled table closed but not acknowledged
    // Cause: Master was down while the region server updated the ZK status.
    region = enabledRegions.remove(0);
    regionsThatShouldBeOnline.add(region);
    int version = ZKAssign.createNodeClosing(zkw, region, serverName);
    ZKAssign.transitionNodeClosed(zkw, region, serverName, version);

    // Region of disabled table closed but not acknowledged
    region = disabledRegions.remove(0);
    regionsThatShouldBeOffline.add(region);
    version = ZKAssign.createNodeClosing(zkw, region, serverName);
    ZKAssign.transitionNodeClosed(zkw, region, serverName, version);

    /*
     * ZK = OPENED
     */

    // Region of enabled table was opened on the RS
    // Cause: same as the OFFLINE case.
    region = enabledRegions.remove(0);
    regionsThatShouldBeOnline.add(region);
    ZKAssign.createNodeOffline(zkw, region, serverName);
    ProtobufUtil.openRegion(hrs, region);
    while (true) {
      byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
      RegionTransition rt = RegionTransition.parseFrom(bytes);
      if (rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) {
        break;
      }
      Thread.sleep(100);
    }

    // Region of disabled table was opened on the RS
    // Cause: Master failed while updating the status for this region server.
    region = disabledRegions.remove(0);
    regionsThatShouldBeOffline.add(region);
    ZKAssign.createNodeOffline(zkw, region, serverName);
    ProtobufUtil.openRegion(hrs, region);
    while (true) {
      byte [] bytes = ZKAssign.getData(zkw, region.getEncodedName());
      RegionTransition rt = RegionTransition.parseFrom(bytes);
      if (rt != null && rt.getEventType().equals(EventType.RS_ZK_REGION_OPENED)) {
        break;
      }
      Thread.sleep(100);
    }

    /*
     * ZK = MERGING
     */

    // Regions of table of merging regions
    // Cause: Master was down while merging was going on
    RegionMergeTransaction.createNodeMerging(
      zkw, newRegion, mergingServer, a, b);

    /*
     * ZK = NONE
     */

    /*
     * DONE MOCKING
     */

    log("Done mocking data up in ZK");

    // Start up a new master
    log("Starting up a new master");
    master = cluster.startMaster().getMaster();
    log("Waiting for master to be ready");
    cluster.waitForActiveAndReadyMaster();
    log("Master is ready");

    // Get new region states since master restarted
    regionStates = master.getAssignmentManager().getRegionStates();
    // Merging region should remain merging
    assertTrue(regionStates.isRegionInState(a, State.MERGING));
    assertTrue(regionStates.isRegionInState(b, State.MERGING));
    assertTrue(regionStates.isRegionInState(newRegion, State.MERGING_NEW));
    // Now remove the faked merging znode; the merging regions should be
    // offlined automatically, otherwise it is a bug in the AM.
    ZKAssign.deleteNodeFailSilent(zkw, newRegion);

    // Failover should be completed, now wait for no RIT
    log("Waiting for no more RIT");
    ZKAssign.blockUntilNoRIT(zkw);
    log("No more RIT in ZK, now doing final test verification");

    // Grab all the regions that are online across RSs
    Set<HRegionInfo> onlineRegions = new TreeSet<HRegionInfo>();
    for (JVMClusterUtil.RegionServerThread rst :
      cluster.getRegionServerThreads()) {
      onlineRegions.addAll(ProtobufUtil.getOnlineRegions(rst.getRegionServer()));
    }

    // Now, everything that should be online should be online
    for (HRegionInfo hri : regionsThatShouldBeOnline) {
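
The listing ends inside this final loop. A hedged sketch of how the verification plausibly concludes, given the onlineRegions set built just above (assertion messages are illustrative):

    for (HRegionInfo hri : regionsThatShouldBeOnline) {
      assertTrue("Region " + hri + " should be online", onlineRegions.contains(hri));
    }
    for (HRegionInfo hri : regionsThatShouldBeOffline) {
      assertFalse("Region " + hri + " should be offline", onlineRegions.contains(hri));
    }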
