conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 2);
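// The settings above lower the assignment timeout-monitor interval and the number of
// regionservers the master waits for at startup, so the mini cluster initializes quickly.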
// Create and start the cluster
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
log("Cluster started");
// Create a ZKW to use in the test
ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
"unittest", new Abortable() {
@Override
public void abort(String why, Throwable e) {
LOG.error("Fatal ZK Error: " + why, e);
org.junit.Assert.fail("Fatal ZK error: " + why);
}
@Override
public boolean isAborted() {
return false;
}
});
// get all the master threads
List<MasterThread> masterThreads = cluster.getMasterThreads();
assertEquals(1, masterThreads.size());
// only one master thread, let's wait for it to be initialized
assertTrue(cluster.waitForActiveAndReadyMaster());
HMaster master = masterThreads.get(0).getMaster();
assertTrue(master.isActiveMaster());
assertTrue(master.isInitialized());
// disable load balancing on this master
master.balanceSwitch(false);
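// With the balancer off, regions stay exactly where the test places them below.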
// create two tables in META, each with 30 regions
byte [] FAMILY = Bytes.toBytes("family");
byte[][] SPLIT_KEYS =
TEST_UTIL.getRegionSplitStartKeys(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 30);
byte [] enabledTable = Bytes.toBytes("enabledTable");
HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
null, null);
createRegion(hriEnabled, rootdir, conf, htdEnabled);
List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);
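// createMultiRegionsInMeta only writes region rows into META; none of these regions are assigned yet.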
byte [] disabledTable = Bytes.toBytes("disabledTable");
HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable);
htdDisabled.addFamily(new HColumnDescriptor(FAMILY));
// Write the .tableinfo
FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
createRegion(hriDisabled, rootdir, conf, htdDisabled);
List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
log("Regions in META have been created");
// at this point we only expect 2 regions to be assigned out: the catalog regions (-ROOT- and .META.)
assertEquals(2, cluster.countServedRegions());
// The first RS will stay online
List<RegionServerThread> regionservers =
cluster.getRegionServerThreads();
HRegionServer hrs = regionservers.get(0).getRegionServer();
// The second RS is going to be hard-killed
RegionServerThread hrsDeadThread = regionservers.get(1);
HRegionServer hrsDead = hrsDeadThread.getRegionServer();
ServerName deadServerName = hrsDead.getServerName();
// we'll need some regions to already be assigned out properly on the live RS
List<HRegionInfo> enabledAndAssignedRegions = new ArrayList<HRegionInfo>();
enabledAndAssignedRegions.addAll(enabledRegions.subList(0, 6));
enabledRegions.removeAll(enabledAndAssignedRegions);
List<HRegionInfo> disabledAndAssignedRegions = new ArrayList<HRegionInfo>();
disabledAndAssignedRegions.addAll(disabledRegions.subList(0, 6));
disabledRegions.removeAll(disabledAndAssignedRegions);
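// Whatever remains in enabledRegions/disabledRegions stays unassigned for now and feeds the mocked scenarios later in the test.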
// now actually assign them
for (HRegionInfo hri : enabledAndAssignedRegions) {
master.assignmentManager.regionPlans.put(hri.getEncodedName(),
new RegionPlan(hri, null, hrs.getServerName()));
master.assignRegion(hri);
}
for (HRegionInfo hri : disabledAndAssignedRegions) {
master.assignmentManager.regionPlans.put(hri.getEncodedName(),
new RegionPlan(hri, null, hrs.getServerName()));
master.assignRegion(hri);
}
log("Waiting for assignment to finish");
ZKAssign.blockUntilNoRIT(zkw);
master.assignmentManager.waitUntilNoRegionsInTransition(60000);
log("Assignment completed");
assertTrue(" Table must be enabled.", master.getAssignmentManager()
.getZKTable().isEnabledTable("enabledTable"));
// we also need regions assigned out on the dead server
List<HRegionInfo> enabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
enabledAndOnDeadRegions.addAll(enabledRegions.subList(0, 6));
enabledRegions.removeAll(enabledAndOnDeadRegions);
List<HRegionInfo> disabledAndOnDeadRegions = new ArrayList<HRegionInfo>();
disabledAndOnDeadRegions.addAll(disabledRegions.subList(0, 6));
disabledRegions.removeAll(disabledAndOnDeadRegions);
// set the region plan to the server that will be killed and trigger the assignment
for (HRegionInfo hri : enabledAndOnDeadRegions) {
master.assignmentManager.regionPlans.put(hri.getEncodedName(),
new RegionPlan(hri, null, deadServerName));
master.assignRegion(hri);
}
for (HRegionInfo hri : disabledAndOnDeadRegions) {
master.assignmentManager.regionPlans.put(hri.getEncodedName(),
new RegionPlan(hri, null, deadServerName));
master.assignRegion(hri);
}
// wait for no more RIT
log("Waiting for assignment to finish");
ZKAssign.blockUntilNoRIT(zkw);
master.assignmentManager.waitUntilNoRegionsInTransition(60000);
log("Assignment completed");
// master.assignRegion(hri) may not honor the region plan and can place a region elsewhere,
// so verify that the regions actually landed on the expected regionservers
verifyRegionLocation(hrs, enabledAndAssignedRegions);
verifyRegionLocation(hrs, disabledAndAssignedRegions);
verifyRegionLocation(hrsDead, enabledAndOnDeadRegions);
verifyRegionLocation(hrsDead, disabledAndOnDeadRegions);
assertTrue(" Didn't get enough regions of enabledTalbe on live rs.",
enabledAndAssignedRegions.size() >= 2);
assertTrue(" Didn't get enough regions of disalbedTable on live rs.",
disabledAndAssignedRegions.size() >= 2);
assertTrue(" Didn't get enough regions of enabledTalbe on dead rs.",
enabledAndOnDeadRegions.size() >= 2);
assertTrue(" Didn't get enough regions of disalbedTable on dead rs.",
disabledAndOnDeadRegions.size() >= 2);
// Stop the master
log("Aborting master");
cluster.abortMaster(0);
cluster.waitOnMaster(0);
log("Master has aborted");
/*
* Now, let's start mocking up some weird states as described in the method
* javadoc.
*/
List<HRegionInfo> regionsThatShouldBeOnline = new ArrayList<HRegionInfo>();
List<HRegionInfo> regionsThatShouldBeOffline = new ArrayList<HRegionInfo>();
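// Every mocked region below goes into one of these two lists; the final verification
// asserts that enabled-table regions came back online and disabled-table regions did not.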
log("Beginning to mock scenarios");
// Disable the disabledTable in ZK
ZKTable zktable = new ZKTable(zkw);
zktable.setDisabledTable(Bytes.toString(disabledTable));
assertTrue(" The enabled table should be identified on master fail over.",
zktable.isEnabledTable("enabledTable"));
/*
* ZK = CLOSING
*/
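// Stage a CLOSING znode attributed to the dead server, as if the close had been started but never finished.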
// Region of enabled table being closed on dead RS but not finished
HRegionInfo region = enabledAndOnDeadRegions.remove(0);
regionsThatShouldBeOnline.add(region);
ZKAssign.createNodeClosing(zkw, region, deadServerName);
LOG.debug("\n\nRegion of enabled table was CLOSING on dead RS\n" +
region + "\n\n");
// Region of disabled table being closed on dead RS but not finished
region = disabledAndOnDeadRegions.remove(0);
regionsThatShouldBeOffline.add(region);
ZKAssign.createNodeClosing(zkw, region, deadServerName);
LOG.debug("\n\nRegion of disabled table was CLOSING on dead RS\n" +
region + "\n\n");
/*
* ZK = CLOSED
*/
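// A CLOSED znode is staged by creating CLOSING first and then transitioning it,
// mirroring what a regionserver does before the master acknowledges the close.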
// Region of enabled table on dead server gets closed but not ack'd by the master
region = enabledAndOnDeadRegions.remove(0);
regionsThatShouldBeOnline.add(region);
int version = ZKAssign.createNodeClosing(zkw, region, deadServerName);
ZKAssign.transitionNodeClosed(zkw, region, deadServerName, version);
LOG.debug("\n\nRegion of enabled table was CLOSED on dead RS\n" +
region + "\n\n");
// Region of disabled table on dead server gets closed but not ack'd by the master
region = disabledAndOnDeadRegions.remove(0);
regionsThatShouldBeOffline.add(region);
version = ZKAssign.createNodeClosing(zkw, region, deadServerName);
ZKAssign.transitionNodeClosed(zkw, region, deadServerName, version);
LOG.debug("\n\nRegion of disabled table was CLOSED on dead RS\n" +
region + "\n\n");
/*
* ZK = OPENING
*/
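// OPENING is staged in two steps: an OFFLINE znode (as the master normally creates)
// transitioned to OPENING (as the regionserver normally does), attributed to the dead server.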
// RS was opening a region of enabled table then died
region = enabledRegions.remove(0);
regionsThatShouldBeOnline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
ZKAssign.transitionNodeOpening(zkw, region, deadServerName);
LOG.debug("\n\nRegion of enabled table was OPENING on dead RS\n" +
region + "\n\n");
// RS was opening a region of disabled table then died
region = disabledRegions.remove(0);
regionsThatShouldBeOffline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
ZKAssign.transitionNodeOpening(zkw, region, deadServerName);
LOG.debug("\n\nRegion of disabled table was OPENING on dead RS\n" +
region + "\n\n");
/*
* ZK = OPENED
*/
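// For OPENED the region really is opened on the soon-to-die RS; the loop below polls the
// znode until the regionserver has transitioned it to RS_ZK_REGION_OPENED.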
// Region of enabled table was opened on dead RS
region = enabledRegions.remove(0);
regionsThatShouldBeOnline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
hrsDead.openRegion(region);
while (true) {
RegionTransitionData rtd = ZKAssign.getData(zkw, region.getEncodedName());
if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
break;
}
Thread.sleep(100);
}
LOG.debug("\n\nRegion of enabled table was OPENED on dead RS\n" +
region + "\n\n");
// Region of disabled table was opened on dead RS
region = disabledRegions.remove(0);
regionsThatShouldBeOffline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
hrsDead.openRegion(region);
while (true) {
RegionTransitionData rtd = ZKAssign.getData(zkw, region.getEncodedName());
if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
break;
}
Thread.sleep(100);
}
LOG.debug("\n\nRegion of disabled table was OPENED on dead RS\n" +
region + "\n\n");
/*
* ZK = NONE
*/
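// Same flow as OPENED, but the OPENED znode is deleted afterwards so the region looks
// like it reached steady state (no transition node at all) on the dead RS.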
// Region of enabled table was open at steady-state on dead RS
region = enabledRegions.remove(0);
regionsThatShouldBeOnline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
hrsDead.openRegion(region);
while (true) {
RegionTransitionData rtd = ZKAssign.getData(zkw, region.getEncodedName());
if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
ZKAssign.deleteOpenedNode(zkw, region.getEncodedName());
break;
}
Thread.sleep(100);
}
LOG.debug("\n\nRegion of enabled table was open at steady-state on dead RS"
+ "\n" + region + "\n\n");
// Region of disabled table was open at steady-state on dead RS
region = disabledRegions.remove(0);
regionsThatShouldBeOffline.add(region);
ZKAssign.createNodeOffline(zkw, region, deadServerName);
hrsDead.openRegion(region);
while (true) {
RegionTransitionData rtd = ZKAssign.getData(zkw, region.getEncodedName());
if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
ZKAssign.deleteOpenedNode(zkw, region.getEncodedName());
break;
}
Thread.sleep(100);
}
LOG.debug("\n\nRegion of disabled table was open at steady-state on dead RS"
+ "\n" + region + "\n\n");
/*
* DONE MOCKING
*/
log("Done mocking data up in ZK");
// Kill the RS that had a hard death
log("Killing RS " + deadServerName);
hrsDead.abort("Killing for unit test");
log("RS " + deadServerName + " killed");
// Start up a new master. Wait until the regionserver is completely down
// before starting the new master because of HBASE-4511.
while (hrsDeadThread.isAlive()) {
Threads.sleep(10);
}
log("Starting up a new master");
master = cluster.startMaster().getMaster();
log("Waiting for master to be ready");
assertTrue(cluster.waitForActiveAndReadyMaster());
log("Master is ready");
// Let's add some weird states to the master's in-memory state.
// After HBASE-3181, we need to have some ZK state if we're in PENDING_OPEN,
// because it is impossible to get into this state without a ZK node;
// this is not true of PENDING_CLOSE.
// PENDING_OPEN and enabled
region = enabledRegions.remove(0);
regionsThatShouldBeOnline.add(region);
master.assignmentManager.regionsInTransition.put(region.getEncodedName(),
new RegionState(region, RegionState.State.PENDING_OPEN, 0, null));
ZKAssign.createNodeOffline(zkw, region, master.getServerName());
// PENDING_OPEN and disabled
region = disabledRegions.remove(0);
regionsThatShouldBeOffline.add(region);
master.assignmentManager.regionsInTransition.put(region.getEncodedName(),
new RegionState(region, RegionState.State.PENDING_OPEN, 0, null));
ZKAssign.createNodeOffline(zkw, region, master.getServerName());
// This test is bad. It puts up a PENDING_CLOSE but doesn't say which
// server we were PENDING_CLOSE against -- i.e. there is no entry in
// AssignmentManager#regions. Without a server, we NPE trying to resend the close.
// In the past, there was wonky logic that had us reassign the region if no
// server was found at the tail of the unassign. That was removed. Commenting out for now.
// TODO: Remove completely.
/*
// PENDING_CLOSE and enabled
region = enabledRegions.remove(0);
LOG.info("Setting PENDING_CLOSE enabled " + region.getEncodedName());
regionsThatShouldBeOnline.add(region);
master.assignmentManager.regionsInTransition.put(region.getEncodedName(),
new RegionState(region, RegionState.State.PENDING_CLOSE, 0));
// PENDING_CLOSE and disabled
region = disabledRegions.remove(0);
LOG.info("Setting PENDING_CLOSE disabled " + region.getEncodedName());
regionsThatShouldBeOffline.add(region);
master.assignmentManager.regionsInTransition.put(region.getEncodedName(),
new RegionState(region, RegionState.State.PENDING_CLOSE, 0));
*/
// Failover should now be complete; wait until there are no more RIT
log("Waiting for no more RIT");
ZKAssign.blockUntilNoRIT(zkw);
log("No more RIT in ZK");
long now = System.currentTimeMillis();
final long maxTime = 120000;
boolean done = master.assignmentManager.waitUntilNoRegionsInTransition(maxTime);
if (!done) {
LOG.info("rit=" + master.assignmentManager.getRegionsInTransition());
}
long elapsed = System.currentTimeMillis() - now;
assertTrue("Elapsed=" + elapsed + ", maxTime=" + maxTime + ", done=" + done,
elapsed < maxTime);
log("No more RIT in RIT map, doing final test verification");
// Grab all the regions that are online across RSs
Set<HRegionInfo> onlineRegions = new TreeSet<HRegionInfo>();
for (JVMClusterUtil.RegionServerThread rst :
cluster.getRegionServerThreads()) {
try {
onlineRegions.addAll(rst.getRegionServer().getOnlineRegions());
} catch (org.apache.hadoop.hbase.regionserver.RegionServerStoppedException e) {
LOG.info("Got RegionServerStoppedException", e);
}
}
// Now, everything that should be online should be online
for (HRegionInfo hri : regionsThatShouldBeOnline) {
assertTrue("region=" + hri.getRegionNameAsString(), onlineRegions.contains(hri));
}
// Everything that should be offline should not be online
for (HRegionInfo hri : regionsThatShouldBeOffline) {
assertFalse(onlineRegions.contains(hri));
}
log("Done with verification, all passed, shutting down cluster");
// Done, shutdown the cluster
TEST_UTIL.shutdownMiniCluster();
}