return;
}
}
String encodedName = region.getEncodedName();
// Grab the state of this region and synchronize on it
RegionState state;
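// Version of the CLOSING znode created below; it is passed along with the
// CLOSE RPC so the regionserver can operate on that same znode.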
int versionOfClosingNode = -1;
synchronized (regionsInTransition) {
state = regionsInTransition.get(encodedName);
if (state == null) {
// Create the znode in CLOSING state
try {
versionOfClosingNode = ZKAssign.createNodeClosing(
master.getZooKeeper(), region, master.getServerName());
if (versionOfClosingNode == -1) {
LOG.debug("Attempting to unassign region " +
region.getRegionNameAsString() + " but ZK closing node "
+ "can't be created.");
return;
}
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
// Handle the race between a master-initiated close and a regionserver-
// orchestrated split. See if the existing node is in a SPLITTING or
// SPLIT state. If so, the regionserver started an operation on the node
// before we could get our CLOSING in; deal with it.
NodeExistsException nee = (NodeExistsException)e;
String path = nee.getPath();
try {
if (isSplitOrSplitting(path)) {
LOG.debug(path + " is SPLIT or SPLITTING; " +
"skipping unassign because region no longer exists -- its split");
return;
}
} catch (KeeperException.NoNodeException ke) {
LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
"; presuming split and that the region to unassign, " +
encodedName + ", no longer exists -- confirm", ke);
return;
} catch (KeeperException ke) {
// Unexpected state while checking the split znode; log the error and
// fall through to the abort below with the original exception.
LOG.error("Unexpected zk state", ke);
}
}
// If we get here, we don't understand what's going on -- abort.
master.abort("Unexpected ZK exception creating node CLOSING", e);
return;
}
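// The CLOSING znode now exists; record the region as PENDING_CLOSE in memory.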
state = new RegionState(region, RegionState.State.PENDING_CLOSE);
regionsInTransition.put(encodedName, state);
} else if (force && (state.isPendingClose() || state.isClosing())) {
LOG.debug("Attempting to unassign region " + region.getRegionNameAsString() +
" which is already " + state.getState() +
" but forcing to send a CLOSE RPC again ");
state.update(state.getState());
} else {
LOG.debug("Attempting to unassign region " +
region.getRegionNameAsString() + " but it is " +
"already in transition (" + state.getState() + ", force=" + force + ")");
return;
}
}
// Send CLOSE RPC
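// Look up the server the master currently believes is hosting the region.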
ServerName server = null;
synchronized (this.regions) {
server = regions.get(region);
}
try {
// TODO: We should consider making this look more like it does for the
// region open where we catch all throwables and never abort
if (serverManager.sendRegionClose(server, state.getRegion(),
versionOfClosingNode)) {
LOG.debug("Sent CLOSE to " + server + " for region " +
region.getRegionNameAsString());
return;
}
// This should never happen; currently the regionserver close RPC always returns true.
LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
region.getRegionNameAsString());
} catch (NotServingRegionException nsre) {
LOG.info("Server " + server + " returned " + nsre + " for " +
region.getRegionNameAsString());
// Presume the master has stale data and that the remote side just split;
// the split message, when it comes in, will fix up the master's in-memory
// cluster state.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
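// A NotServingRegionException for a region of a DISABLING table means the
// region is already offline; drop it from the master's in-memory state.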
if (t instanceof NotServingRegionException) {
if (checkIfRegionBelongsToDisabling(region)) {
// Remove from the regionsInTransition map
LOG.info("Table " + region.getTableNameAsString()
+ " is in DISABLING state and region " + region
+ " is already offline; removing it from the master's in-memory state");
synchronized (this.regionsInTransition) {
this.regionsInTransition.remove(region.getEncodedName());
}
// Remove from the regionsMap
synchronized (this.regions) {
this.regions.remove(region);
}
}
}
// The regionserver is already processing this region; we only need to
// refresh the timestamp on its in-transition state.
if (t instanceof RegionAlreadyInTransitionException) {
LOG.debug("Updating the timestamp for " + state);
state.update(state.getState());
}
}
LOG.info("Server " + server + " returned " + t + " for " +
region.getEncodedName());
// Presume the unassign will be retried or the server will expire.