final String tarStr)
throws ManageException {
// Per-resource locks: the main state lock, plus a dedicated "destroy_" lock
// used to serialize destroy processing while the main lock is temporarily
// released around the (potentially blocking) cancellation request below.
// NOTE(review): `lock` appears to be already held by the caller -- it is
// unlocked and later re-acquired around req.execute() below; confirm against
// the enclosing handler chain.
final LockManager lockManager = resource.getLockManager();
final Lock lock = lockManager.getLock(id);
final Lock destroy_lock = lockManager.getLock("destroy_" + id);

// Guard: a resource must never reach this handler while already in one of
// the CANCELLING_* states.  Returning true presumably signals "handled,
// stop processing the transition chain" -- TODO confirm against caller.
if (current >= STATE_CANCELLING_STAGING_IN
        && current <= STATE_CANCELLING_STAGING_OUT) {
    // since setState(STATE_DESTROYING) is called before any
    // invocation to a CANCELLING command, this should never be
    // the case (because setState doesn't call StateTransition.run()
    // if target has already been set to DESTROYING (target =
    // STATE_DESTROYING is only the case here in this handler when
    // setTargetState first changes it to DESTROYING
    logger.fatal("programming error, stopping state transition");
    // stop processing
    return true;
    // TODO: In the future an action should be allowed to be
    // cancelled without its target being set to DESTROYING
    // first. When that happens, this assumption above will
    // need to change (add another handler before this remove
    // handler to handle resources with a cancelling state or
    // cancel target state. A smart scheduler could for
    // example just want cancelPropagate to execute because
    // it got a priority request and needs the network, i.e.,
    // postpone functionality.
}

// This handler only acts when the resource is being destroyed; returning
// false presumably lets the next handler in the chain run -- TODO confirm.
if (target != STATE_DESTROYING) {
    return false;
}

if (this.trace) {
    logger.trace("\n\n   ***** ST--remove: processing " +
            idStr + ", current = " + curStr + ", target = " +
            tarStr + "\n");
}

// Build the request context carrying resource/group identity to whatever
// cancellation command ends up executing.
final WorkspaceRequestContext requestContext =
        new WorkspaceRequestContext(id, resource.getName(),
                this.locator, this.lager);

requestContext.setGroupID(resource.getGroupId());
requestContext.setGroupSize(resource.getGroupSize());
if (resource.isLastInGroup()) {
    requestContext.setLastInGroup(true);
    // Consume the flag so only one destroy pass reports "last in group".
    resource.setLastInGroup(false);
}
requestContext.setPartOfGroupRequest(resource.isPartOfGroupRequest());

// Map the current state to the appropriate cancellation request and the
// CANCELLING_* state the resource moves to while that request runs.
// req == null means there is nothing to cancel (no-op further below).
WorkspaceRequest req = null;
int nextstate = STATE_INVALID;

switch (current) {
    case STATE_DESTROY_SUCCEEDED:
        // Already destroyed: nothing to cancel.
        break;
    case STATE_DESTROY_FAILED:
        // Previous destroy failed: retry by cancelling everything at VMM.
        req = reqFactory.cancelAllAtVMM();
        nextstate = STATE_CANCELLING_AT_VMM;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_STAGING_IN: // now unused
    case STATE_UNPROPAGATED:
        req = reqFactory.cancelUnpropagated();
        nextstate = STATE_CANCELLING_UNPROPAGATED;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_PROPAGATING:
        req = reqFactory.cancelPropagating();
        nextstate = STATE_CANCELLING_PROPAGATING;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_PROPAGATING_TO_START:
        req = reqFactory.cancelPropagatingToStart();
        nextstate = STATE_CANCELLING_PROPAGATING_TO_START;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_PROPAGATING_TO_PAUSE:
        req = reqFactory.cancelPropagatingToPause();
        nextstate = STATE_CANCELLING_PROPAGATING_TO_PAUSE;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_PROPAGATED:
    case STATE_STARTING:
    case STATE_STARTED:
    case STATE_SERIALIZING:
    case STATE_SERIALIZED:
    case STATE_PAUSING:
    case STATE_PAUSED:
    case STATE_SHUTTING_DOWN:
        // Anything running/resident at the VMM: one blanket cancellation.
        req = reqFactory.cancelAllAtVMM();
        nextstate = STATE_CANCELLING_AT_VMM;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_READYING_FOR_TRANSPORT:
        req = reqFactory.cancelReadyingForTransport();
        nextstate = STATE_CANCELLING_READYING_FOR_TRANSPORT;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_READY_FOR_TRANSPORT:
        req = reqFactory.cancelReadyForTransport();
        nextstate = STATE_CANCELLING_READY_FOR_TRANSPORT;
        requestContext.setVm(resource.getVM());
        break;
    case STATE_STAGING_OUT: // now unused
    default:
        // Unrecognized/terminal states: no cancellation work (req stays
        // null).  Corrupted states (>= STATE_CORRUPTED) presumably also
        // land here and are handled by the block below -- TODO confirm
        // that STATE_CORRUPTED is above all switch cases.
}

// Corrupted states encode the state at corruption time as an offset from
// STATE_CORRUPTED; recover that to decide whether VMM-side cleanup helps.
if (current >= STATE_CORRUPTED) {

    // currently we will try to do something about a workspace
    // corrupted at times that may have left image files or state
    // at the backend node, we do not handle other corrupt-*
    // situations now.

    final int oldstate = current - STATE_CORRUPTED;

    if (oldstate >= STATE_PROPAGATING
            && oldstate < STATE_READYING_FOR_TRANSPORT) {

        req = reqFactory.cancelAllAtVMM();
        nextstate = STATE_CANCELLING_AT_VMM;
        requestContext.setVm(resource.getVM());

    } else {
        // candidate for admin log/trigger of severe issues
        final String err = "Destroying a corrupted " +
                "resource in state '" + curStr +
                "'.  That state does not indicate files or " +
                "cruft may be on VMM node, not doing anything" +
                " (but there may be stray staged files off-VMM).";
        if (this.event) {
            logger.info(Lager.ev(id) + err);
        } else if (this.trace) {
            logger.trace(idStr + err);
        }
    }
}

if (req != null) {

    // Move to the transient CANCELLING_* state before executing, and ask
    // for notification when DESTROYING is reached.
    resource.setStateUnderLock(nextstate, null);
    requestContext.setNotify(STATE_DESTROYING);
    req.setRequestContext(requestContext);

    if (this.trace) {
        logger.trace("\n\n   ***** ST--remove: " + idStr
                + ", executing " + req.toString() + "\n");
    }

    // Lock hand-off: take the destroy lock BEFORE releasing the main
    // lock, so no other destroy can interleave while the main lock is
    // free during the (possibly long-running) request execution.
    // NOTE(review): InterruptedException is wrapped without calling
    // Thread.currentThread().interrupt(), so the interrupt status is
    // lost -- consider restoring it.
    try {
        destroy_lock.lockInterruptibly();
    } catch (InterruptedException e) {
        throw new ManageException(e.getMessage(), e);
    }

    lock.unlock();

    // TODO: add a timeout
    try {
        req.execute(); // could block

    } catch (Throwable t) {
        // candidate for admin log/trigger of severe issues
        // Deliberate best-effort: execution failures are logged and the
        // destroy path continues.
        logger.error("",t);
    } finally {
        // Reverse hand-off: re-take the main lock, then drop the destroy
        // lock.  NOTE(review): if lockInterruptibly() throws here,
        // destroy_lock.unlock() is skipped (the destroy lock stays held)
        // and the new ManageException masks any Throwable from
        // req.execute() -- both look like defects worth fixing.
        try {
            lock.lockInterruptibly();
            destroy_lock.unlock();
        } catch (InterruptedException e) {
            throw new ManageException(e.getMessage(), e);
        }
    }