@Override
public boolean maintain(DataStore store) {
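// Overall flow: refuse to run if a sibling pool is already being prepared,
// flag this pool as PrepareForMaintenance, tell every relevant host to stop
// heartbeating to it, then stop (and, when another Up pool exists, restart)
// the VMs whose volumes live on it.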
Long userId = UserContext.current().getCallerUserId();
User user = _userDao.findById(userId);
Account account = UserContext.current().getCaller();
StoragePoolVO pool = this.primaryDataStoreDao.findById(store.getId());
try {
List<StoragePoolVO> spes = null;
// Handle zone-wide and cluster-wide storage scopes.
// For a zone-wide pool (ZWPS) the pod id and cluster id are empty, so pass null for both.
if (pool.getScope() == ScopeType.ZONE) {
spes = primaryDataStoreDao.listBy(
pool.getDataCenterId(), null,
null, ScopeType.ZONE);
}
else {
spes = primaryDataStoreDao.listBy(
pool.getDataCenterId(), pool.getPodId(),
pool.getClusterId(), ScopeType.CLUSTER);
}
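// Only one pool per scope may be in PrepareForMaintenance at a time;
// bail out if a sibling pool is already being prepared.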
for (StoragePoolVO sp : spes) {
if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode; pool " + sp.getId()
+ " is already in PrepareForMaintenance mode");
}
}
StoragePool storagePool = (StoragePool) store;
// Handle zone-wide and cluster-wide primary storage.
List<HostVO> hosts = new ArrayList<HostVO>();
// if the pool is zone-wide, get all up-and-enabled hosts in the zone running the pool's hypervisor, so a ModifyStoragePoolCommand can be sent to each
// TODO: for a zone-wide pool this may list a very large number of hosts, which can cause performance/OOM issues.
if (pool.getScope().equals(ScopeType.ZONE)) {
hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
} else {
hosts = _resourceMgr.listHostsInClusterByStatus(
pool.getClusterId(), Status.Up);
}
if (hosts == null || hosts.isEmpty()) {
// no hosts to notify, so the pool can go straight into Maintenance
pool.setStatus(StoragePoolStatus.Maintenance);
primaryDataStoreDao.update(pool.getId(), pool);
return true;
} else {
// 1. Set the pool state to PrepareForMaintenance
pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
primaryDataStoreDao.update(pool.getId(), pool);
}
// Remove the heartbeat: send ModifyStoragePoolCommand with add=false so each host stops using this pool.
for (HostVO host : hosts) {
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(
false, storagePool);
final Answer answer = agentMgr.easySend(host.getId(), cmd);
if (answer == null || !answer.getResult()) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePoolCommand (add=false) failed on host " + host.getId() + " due to "
+ ((answer == null) ? "a null answer" : answer.getDetails()));
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("ModifyStoragePoolCommand (add=false) succeeded on host " + host.getId());
}
}
}
// Check whether other primary storage pools in the zone are still Up.
// If so, the system VMs can be restarted on them;
// if not, just stop all VMs on this pool.
List<StoragePoolVO> upPools = primaryDataStoreDao
.listByStatusInZone(pool.getDataCenterId(),
StoragePoolStatus.Up);
boolean restart = upPools != null && !upPools.isEmpty();
// 2. Get a list of all the volumes within this storage pool
List<VolumeVO> allVolumes = this.volumeDao.findByPoolId(pool
.getId());
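// Each volume's owning VM gets a StoragePoolWorkVO record; its two flags
// (stoppedForMaintenance / startedAfterMaintenance) track how far
// maintenance processing got for that VM.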
// 3. Enqueue to the work queue
for (VolumeVO volume : allVolumes) {
VMInstanceVO vmInstance = vmDao
.findById(volume.getInstanceId());
if (vmInstance == null) {
continue;
}
// enqueue storage pool work for VMs that are Running, Starting, or Stopping
if (vmInstance.getState().equals(State.Running)
|| vmInstance.getState().equals(State.Starting)
|| vmInstance.getState().equals(State.Stopping)) {
try {
StoragePoolWorkVO work = new StoragePoolWorkVO(
vmInstance.getId(), pool.getId(), false, false,
server.getId());
_storagePoolWorkDao.persist(work);
} catch (Exception e) {
// a work record for this VM/pool pair already exists; re-use it by resetting its values
if (s_logger.isDebugEnabled()) {
s_logger.debug("Work record already exists; re-using it by resetting its values");
}
StoragePoolWorkVO work = _storagePoolWorkDao
.findByPoolIdAndVmId(pool.getId(),
vmInstance.getId());
work.setStartedAfterMaintenance(false);
work.setStoppedForMaintenance(false);
work.setManagementServerId(server.getId());
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// 4. Process the queue
List<StoragePoolWorkVO> pendingWork = _storagePoolWorkDao
.listPendingWorkForPrepareForMaintenanceByPoolId(pool
.getId());
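// Each pending work item corresponds to a VM that must be stopped;
// system VMs (console proxy, SSVM, domain router) are restarted on
// another pool when one is available, while user VMs are left stopped.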
for (StoragePoolWorkVO work : pendingWork) {
// shut down the running vms
VMInstanceVO vmInstance = vmDao.findById(work.getVmId());
if (vmInstance == null) {
continue;
}
// if the instance is a console proxy, stop it and, when possible,
// restart it on another pool
if (vmInstance.getType().equals(
VirtualMachine.Type.ConsoleProxy)) {
ConsoleProxyVO consoleProxy = _consoleProxyDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(consoleProxy, true, user, account)) {
String errorMsg = "There was an error stopping the console proxy id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (this.vmMgr.advanceStart(consoleProxy, null, user,
account, null) == null) {
String errorMsg = "There was an error starting the console proxy id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is a user VM, stop it (user VMs are not restarted here)
if (vmInstance.getType().equals(VirtualMachine.Type.User)) {
UserVmVO userVm = userVmDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(userVm, true, user, account)) {
String errorMsg = "There was an error stopping the user vm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
// if the instance is a secondary storage VM, stop it and, when
// possible, restart it on another pool
if (vmInstance.getType().equals(
VirtualMachine.Type.SecondaryStorageVm)) {
SecondaryStorageVmVO secStrgVm = _secStrgDao
.findById(vmInstance.getId());
if (!vmMgr.advanceStop(secStrgVm, true, user, account)) {
String errorMsg = "There was an error stopping the ssvm id: "
+ vmInstance.getId()
+ " ,cannot enable storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(secStrgVm, null, user, account, null) == null) {
String errorMsg = "There was an error starting the ssvm id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
// if the instance is a domain router, stop it and, when possible,
// restart it on another pool
if (vmInstance.getType().equals(
VirtualMachine.Type.DomainRouter)) {
DomainRouterVO domR = _domrDao.findById(vmInstance.getId());
if (!vmMgr.advanceStop(domR, true, user, account)) {
String errorMsg = "There was an error stopping the domain router id: "
+ vmInstance.getId()
+ " ,cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
throw new CloudRuntimeException(errorMsg);
} else {
// update work status
work.setStoppedForMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
if (restart) {
if (vmMgr.advanceStart(domR, null, user, account, null) == null) {
String errorMsg = "There was an error starting the domain router id: "
+ vmInstance.getId()
+ " on another storage pool, cannot enable primary storage maintenance";
s_logger.warn(errorMsg);
} else {
// update work status
work.setStartedAfterMaintenance(true);
_storagePoolWorkDao.update(work.getId(), work);
}
}
}
}
} catch (Exception e) {
s_logger.error("Exception in enabling primary storage maintenance:", e);
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);
this.primaryDataStoreDao.update(pool.getId(), pool);
// preserve the original cause so the stack trace is not lost
throw new CloudRuntimeException(e.getMessage(), e);
}
return true;
}