// --- Validation phase -------------------------------------------------
// Verify the cluster's datastores and networks are reachable from the
// target vCenter clusters before attempting any resize work.
// validate accessibility
validateDatastore(dsNames, vcClusters);
validateNetworkAccessibility(cluster.getName(), cluster.fetchNetworkNameList(), vcClusters);

// Look up the node group being resized; fail fast if it does not exist.
NodeGroupEntity group =
clusterEntityMgr.findByName(cluster, nodeGroupName);
if (group == null) {
logger.error("nodegroup " + nodeGroupName + " of cluster "
+ clusterName + " does not exist");
throw ClusterManagerException.NODEGROUP_NOT_FOUND_ERROR(nodeGroupName);
}

// Internal invariant: a persisted node group always carries at least one role.
AuAssert.check(!group.getRoleNameList().isEmpty(), "roles should not be empty");

// Ask the app manager's software layer whether every role in this group
// supports scaling; reject the resize if any role does not.
SoftwareManager softMgr =
softwareManagerCollector
.getSoftwareManager(cluster.getAppManager());
List<String> unsupportedRoles =
softMgr.validateScaling(clusterEntityMgr
.toNodeGroupInfo(clusterName, nodeGroupName));
if (!unsupportedRoles.isEmpty()) {
logger.info("can not resize node group with role: " + unsupportedRoles);
throw ClusterManagerException.ROLES_NOT_SUPPORTED(unsupportedRoles);
}

// Resize is only permitted while the cluster's services are active (RUNNING).
if (!cluster.getStatus().isActiveServiceStatus()) {
logger.error("cluster " + clusterName
+ " can be resized only in RUNNING status, it is now in "
+ cluster.getStatus() + " status");
throw ClusterManagerException.UPDATE_NOT_ALLOWED_ERROR(clusterName,
"To update a cluster, its status must be RUNNING");
}

// Only grow operations are supported here: the new instance count must be
// strictly greater than the currently defined count (shrink is rejected).
if (instanceNum <= group.getDefineInstanceNum()) {
logger.error("node group " + nodeGroupName
+ " cannot be shrinked from " + group.getDefineInstanceNum()
+ " to " + instanceNum + " nodes");
throw ClusterManagerException.SHRINK_OP_NOT_SUPPORTED(nodeGroupName,
instanceNum, group.getDefineInstanceNum());
}

// If the group pins N instances per host, the requested total must be a
// multiple of N so nodes distribute evenly across hosts.
Integer instancePerHost = group.getInstancePerHost();
if (instancePerHost != null && instanceNum % instanceNum % instancePerHost != 0) {
throw BddException
.INVALID_PARAMETER(
"instance number",
new StringBuilder(100)
.append(instanceNum)
.append(
".instanceNum must be evenly divisible by instancePerHost")
.toString());
}

// Capacity checks: enough hosts exist (and rack placement can be satisfied)
// for the requested instance count.
ValidationUtils.validHostNumber(clusterEntityMgr, group, instanceNum);
ValidationUtils.hasEnoughHost(rackInfoMgr, clusterEntityMgr, group,
instanceNum);

// --- Persist the new size ---------------------------------------------
// Record the old count first so it can be restored if the job fails to launch.
int oldInstanceNum = group.getDefineInstanceNum();
group.setDefineInstanceNum(instanceNum);
clusterEntityMgr.update(group);
// Clear any stale error message from a previous failed action on this cluster.
clusterEntityMgr.cleanupActionError(clusterName);

// --- Launch the resize job --------------------------------------------
// Build the batch job parameters (Spring Batch style JobParameter map).
// create job
Map<String, JobParameter> param = new TreeMap<String, JobParameter>();
param.put(JobConstants.CLUSTER_NAME_JOB_PARAM, new JobParameter(
clusterName));
param.put(JobConstants.GROUP_NAME_JOB_PARAM, new JobParameter(
nodeGroupName));
param.put(JobConstants.GROUP_INSTANCE_NEW_NUMBER_JOB_PARAM,
new JobParameter(Long.valueOf(instanceNum)));
param.put(JobConstants.GROUP_INSTANCE_OLD_NUMBER_JOB_PARAM,
new JobParameter(Long.valueOf(oldInstanceNum)));
// Timestamp makes each launch's parameter set unique so the job can be re-run.
param.put(JobConstants.TIMESTAMP_JOB_PARAM, new JobParameter(new Date()));
param.put(JobConstants.CLUSTER_SUCCESS_STATUS_JOB_PARAM,
new JobParameter(ClusterStatus.RUNNING.name()));
// NOTE(review): failure status is also RUNNING — presumably the cluster is
// returned to RUNNING even when the resize job fails; confirm this is intended
// rather than a copy/paste of the success status.
param.put(JobConstants.CLUSTER_FAILURE_STATUS_JOB_PARAM,
new JobParameter(ClusterStatus.RUNNING.name()));
// Limit post-job node status verification to the resized group only.
param.put(JobConstants.VERIFY_NODE_STATUS_SCOPE_PARAM, new JobParameter(
JobConstants.GROUP_NODE_SCOPE_VALUE));
JobParameters jobParameters = new JobParameters(param);

// Mark the cluster UPDATING for the duration of the resize job.
clusterEntityMgr.updateClusterStatus(clusterName, ClusterStatus.UPDATING);
try {
// Returns the launched job's execution id to the caller.
return jobManager.runJob(JobConstants.RESIZE_CLUSTER_JOB_NAME,
jobParameters);
} catch (Exception e) {
// Launch failed: roll back both the cluster status and the persisted
// instance count to their pre-resize values, then propagate the error.
logger.error("Failed to resize cluster " + clusterName, e);
clusterEntityMgr.updateClusterStatus(clusterName,
ClusterStatus.RUNNING);
group.setDefineInstanceNum(oldInstanceNum);
clusterEntityMgr.update(group);
throw e;
}
}