String appManager = cluster.getAppManager();
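// fall back to Ironfan, the default application manager, when the spec omits one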
if (appManager == null) {
appManager = Constants.IRONFAN;
}
SoftwareManager softwareManager = getSoftwareManager(appManager);
// reject a missing distro before querying the app manager for it
if (cluster.getDistro() == null) {
throw BddException.INVALID_PARAMETER("distro", cluster.getDistro());
}
HadoopStack stack =
filterDistroFromAppManager(softwareManager, cluster.getDistro());
if (stack == null) {
throw BddException.INVALID_PARAMETER("distro", cluster.getDistro());
}
// Validate role names only on the server side, not in the CLI or GUI, because the role definitions exist only on the server.
ClusterBlueprint blueprint = cluster.toBlueprint();
try {
softwareManager.validateBlueprint(blueprint);
cluster.validateClusterCreate(failedMsgList, warningMsgList);
} catch (ValidationException e) {
failedMsgList.addAll(e.getFailedMsgList());
warningMsgList.addAll(e.getWarningMsgList());
}
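// abort on any hard validation failures collected above; warnings alone do not block creation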
if (!failedMsgList.isEmpty()) {
throw ClusterConfigException.INVALID_SPEC(failedMsgList);
}
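// verify the cluster's rack information is consistent with its placement policies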
if (!validateRacksInfo(cluster, failedMsgList)) {
throw ClusterConfigException.INVALID_PLACEMENT_POLICIES(failedMsgList);
}
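// if a local package repository is specified, validate its URL before accepting it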
String localRepoURL = cluster.getLocalRepoURL();
if (!CommonUtil.isBlank(localRepoURL)
&& !validateLocalRepoURL(localRepoURL)) {
throw ClusterConfigException.INVALID_LOCAL_REPO_URL(failedMsgList);
}
try {
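// cluster names must be unique; bail out before persisting anything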
ClusterEntity entity = clusterEntityMgr.findByName(name);
if (entity != null) {
logger.info("can not create cluster " + name
+ ", which is already existed.");
throw BddException.ALREADY_EXISTS("Cluster", name);
}
updateInfrastructure(cluster, softwareManager, blueprint);
// persist cluster config
logger.debug("begin to add cluster config for " + name);
Gson gson = new Gson();
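// a single Gson instance is reused for all JSON serialization below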
ClusterEntity clusterEntity = new ClusterEntity(name);
clusterEntity.setAppManager(appManager); // use the defaulted value, not the possibly-null spec field
clusterEntity.setDistro(cluster.getDistro());
clusterEntity.setDistroVendor(cluster.getDistroVendor());
clusterEntity.setDistroVersion(cluster.getDistroVersion());
clusterEntity.setStartAfterDeploy(true);
clusterEntity.setPassword(cluster.getPassword());
// set cluster version
clusterEntity.setVersion(clusterEntityMgr.getServerVersion());
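// automated elasticity only applies to clusters that contain compute-only node groups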
if (cluster.containsComputeOnlyNodeGroups(softwareManager)) {
clusterEntity.setAutomationEnable(automationEnable);
} else {
clusterEntity.setAutomationEnable(null);
}
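// -1 marks the VHM min/max node counts as not yet configured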
clusterEntity.setVhmMinNum(-1);
clusterEntity.setVhmMaxNum(-1);
if (cluster.getRpNames() != null && !cluster.getRpNames().isEmpty()) {
logger.debug("resource pool " + cluster.getRpNames()
+ " specified for cluster " + name);
clusterEntity.setVcRpNameList(cluster.getRpNames());
} else {
logger.debug("no resource pool name specified, use global configuration.");
}
if (cluster.getDsNames() != null && !cluster.getDsNames().isEmpty()) {
logger.debug("datastore " + cluster.getDsNames()
+ " specified for cluster " + name);
clusterEntity.setVcDatastoreNameList(cluster.getDsNames());
} else {
logger.debug("no datastore name specified, use global configuration.");
}
clusterEntity.setNetworkConfig(validateAndConvertNetNamesToNetConfigs(
cluster.getNetworkConfig(), cluster.getDistroVendor()
.equalsIgnoreCase(Constants.MAPR_VENDOR)));
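// 50030 is the default Hadoop 1.x JobTracker web UI port; updateVhmJobTrackerPort()
// can override it below when the user configuration specifies another port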
clusterEntity.setVhmJobTrackerPort("50030");
if (cluster.getConfiguration() != null
&& !cluster.getConfiguration().isEmpty()) {
// reuse the Gson instance created above
clusterEntity.setHadoopConfig(gson.toJson(cluster.getConfiguration()));
updateVhmJobTrackerPort(cluster, clusterEntity);
}
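// external HDFS / external MapReduce endpoints and the local repo URL are stored as advanced properties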
setAdvancedProperties(cluster.getExternalHDFS(),
cluster.getExternalMapReduce(), localRepoURL, clusterEntity);
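// convert node group specs into entities; per-group config validation honors cluster.isValidateConfig()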
NodeGroupCreate[] groups = cluster.getNodeGroups();
if (groups != null && groups.length > 0) {
clusterEntity.setNodeGroups(convertNodeGroupsToEntities(gson,
clusterEntity, cluster.getDistro(), groups,
cluster.isValidateConfig()));
// make sure each node group's memory size is no less than MIN_MEM_SIZE
validateMemorySize(clusterEntity.getNodeGroups(), failedMsgList);
if (!failedMsgList.isEmpty()) {
throw ClusterConfigException.INVALID_SPEC(failedMsgList);
}
}
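// default to no topology awareness when the spec omits a placement policy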
if (cluster.getTopologyPolicy() == null) {
clusterEntity.setTopologyPolicy(TopologyType.NONE);
} else {
clusterEntity.setTopologyPolicy(cluster.getTopologyPolicy());
}
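// HVE (Hadoop Virtualization Extensions) topology is only valid when the distro supports it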
if (clusterEntity.getTopologyPolicy() == TopologyType.HVE) {
// reuse the stack already resolved and null-checked above for this same distro
boolean hveSupported = stack.isHveSupported();
if (!hveSupported) {
throw ClusterConfigException.INVALID_TOPOLOGY_POLICY(
clusterEntity.getTopologyPolicy(),