v.destroy(conn);
} else if ( vRec.powerState == VmPowerState.RUNNING ) {
String host = vRec.residentOn.getUuid(conn);
String msg = "VM " + vmName + " is runing on host " + host;
s_logger.debug(msg);
return new StartAnswer(cmd, msg, host);
} else {
String msg = "There is already a VM having the same name " + vmName + " vm record " + vRec.toString();
s_logger.warn(msg);
return new StartAnswer(cmd, msg);
}
}
}
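// Mark the VM as Starting in the shared per-cluster VM state map before making any calls to the XenServer host.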
synchronized (_cluster.intern()) {
s_vms.put(_cluster, _name, vmName, State.Starting);
}
s_logger.debug("1. The VM " + vmName + " is in Starting state.");
Host host = Host.getByUuid(conn, _host.uuid);
vm = createVmFromTemplate(conn, vmSpec, host);
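// Attach each disk: managed volumes are prepared first (recording their IQN-to-VDI path mapping), then a VBD is created for every disk.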
for (DiskTO disk : vmSpec.getDisks()) {
VDI newVdi = prepareManagedDisk(conn, disk, vmName);
if (newVdi != null) {
String path = newVdi.getUuid(conn);
iqnToPath.put(disk.getDetails().get(DiskTO.IQN), path);
}
createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), newVdi);
}
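// Non-user (system) VMs also get a patch VBD attached.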
if (vmSpec.getType() != VirtualMachine.Type.User) {
createPatchVbd(conn, vmName, vm);
}
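// Create a VIF for each NIC defined in the VM spec.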
for (NicTO nic : vmSpec.getNics()) {
createVif(conn, vmName, vm, vmSpec, nic);
}
startVM(conn, host, vm, vmName);
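// When open vSwitch is enabled, push tag/flow rules to the host for every vSwitch-backed NIC.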
if (_isOvs) {
// TODO(Salvatore-orlando): This code should go
for (NicTO nic : vmSpec.getNics()) {
if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vswitch) {
HashMap<String, String> args = parseDefaultOvsRuleComamnd(BroadcastDomainType.getValue(nic.getBroadcastUri()));
OvsSetTagAndFlowCommand flowCmd = new OvsSetTagAndFlowCommand(args.get("vmName"), args.get("tag"), args.get("vlans"),
args.get("seqno"), Long.parseLong(args.get("vmId")));
OvsSetTagAndFlowAnswer r = execute(flowCmd);
if (!r.getResult()) {
s_logger.warn("Failed to set flow for VM " + r.getVmId());
} else {
s_logger.info("Success to set flow for VM " + r.getVmId());
}
}
}
}
cleanUpTmpDomVif(conn);
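// If the host can bridge-firewall, program default security group rules: system VMs get the systemvm rule set, user VMs get per-NIC rules.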
if (_canBridgeFirewall) {
String result = null;
if (vmSpec.getType() != VirtualMachine.Type.User) {
NicTO[] nics = vmSpec.getNics();
boolean secGrpEnabled = false;
for (NicTO nic : nics) {
if (nic.isSecurityGroupEnabled() || (nic.getIsolationUri() != null
&& nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString()))) {
secGrpEnabled = true;
break;
}
}
if (secGrpEnabled) {
result = callHostPlugin(conn, "vmops", "default_network_rules_systemvm", "vmName", vmName);
if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
s_logger.warn("Failed to program default network rules for " + vmName);
} else {
s_logger.info("Programmed default network rules for " + vmName);
}
}
} else {
// For user VMs, program default network rules on each NIC that has security groups enabled or uses the EC2 isolation URI scheme
NicTO[] nics = vmSpec.getNics();
for (NicTO nic : nics) {
if (nic.isSecurityGroupEnabled() || (nic.getIsolationUri() != null
&& nic.getIsolationUri().getScheme().equalsIgnoreCase(IsolationType.Ec2.toString()))) {
List<String> nicSecIps = nic.getNicSecIps();
String secIpsStr;
StringBuilder sb = new StringBuilder();
if (nicSecIps != null) {
for (String ip : nicSecIps) {
sb.append(ip).append(":");
}
secIpsStr = sb.toString();
} else {
secIpsStr = "0:";
}
result = callHostPlugin(conn, "vmops", "default_network_rules", "vmName", vmName, "vmIP", nic.getIp(), "vmMAC", nic.getMac(), "vmID", Long.toString(vmSpec.getId()), "secIps", secIpsStr);
if (result == null || result.isEmpty() || !Boolean.parseBoolean(result)) {
s_logger.warn("Failed to program default network rules for " + vmName+" on nic with ip:"+nic.getIp()+" mac:"+nic.getMac());
} else {
s_logger.info("Programmed default network rules for " + vmName+" on nic with ip:"+nic.getIp()+" mac:"+nic.getMac());
}
}
}
}
}
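// VM started successfully: mark it Running and return the IQN-to-path mapping with the answer.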
state = State.Running;
StartAnswer startAnswer = new StartAnswer(cmd);
startAnswer.setIqnToPath(iqnToPath);
return startAnswer;
} catch (Exception e) {
s_logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e);
String msg = handleVmStartFailure(conn, vmName, vm, "", e);
StartAnswer startAnswer = new StartAnswer(cmd, msg);
startAnswer.setIqnToPath(iqnToPath);
return startAnswer;
} finally {
synchronized (_cluster.intern()) {
if (state != State.Stopped) {