Package com.cloud.org

Examples of com.cloud.org.Cluster
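
The snippets below come from CloudStack's deployment planners and resource managers. They share one pattern: resolve the Cluster through a DAO lookup, validate it (allocation state, hypervisor type), and hand it to a DeployDestination. The condensed sketch that follows illustrates that pattern; the helper method itself is hypothetical and not part of the CloudStack API, while the DAO calls, types, and checks are the ones used in the examples.

    // Condensed sketch of the pattern shown in the examples below.
    // The method name is hypothetical; the DAOs, types, and calls are taken from the snippets.
    private DeployDestination destinationFor(DataCenter dc, HostVO host, Map<Volume, StoragePool> storageVolMap) {
        Pod pod = _podDao.findById(host.getPodId());
        Cluster cluster = _clusterDao.findById(host.getClusterId());

        // A disabled cluster is never a valid deployment destination.
        if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) {
            return null;
        }
        return new DeployDestination(dc, pod, cluster, host, storageVolMap);
    }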


                    s_logger.debug("Looking for suitable pools for this host under zone: " + host.getDataCenterId() + ", pod: " + host.getPodId() + ", cluster: " +
                        host.getClusterId());
                }

                Pod pod = _podDao.findById(host.getPodId());
                Cluster cluster = _clusterDao.findById(host.getClusterId());

                if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                    DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                    s_logger.debug("Returning Deployment Destination: " + dest);
                    return dest;
                }

                // search for storage under the zone, pod, cluster of the host.
                DataCenterDeployment lastPlan =
                    new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), hostIdSpecified, plan.getPoolId(), null,
                        plan.getReservationContext());

                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                List<Volume> readyAndReusedVolumes = result.second();

                // choose the potential pool for this VM for this host
                if (!suitableVolumeStoragePools.isEmpty()) {
                    List<Host> suitableHosts = new ArrayList<Host>();
                    suitableHosts.add(host);
                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                        suitableHosts, suitableVolumeStoragePools, avoids,
                        getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                    if (potentialResources != null) {
                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                        // remove the reused vol<->pool from destination, since
                        // we don't have to prepare this volume.
                        for (Volume vol : readyAndReusedVolumes) {
                            storageVolMap.remove(vol);
                        }
                        DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                        s_logger.debug("Returning Deployment Destination: " + dest);
                        return dest;
                    }
                }
            }
            s_logger.debug("Cannnot deploy to specified host, returning.");
            return null;
        }

        if (vm.getLastHostId() != null && haVmTag == null) {
            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());

            HostVO host = _hostDao.findById(vm.getLastHostId());
            ServiceOfferingDetailsVO offeringDetails = null;
            if (host == null) {
                s_logger.debug("The last host of this VM cannot be found");
            } else if (avoids.shouldAvoid(host)) {
                s_logger.debug("The last host of this VM is in avoid set");
            } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
                s_logger.debug("The last Host, hostId: " + host.getId() +
                    " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
            } else if ((offeringDetails  = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.vgpuType.toString())) != null) {
                ServiceOfferingDetailsVO groupName = _serviceOfferingDetailsDao.findDetail(offering.getId(), GPU.Keys.pciDevice.toString());
                if(!_resourceMgr.isGPUDeviceAvailable(host.getId(), groupName.getValue(), offeringDetails.getValue())){
                    s_logger.debug("The last host of this VM does not have required GPU devices available");
                }
            } else {
                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
                    boolean hostTagsMatch = true;
                    if(offering.getHostTag() != null){
                        _hostDao.loadHostTags(host);
                        if (!(host.getHostTags() != null && host.getHostTags().contains(offering.getHostTag()))) {
                            hostTagsMatch = false;
                        }
                    }
                    if (hostTagsMatch) {
                        long cluster_id = host.getClusterId();
                        ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id, "cpuOvercommitRatio");
                        ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio");
                        Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                        Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                        if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
                                cpuOvercommitRatio, memoryOvercommitRatio, true)
                                && _capacityMgr.checkIfHostHasCpuCapability(host.getId(), offering.getCpu(), offering.getSpeed())) {
                            s_logger.debug("The last host of this VM is UP and has enough capacity");
                            s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
                                    + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                            // search for storage under the zone, pod, cluster of the last host.

                            Pod pod = _podDao.findById(host.getPodId());
                            Cluster cluster = _clusterDao.findById(host.getClusterId());

                            if (vm.getHypervisorType() == HypervisorType.BareMetal) {
                                DeployDestination dest = new DeployDestination(dc, pod, cluster, host, new HashMap<Volume, StoragePool>());
                                s_logger.debug("Returning Deployment Destination: " + dest);
                                return dest;


        if (pod != null && Grouping.AllocationState.Disabled == pod.getAllocationState()) {
            s_logger.info("Pod is currently disabled, cannot allocate to this pod: " + podId);
            return false;
        }

        Cluster cluster = _clusterDao.findById(clusterId);
        if (cluster != null && Grouping.AllocationState.Disabled == cluster.getAllocationState()) {
            s_logger.info("Cluster is currently disabled, cannot allocate to this cluster: " + clusterId);
            return false;
        }

        return true;
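
This allocation-state guard appears in several of the planners in this listing. As a more compact illustration of the same Cluster calls, here is a hedged sketch that wraps the check in a reusable helper; the helper name is made up, while the DAO lookup and the Grouping.AllocationState comparison are taken directly from the snippet.

    // Hypothetical wrapper around the allocation-state guard shown above.
    private boolean isClusterEnabled(Long clusterId) {
        Cluster cluster = _clusterDao.findById(clusterId);
        if (cluster == null) {
            return false; // treat an unknown cluster as unusable (the original guard lets null pass)
        }
        return Grouping.AllocationState.Disabled != cluster.getAllocationState();
    }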

       
    if (vm.getLastHostId() != null && haVmTag == null) {
      HostVO h = _hostDao.findById(vm.getLastHostId());
      DataCenter dc = _dcDao.findById(h.getDataCenterId());
      Pod pod = _podDao.findById(h.getPodId());
      Cluster c =  _clusterDao.findById(h.getClusterId());
      s_logger.debug("Start baremetal vm " + vm.getId() + " on last stayed host " + h.getId());
      return new DeployDestination(dc, pod, c, h);
    }
   
    if (haVmTag != null) {

                suitableHosts.add(host);

                Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
                if(potentialResources != null){
                    Pod pod = _podDao.findById(host.getPodId());
                    Cluster cluster = _clusterDao.findById(host.getClusterId());
                    Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                    // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
                    for(Volume vol : readyAndReusedVolumes){
                        storageVolMap.remove(vol);
                    }
                    DeployDestination dest =  new DeployDestination(dc, pod, cluster, host, storageVolMap);
                    s_logger.debug("Returning Deployment Destination: "+ dest);
                    return dest;
                }
            }
            s_logger.debug("Cannnot deploy to specified host, returning.");
            return null;
        }

        if (vm.getLastHostId() != null && haVmTag == null) {
            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " +vm.getLastHostId());

            HostVO host = _hostDao.findById(vm.getLastHostId());
            if(host == null){
                s_logger.debug("The last host of this VM cannot be found");
            }else if(avoid.shouldAvoid(host)){
                s_logger.debug("The last host of this VM is in avoid set");
            }else if(_capacityMgr.checkIfHostReachMaxGuestLimit(host)){
                s_logger.debug("The last Host, hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
            }else{
                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
                    if(_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true, cpuOverprovisioningFactor, true)){
                        s_logger.debug("The last host of this VM is UP and has enough capacity");
                        s_logger.debug("Now checking for suitable pools under zone: "+host.getDataCenterId() +", pod: "+ host.getPodId()+", cluster: "+ host.getClusterId());
                        //search for storage under the zone, pod, cluster of the last host.
                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, lastPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                        List<Volume> readyAndReusedVolumes = result.second();
                        //choose the potential pool for this VM for this host
                        if(!suitableVolumeStoragePools.isEmpty()){
                            List<Host> suitableHosts = new ArrayList<Host>();
                            suitableHosts.add(host);

                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);
                            if(potentialResources != null){
                                Pod pod = _podDao.findById(host.getPodId());
                                Cluster cluster = _clusterDao.findById(host.getClusterId());
                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
                                for(Volume vol : readyAndReusedVolumes){
                                    storageVolMap.remove(vol);
                                }

        }

        removeClustersCrossingThreshold(clusterList, avoid, vmProfile, plan);
       
        for(Long clusterId : clusterList){
            Cluster clusterVO = _clusterDao.findById(clusterId);

            if (clusterVO.getHypervisorType() != vmProfile.getHypervisorType()) {
                s_logger.debug("Cluster: "+clusterId + " has HyperVisorType that does not match the VM, skipping this cluster");
                avoid.addCluster(clusterVO.getId());
                continue;
            }
           
            s_logger.debug("Checking resources in Cluster: "+clusterId + " under Pod: "+clusterVO.getPodId());
            //search for resources(hosts and storage) under this zone, pod, cluster.
            DataCenterDeployment potentialPlan = new DataCenterDeployment(plan.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null, plan.getPoolId(), null, plan.getReservationContext());

            //find suitable hosts under this cluster, need as many hosts as we get.
            List<Host> suitableHosts = findSuitableHosts(vmProfile, potentialPlan, avoid, HostAllocator.RETURN_UPTO_ALL);
            //if found suitable hosts in this cluster, find suitable storage pools for each volume of the VM
            if(suitableHosts != null && !suitableHosts.isEmpty()){
                if (vmProfile.getHypervisorType() == HypervisorType.BareMetal) {
                    Pod pod = _podDao.findById(clusterVO.getPodId());
                    DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, suitableHosts.get(0));
                    return dest;
                }

                Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(vmProfile, potentialPlan, avoid, StoragePoolAllocator.RETURN_UPTO_ALL);
                Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                List<Volume> readyAndReusedVolumes = result.second();

                //choose the potential host and pool for the VM
                if(!suitableVolumeStoragePools.isEmpty()){
                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(suitableHosts, suitableVolumeStoragePools);

                    if(potentialResources != null){
                        Pod pod = _podDao.findById(clusterVO.getPodId());
                        Host host = _hostDao.findById(potentialResources.first().getId());
                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                        // remove the reused vol<->pool from destination, since we don't have to prepare this volume.
                        for(Volume vol : readyAndReusedVolumes){
                            storageVolMap.remove(vol);
                        }
                        DeployDestination dest =  new DeployDestination(dc, pod, clusterVO, host, storageVolMap );
                        s_logger.debug("Returning Deployment Destination: "+ dest);
                        return dest;
                    }
                }else{
                    s_logger.debug("No suitable storagePools found under this Cluster: "+clusterId);
                }
            }else{
                s_logger.debug("No suitable hosts found under this Cluster: "+clusterId);
            }
            avoid.addCluster(clusterVO.getId());
        }
        s_logger.debug("Could not find suitable Deployment Destination for this VM under any clusters, returning. ");
        return null;
    }


                    Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                            suitableHosts, suitableVolumeStoragePools, avoids,
                            getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                    if (potentialResources != null) {
                        Pod pod = _podDao.findById(host.getPodId());
                        Cluster cluster = _clusterDao.findById(host.getClusterId());
                        Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                        // remove the reused vol<->pool from destination, since
                        // we don't have to prepare this volume.
                        for (Volume vol : readyAndReusedVolumes) {
                            storageVolMap.remove(vol);
                        }
                        DeployDestination dest = new DeployDestination(dc, pod, cluster, host, storageVolMap);
                        s_logger.debug("Returning Deployment Destination: " + dest);
                        return dest;
                    }
                }
            }
            s_logger.debug("Cannnot deploy to specified host, returning.");
            return null;
        }

        if (vm.getLastHostId() != null && haVmTag == null) {
            s_logger.debug("This VM has last host_id specified, trying to choose the same host: " + vm.getLastHostId());

            HostVO host = _hostDao.findById(vm.getLastHostId());
            if (host == null) {
                s_logger.debug("The last host of this VM cannot be found");
            } else if (avoids.shouldAvoid(host)) {
                s_logger.debug("The last host of this VM is in avoid set");
            } else if (_capacityMgr.checkIfHostReachMaxGuestLimit(host)) {
                s_logger.debug("The last Host, hostId: "
                        + host.getId()
                        + " already has max Running VMs(count includes system VMs), skipping this and trying other available hosts");
            } else {
                if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) {
                    long cluster_id = host.getClusterId();
                    ClusterDetailsVO cluster_detail_cpu = _clusterDetailsDao.findDetail(cluster_id,
                            "cpuOvercommitRatio");
                    ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id,
                            "memoryOvercommitRatio");
                    Float cpuOvercommitRatio = Float.parseFloat(cluster_detail_cpu.getValue());
                    Float memoryOvercommitRatio = Float.parseFloat(cluster_detail_ram.getValue());
                    if (_capacityMgr.checkIfHostHasCapacity(host.getId(), cpu_requested, ram_requested, true,
                            cpuOvercommitRatio, memoryOvercommitRatio, true)) {
                        s_logger.debug("The last host of this VM is UP and has enough capacity");
                        s_logger.debug("Now checking for suitable pools under zone: " + host.getDataCenterId()
                                + ", pod: " + host.getPodId() + ", cluster: " + host.getClusterId());
                        // search for storage under the zone, pod, cluster of
                        // the last host.
                        DataCenterDeployment lastPlan = new DataCenterDeployment(host.getDataCenterId(),
                                host.getPodId(), host.getClusterId(), host.getId(), plan.getPoolId(), null);
                        Pair<Map<Volume, List<StoragePool>>, List<Volume>> result = findSuitablePoolsForVolumes(
                                vmProfile, lastPlan, avoids, HostAllocator.RETURN_UPTO_ALL);
                        Map<Volume, List<StoragePool>> suitableVolumeStoragePools = result.first();
                        List<Volume> readyAndReusedVolumes = result.second();

                        // choose the potential pool for this VM for this host
                        if (!suitableVolumeStoragePools.isEmpty()) {
                            List<Host> suitableHosts = new ArrayList<Host>();
                            suitableHosts.add(host);
                            Pair<Host, Map<Volume, StoragePool>> potentialResources = findPotentialDeploymentResources(
                                    suitableHosts, suitableVolumeStoragePools, avoids,
                                    getPlannerUsage(planner, vmProfile, plan, avoids), readyAndReusedVolumes);
                            if (potentialResources != null) {
                                Pod pod = _podDao.findById(host.getPodId());
                                Cluster cluster = _clusterDao.findById(host.getClusterId());
                                Map<Volume, StoragePool> storageVolMap = potentialResources.second();
                                // remove the reused vol<->pool from
                                // destination, since we don't have to prepare
                                // this volume.
                                for (Volume vol : readyAndReusedVolumes) {


        }

        // call to core process
        DataCenterVO dcVO = _dcDao.findById(destinationHost.getDataCenterId());
        HostPodVO pod = _podDao.findById(destinationHost.getPodId());
        Cluster cluster = _clusterDao.findById(destinationHost.getClusterId());
        DeployDestination dest = new DeployDestination(dcVO, pod, cluster, destinationHost);

        //check max guest vm limit for the destinationHost
        HostVO destinationHostVO = _hostDao.findById(destinationHost.getId());
        if(_capacityMgr.checkIfHostReachMaxGuestLimit(destinationHostVO)){

        responseGenerator = Mockito.mock(ResponseGenerator.class);

        addClusterCmd._resourceService = resourceService;
        addClusterCmd._responseGenerator = responseGenerator;

        Cluster cluster = Mockito.mock(Cluster.class);
        Cluster[] clusterArray = new Cluster[] { cluster };

        Mockito.doReturn(Arrays.asList(clusterArray)).when(resourceService).discoverCluster(addClusterCmd);

        addClusterCmd.execute();
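
The test above mocks Cluster directly with Mockito. As a small hedged sketch, the same mock can be given canned getter values for the methods the planner snippets exercise; the return values here are arbitrary, and only getters that appear in the snippets above are stubbed.

    Cluster cluster = Mockito.mock(Cluster.class);
    Mockito.when(cluster.getId()).thenReturn(1L);
    Mockito.when(cluster.getPodId()).thenReturn(1L);
    Mockito.when(cluster.getHypervisorType()).thenReturn(HypervisorType.KVM);
    Mockito.when(cluster.getAllocationState()).thenReturn(Grouping.AllocationState.Enabled);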
