Package: com.vmware.bdd.entity

Examples of com.vmware.bdd.entity.ClusterEntity


            && !validateLocalRepoURL(localRepoURL)) {
         throw ClusterConfigException.INVALID_LOCAL_REPO_URL(failedMsgList);
      }

      try {
         ClusterEntity entity = clusterEntityMgr.findByName(name);
         if (entity != null) {
            logger.info("can not create cluster " + name
                  + ", which is already existed.");
            throw BddException.ALREADY_EXISTS("Cluster", name);
         }

         updateInfrastructure(cluster, softwareManager, blueprint);
         // persist cluster config
         logger.debug("begin to add cluster config for " + name);
         Gson gson = new Gson();
         ClusterEntity clusterEntity = new ClusterEntity(name);
         clusterEntity.setAppManager(cluster.getAppManager());
         clusterEntity.setDistro(cluster.getDistro());
         clusterEntity.setDistroVendor(cluster.getDistroVendor());
         clusterEntity.setDistroVersion(cluster.getDistroVersion());
         clusterEntity.setStartAfterDeploy(true);
         clusterEntity.setPassword(cluster.getPassword());

         // set cluster version
         clusterEntity.setVersion(clusterEntityMgr.getServerVersion());

         if (cluster.containsComputeOnlyNodeGroups(softwareManager)) {
            clusterEntity.setAutomationEnable(automationEnable);
         } else {
            clusterEntity.setAutomationEnable(null);
         }
         clusterEntity.setVhmMinNum(-1);
         clusterEntity.setVhmMaxNum(-1);

         if (cluster.getRpNames() != null && cluster.getRpNames().size() > 0) {
            logger.debug("resource pool " + cluster.getRpNames()
                  + " specified for cluster " + name);
            clusterEntity.setVcRpNameList(cluster.getRpNames());
         } else {
            logger.debug("no resource pool name specified, use global configuration.");
         }
         if (cluster.getDsNames() != null && !cluster.getDsNames().isEmpty()) {
            logger.debug("datastore " + cluster.getDsNames()
                  + " specified for cluster " + name);
            clusterEntity.setVcDatastoreNameList(cluster.getDsNames());
         } else {
            logger.debug("no datastore name specified, use global configuration.");
         }

         clusterEntity.setNetworkConfig(validateAndConvertNetNamesToNetConfigs(
               cluster.getNetworkConfig(), cluster.getDistroVendor()
                     .equalsIgnoreCase(Constants.MAPR_VENDOR)));
         clusterEntity.setVhmJobTrackerPort("50030");
         if (cluster.getConfiguration() != null
               && cluster.getConfiguration().size() > 0) {
            clusterEntity.setHadoopConfig((new Gson()).toJson(cluster
                  .getConfiguration()));

            updateVhmJobTrackerPort(cluster, clusterEntity);
         }

         setAdvancedProperties(cluster.getExternalHDFS(),
               cluster.getExternalMapReduce(), localRepoURL, clusterEntity);
         NodeGroupCreate[] groups = cluster.getNodeGroups();
         if (groups != null && groups.length > 0) {
            clusterEntity.setNodeGroups(convertNodeGroupsToEntities(gson,
                  clusterEntity, cluster.getDistro(), groups,
                  cluster.isValidateConfig()));

            //make sure memory size is no less than MIN_MEM_SIZE
            validateMemorySize(clusterEntity.getNodeGroups(), failedMsgList);
            if (!failedMsgList.isEmpty()) {
               throw ClusterConfigException.INVALID_SPEC(failedMsgList);
            }
         }

         if (cluster.getTopologyPolicy() == null) {
            clusterEntity.setTopologyPolicy(TopologyType.NONE);
         } else {
            clusterEntity.setTopologyPolicy(cluster.getTopologyPolicy());
         }

         if (clusterEntity.getTopologyPolicy() == TopologyType.HVE) {
            boolean hveSupported = false;
            if (clusterEntity.getDistro() != null) {
               HadoopStack hadoopStack =
                     filterDistroFromAppManager(softwareManager,
                           clusterEntity.getDistro());
               if (hadoopStack != null) {
                  hveSupported = hadoopStack.isHveSupported();
               }
            }
            if (!hveSupported) {
               throw ClusterConfigException.INVALID_TOPOLOGY_POLICY(
                     clusterEntity.getTopologyPolicy(),
                     "current Hadoop distribution does not support HVE.");
            }
         }

         clusterEntityMgr.insert(clusterEntity);
View Full Code Here


      return getClusterConfig(clusterName, true);
   }

   @Transactional(readOnly = true)
   public ClusterCreate getClusterConfig(String clusterName, boolean needAllocIp) {
      ClusterEntity clusterEntity = clusterEntityMgr.findByName(clusterName);
      if (clusterEntity == null) {
         throw ClusterConfigException.CLUSTER_CONFIG_NOT_FOUND(clusterName);
      }
      ClusterCreate clusterConfig = new ClusterCreate();
      clusterConfig.setName(clusterEntity.getName());
      clusterConfig.setAppManager(clusterEntity.getAppManager());
      clusterConfig.setDistro(clusterEntity.getDistro());
      convertClusterConfig(clusterEntity, clusterConfig, needAllocIp);

      Gson gson =
            new GsonBuilder().excludeFieldsWithoutExposeAnnotation().create();
      String manifest = gson.toJson(clusterConfig);
View Full Code Here

   @Transactional
   public void updateAppConfig(String clusterName, ClusterCreate clusterCreate) {
      logger.debug("Update configuration for cluster " + clusterName);

      ClusterEntity cluster = clusterEntityMgr.findByName(clusterName);

      if (cluster == null) {
         logger.error("cluster " + clusterName + " does not exist");
         throw BddException.NOT_FOUND("Cluster", clusterName);
      }
      SoftwareManager softwareManager =
            getSoftwareManager(cluster.getAppManager());
      // read distro and distroVersion from ClusterEntity and set to ClusterCreate
      clusterCreate.setDistro(cluster.getDistro());
      clusterCreate.setDistroVersion(cluster.getDistroVersion());
      if (!CommonUtil.isBlank(cluster.getAdvancedProperties())) {
         Gson gson = new Gson();
         Map<String, String> advancedProperties =
               gson.fromJson(cluster.getAdvancedProperties(), Map.class);
         clusterCreate.setExternalHDFS(advancedProperties.get("ExternalHDFS"));
         clusterCreate.setExternalMapReduce(advancedProperties
               .get("ExternalMapReduce"));
         clusterCreate.setLocalRepoURL(advancedProperties.get("LocalRepoURL"));
      }
      // only check roles validity in server side, but not in CLI and GUI, because roles info exist in server side.
      ClusterBlueprint blueprint = clusterCreate.toBlueprint();
      try {
         softwareManager.validateBlueprint(blueprint);
      } catch (ValidationException e) {
         throw ClusterConfigException.INVALID_SPEC(e.getFailedMsgList());
      }

      updateInfrastructure(clusterCreate, softwareManager, blueprint);
      Map<String, Object> clusterLevelConfig = clusterCreate.getConfiguration();

      if (clusterLevelConfig != null && clusterLevelConfig.size() > 0) {
         logger.debug("Cluster level app config is updated.");
         cluster.setHadoopConfig((new Gson()).toJson(clusterLevelConfig));
         updateVhmJobTrackerPort(clusterCreate, cluster);
      } else {
         logger.debug("cluster configuration is not set in cluster spec, so treat it as an empty configuration.");
         cluster.setHadoopConfig(null);
      }
      setAdvancedProperties(clusterCreate.getExternalHDFS(),
            clusterCreate.getExternalMapReduce(),
            clusterCreate.getLocalRepoURL(), cluster);
      updateNodegroupAppConfig(clusterCreate, cluster,
View Full Code Here

         }

         for (Iterator<String> ite = clusterList.iterator(); ite.hasNext(); ) {
            String clusterName = ite.next();
            try {
               ClusterEntity cluster =
                     lockedEntityManager.getClusterEntityMgr().findByName(
                           clusterName);
               if (cluster == null) {
                  logger.info("Cluster " + clusterName
                        + " does not exist, stop sync up for it.");
                  ite.remove();
                  continue;
               }

               if (!cluster.getStatus().isSyncServiceStatus()) {
                  logger.debug("Cluster " + clusterName + " is in status "
                        + cluster.getStatus());
                  logger.debug("Stop sync up for it");
                  ite.remove();
                  continue;
               }
               ClusterBlueprint blueprint =
View Full Code Here

      return clusterDao.findAll();
   }

   public List<NodeGroupEntity> findAllGroups(String clusterName) {
      List<ClusterEntity> clusters = new ArrayList<ClusterEntity>();
      ClusterEntity cluster = clusterDao.findByName(clusterName);
      clusters.add(cluster);

      return nodeGroupDao.findAllByClusters(clusters);
   }
View Full Code Here

         OperationStatusWithDetail status, boolean lastUpdate) {
      logger.info("handle operation status: " + status.getOperationStatus());
      boolean finished = status.getOperationStatus().isFinished();
      final Map<String, GroupData> groups = status.getClusterData().getGroups();

      ClusterEntity cluster = findByName(clusterName);
      AuAssert.check(cluster.getId() != null);
      for (NodeGroupEntity group : cluster.getNodeGroups()) {
         for (String groupName : groups.keySet()) {
            if (groupName.equals(group.getName())) {
               for (ServerData serverData : groups.get(groupName)
                     .getInstances()) {
                  logger.debug("server data: " + serverData.getName()
View Full Code Here

      // process node status
      handleNodeStatus(report, true);
   }

   private void handleClusterStatus(String clusterName, ClusterReport report) {
      ClusterEntity cluster = findByName(clusterName);
      ClusterStatus oldState = cluster.getStatus();
      switch (oldState) {
      case RUNNING:
      case SERVICE_STOPPED:
      case SERVICE_WARNING:
         switch (report.getStatus()) {
         case STARTED:
            cluster.setStatus(ClusterStatus.RUNNING);
            break;
         case ALERT:
            cluster.setStatus(ClusterStatus.SERVICE_WARNING);
            break;
         case STOPPED:
            cluster.setStatus(ClusterStatus.SERVICE_STOPPED);
            break;
         default:
            break;
         }
         logger.info("Got status " + report.getStatus()
               + ", change cluster status from " + oldState + " to "
               + cluster.getStatus());
         break;
      default:
         logger.debug("In status "
               + cluster.getStatus()
               + ". Do not change cluster status based on service status change.");
         break;
      }
   }
View Full Code Here

      return handleNodeStatus(report, lastUpdate);
   }

   private boolean handleNodeStatus(ClusterReport report, boolean lastUpdate) {
      boolean finished = report.isFinished();
      ClusterEntity cluster = findByName(report.getName());
      Map<String, NodeReport> nodeReportMap = report.getNodeReports();
      for (NodeGroupEntity group : cluster.getNodeGroups()) {
         for (NodeEntity node : group.getNodes()) {
            NodeReport nodeReport = nodeReportMap.get(node.getVmName());
            if (nodeReport == null) {
               continue;
            }
View Full Code Here

            allNodesDown = false;
         }
      }

      if (updateClusterStatus && allNodesDown) {
         ClusterEntity cluster = findByName(clusterName);
         if (cluster.getStatus() == ClusterStatus.RUNNING) {
            logger.info("All nodes are powered off, switch cluster status to stopped.");
            cluster.setStatus(ClusterStatus.STOPPED);
         }
      }
   }
View Full Code Here

   }

   @Transactional
   @RetryTransaction
   public List<String> getPortGroupNames(String clusterName) {
      ClusterEntity clusterEntity = clusterDao.findByName(clusterName);
      List<String> portGroups = new ArrayList<String>();
      for (String networkName : clusterEntity.fetchNetworkNameList()) {
         portGroups.add(networkDAO.findNetworkByName(networkName)
               .getPortGroup());
      }
      return portGroups;
   }
View Full Code Here

TOP

Related Classes of com.vmware.bdd.entity.ClusterEntity

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.