Package com.vmware.bdd.software.mgmt.plugin.model

Examples of com.vmware.bdd.software.mgmt.plugin.model.HadoopStack
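HadoopStack is a simple value object describing a Hadoop distribution: its vendor, distro name, full version, the roles it supports, and whether it supports HVE topology. The snippets below show how it is populated and consumed across the Big Data Extensions code base. For orientation, here is a minimal sketch that uses only the bean accessors exercised in those snippets (all values are illustrative):

   import com.vmware.bdd.software.mgmt.plugin.model.HadoopStack;

   public class HadoopStackSketch {
      public static void main(String[] args) {
         // HadoopStack is a plain bean; no builder or factory is required.
         HadoopStack stack = new HadoopStack();
         stack.setVendor("HDP");       // distribution vendor (illustrative)
         stack.setDistro("HDP-2.1");   // distro display name (illustrative)
         stack.setFullVersion("2.1");  // full version string (illustrative)
         System.out.println(stack.getVendor() + " " + stack.getFullVersion());
      }
   }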


      Assert.assertNotNull(apiManager.getRequestWithTasks(clusterName, 1L).getApiTasks());
   }

   @Test
   public void testGetClusterStatus() throws Exception {
      // Describe the target distribution: vendor "HDP" at full version "2.1".
      HadoopStack stack = new HadoopStack();
      stack.setVendor("HDP");
      stack.setFullVersion("2.1");
      Assert.assertEquals("Started", apiManager.getClusterStatus(clusterName, stack).toString());
   }
View Full Code Here


         throw BddException.INVALID_PARAMETER("appmanager name", appManagerName);
      }
      SoftwareManager softMgr =
            softwareManagerCollector.getSoftwareManager(appManagerName);
      List<HadoopStack> stacks = softMgr.getSupportedStacks();
      // Scan the supported stacks for the one whose distro name matches.
      HadoopStack hadoopStack = null;
      for (HadoopStack stack : stacks) {
         if (distroName.equals(stack.getDistro())) {
            hadoopStack = stack;
            break;
         }
      }
      if (hadoopStack == null) {
         throw BddException.NOT_FOUND("Distro", distroName);
      } else {
         return hadoopStack.getRoles();
      }
   }
View Full Code Here
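The lookup above is a linear scan over softMgr.getSupportedStacks(), matching on the distro name. The same logic reads a little tighter as a stream pipeline; a sketch of a hypothetical helper, assuming only the getSupportedStacks() and getDistro() calls from the snippet:

   // Hypothetical helper: return the supported stack matching a distro name, or null.
   private HadoopStack findStackByDistro(SoftwareManager softMgr, String distroName) {
      return softMgr.getSupportedStacks().stream()
            .filter(stack -> distroName.equals(stack.getDistro()))
            .findFirst()
            .orElse(null);
   }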


      if (CommonUtil.isBlank(appManagerName)
            || !CommonUtil.validateResourceName(appManagerName)) {
         throw BddException.INVALID_PARAMETER("appmanager name", appManagerName);
      }
      SoftwareManager softMgr = softwareManagerCollector.getSoftwareManager(appManagerName);
      HadoopStack stack = softMgr.getDefaultStack();
      if (stack == null) {
         return null;
      } else {
         return new DistroRead(stack);
      }
View Full Code Here

      if (CommonUtil.isBlank(appManagerName)
            || !CommonUtil.validateResourceName(appManagerName)) {
         throw BddException.INVALID_PARAMETER("appmanager name", appManagerName);
      }
      SoftwareManager softMgr = softwareManagerCollector.getSoftwareManager(appManagerName);
      HadoopStack stack = clusterMgr.filterDistroFromAppManager(softMgr, distroName);
      if (stack == null) {
         return null;
      } else {
         return new DistroRead(stack);
      }
View Full Code Here
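Taken together, the two endpoints above imply a single resolution rule: use the named distro when one is given, otherwise fall back to the app manager's default stack. A sketch of that rule as a hypothetical helper, mirroring the TODO in the createCluster snippet that follows:

   // Hypothetical helper: resolve a stack by distro name, defaulting when blank.
   private HadoopStack resolveStack(SoftwareManager softMgr, String distroName) {
      if (CommonUtil.isBlank(distroName)) {
         return softMgr.getDefaultStack();
      }
      return softMgr.getSupportedStacks().stream()
            .filter(stack -> distroName.equals(stack.getDistro()))
            .findFirst()
            .orElse(null);
   }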

   }

   public Long createCluster(ClusterCreate createSpec) throws Exception {
      SoftwareManager softMgr = softwareManagerCollector.getSoftwareManager(createSpec.getAppManager());
      // TODO: if a hadoop stack is specified, look it up by stack name; otherwise fall back to the default hadoop stack.
      HadoopStack stack = clusterConfigMgr.filterDistroFromAppManager(softMgr, createSpec.getDistro());

      // If the REST client did not specify a distro, fill it in from the resolved stack.
      if(CommonUtil.isBlank(createSpec.getDistro())) {
         createSpec.setDistro(stack.getDistro());
      }
      createSpec.setDistroVendor(stack.getVendor());
      createSpec.setDistroVersion(stack.getFullVersion());

      // create auto rps if vc cluster/rp is specified
      createAutoRps(createSpec);
      ClusterCreate clusterSpec =
            ClusterSpecFactory.getCustomizedSpec(createSpec, softMgr.getType());
View Full Code Here

      String appManager = cluster.getAppManager();
      if (appManager == null) {
         appManager = Constants.IRONFAN;
      }
      SoftwareManager softwareManager = getSoftwareManager(appManager);
      // Resolve the cluster's distro against the stacks this app manager supports.
      HadoopStack stack =
            filterDistroFromAppManager(softwareManager, cluster.getDistro());
      if (cluster.getDistro() == null || stack == null) {
         throw BddException.INVALID_PARAMETER("distro", cluster.getDistro());
      }
      // Role validity is checked only on the server side (not in the CLI or GUI), because role definitions live on the server.
      ClusterBlueprint blueprint = cluster.toBlueprint();
      try {
         softwareManager.validateBlueprint(blueprint); // reuse the blueprint built above
         cluster.validateClusterCreate(failedMsgList, warningMsgList);
      } catch (ValidationException e) {
         failedMsgList.addAll(e.getFailedMsgList());
         warningMsgList.addAll(e.getWarningMsgList());
      }

      if (!failedMsgList.isEmpty()) {
         throw ClusterConfigException.INVALID_SPEC(failedMsgList);
      }

      if (!validateRacksInfo(cluster, failedMsgList)) {
         throw ClusterConfigException.INVALID_PLACEMENT_POLICIES(failedMsgList);
      }

      String localRepoURL = cluster.getLocalRepoURL();
      if (!CommonUtil.isBlank(localRepoURL)
            && !validateLocalRepoURL(localRepoURL)) {
         throw ClusterConfigException.INVALID_LOCAL_REPO_URL(failedMsgList);
      }

      try {
         ClusterEntity entity = clusterEntityMgr.findByName(name);
         if (entity != null) {
            logger.info("can not create cluster " + name
                  + ", which is already existed.");
            throw BddException.ALREADY_EXISTS("Cluster", name);
         }

         updateInfrastructure(cluster, softwareManager, blueprint);
         // persist cluster config
         logger.debug("begin to add cluster config for " + name);
         Gson gson = new Gson();
         ClusterEntity clusterEntity = new ClusterEntity(name);
         clusterEntity.setAppManager(cluster.getAppManager());
         clusterEntity.setDistro(cluster.getDistro());
         clusterEntity.setDistroVendor(cluster.getDistroVendor());
         clusterEntity.setDistroVersion(cluster.getDistroVersion());
         clusterEntity.setStartAfterDeploy(true);
         clusterEntity.setPassword(cluster.getPassword());

         // set cluster version
         clusterEntity.setVersion(clusterEntityMgr.getServerVersion());

         if (cluster.containsComputeOnlyNodeGroups(softwareManager)) {
            clusterEntity.setAutomationEnable(automationEnable);
         } else {
            clusterEntity.setAutomationEnable(null);
         }
         clusterEntity.setVhmMinNum(-1);
         clusterEntity.setVhmMaxNum(-1);

         if (cluster.getRpNames() != null && cluster.getRpNames().size() > 0) {
            logger.debug("resource pool " + cluster.getRpNames()
                  + " specified for cluster " + name);
            clusterEntity.setVcRpNameList(cluster.getRpNames());
         } else {
            logger.debug("no resource pool name specified, use global configuration.");
         }
         if (cluster.getDsNames() != null && !cluster.getDsNames().isEmpty()) {
            logger.debug("datastore " + cluster.getDsNames()
                  + " specified for cluster " + name);
            clusterEntity.setVcDatastoreNameList(cluster.getDsNames());
         } else {
            logger.debug("no datastore name specified, use global configuration.");
         }

         clusterEntity.setNetworkConfig(validateAndConvertNetNamesToNetConfigs(
               cluster.getNetworkConfig(), cluster.getDistroVendor()
                     .equalsIgnoreCase(Constants.MAPR_VENDOR)));
         clusterEntity.setVhmJobTrackerPort("50030"); // default JobTracker web UI port
         if (cluster.getConfiguration() != null
               && cluster.getConfiguration().size() > 0) {
            clusterEntity.setHadoopConfig((new Gson()).toJson(cluster
                  .getConfiguration()));

            updateVhmJobTrackerPort(cluster, clusterEntity);
         }

         setAdvancedProperties(cluster.getExternalHDFS(),
               cluster.getExternalMapReduce(), localRepoURL, clusterEntity);
         NodeGroupCreate[] groups = cluster.getNodeGroups();
         if (groups != null && groups.length > 0) {
            clusterEntity.setNodeGroups(convertNodeGroupsToEntities(gson,
                  clusterEntity, cluster.getDistro(), groups,
                  cluster.isValidateConfig()));

            // make sure memory size is no less than MIN_MEM_SIZE
            validateMemorySize(clusterEntity.getNodeGroups(), failedMsgList);
            if (!failedMsgList.isEmpty()) {
               throw ClusterConfigException.INVALID_SPEC(failedMsgList);
            }
         }

         if (cluster.getTopologyPolicy() == null) {
            clusterEntity.setTopologyPolicy(TopologyType.NONE);
         } else {
            clusterEntity.setTopologyPolicy(cluster.getTopologyPolicy());
         }

         if (clusterEntity.getTopologyPolicy() == TopologyType.HVE) {
            // HVE placement is only valid when the resolved stack supports it.
            boolean hveSupported = false;
            if (clusterEntity.getDistro() != null) {
               HadoopStack hadoopStack =
                     filterDistroFromAppManager(softwareManager,
                           clusterEntity.getDistro());
               if (hadoopStack != null) {
                  hveSupported = hadoopStack.isHveSupported();
               }
            }
            if (!hveSupported) {
               throw ClusterConfigException.INVALID_TOPOLOGY_POLICY(
                     clusterEntity.getTopologyPolicy(),
View Full Code Here

               gson.fromJson(clusterEntity.getHadoopConfig(), Map.class);
         blueprint.setConfiguration(clusterConfigs);
      }

      // set HadoopStack
      HadoopStack hadoopStack = new HadoopStack();
      hadoopStack.setDistro(clusterEntity.getDistro());
      hadoopStack.setVendor(clusterEntity.getDistroVendor());
      hadoopStack.setFullVersion(clusterEntity.getDistroVersion());
      blueprint.setHadoopStack(hadoopStack);

      // set nodes/nodegroups
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      for (NodeGroupEntity group : clusterEntity.getNodeGroups()) {
View Full Code Here

   }

   @Test
   public void testValidateInfraSettingsBasedOnRole() {
      ClusterBlueprint blueprint = new ClusterBlueprint();
      blueprint.setHadoopStack(new HadoopStack());
      blueprint.getHadoopStack().setDistro(Constants.DEFAULT_VENDOR);

      Map<NetTrafficType, List<String>> networkConfig = new HashMap<NetTrafficType, List<String>>();

      List<String> mgtnets = new ArrayList<String>();
View Full Code Here

   }

   @Test
   public void testHasComputeMasterGroup() {
      ClusterBlueprint blueprint = new ClusterBlueprint();
      HadoopStack hadoopStack = new HadoopStack();
      hadoopStack.setVendor(Constants.DEFAULT_VENDOR);
      blueprint.setHadoopStack(hadoopStack);
      NodeGroupInfo compute = new NodeGroupInfo();
      compute.setRoles(Arrays.asList(HadoopRole.HADOOP_NODEMANAGER_ROLE.toString()));
      List<NodeGroupInfo> nodeGroupInfos = new ArrayList<NodeGroupInfo>();
      nodeGroupInfos.add(compute);
View Full Code Here
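Both tests build a HadoopStack inline before attaching it to a ClusterBlueprint. When that setup recurs, a small fixture helper keeps the boilerplate in one place; a sketch using only the setters exercised above (the helper name is hypothetical):

   // Hypothetical test fixture: a blueprint pre-populated with a stack.
   private static ClusterBlueprint blueprintWithStack(String vendor, String distro) {
      HadoopStack stack = new HadoopStack();
      stack.setVendor(vendor);
      stack.setDistro(distro);
      ClusterBlueprint blueprint = new ClusterBlueprint();
      blueprint.setHadoopStack(stack);
      return blueprint;
   }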
