Examples of ClusterRead


Examples of com.vmware.bdd.apitypes.ClusterRead

      }
   }

   private void printScaleReport(TaskRead taskRead, String clusterName,
         String nodeGroupName) {
      ClusterRead cluster = restClient.get(clusterName, true);
      List<NodeGroupRead> nodeGroups = cluster.getNodeGroups();
      List<NodeStatus> succeedNodes = taskRead.getSucceedNodes();
      List<NodeStatus> failedNodes = taskRead.getFailNodes();
      setNodeStatusInfo(succeedNodes, nodeGroups);
      setNodeStatusInfo(failedNodes, nodeGroups);
      LinkedHashMap<String, List<String>> columnNamesWithGetMethodNames =
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

         @CliOption(key = { "maxComputeNodeNum" }, mandatory = false, help = "The maximum number of compute nodes staying powered on (valid in auto elasticity mode)") final Integer maxComputeNodeNum,
         @CliOption(key = { "targetComputeNodeNum" }, mandatory = false, help = "The number of instances powered on (valid in manual elasticity mode)") final Integer targetComputeNodeNum,
         @CliOption(key = { "ioShares" }, mandatory = false, help = "The relative disk I/O priorities: HIGH, NORNAL, LOW") final String ioShares) {
      try {
         //validate if the cluster exists
         ClusterRead cluster = restClient.get(clusterName, false);
         if (cluster == null) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  clusterName, Constants.OUTPUT_OP_SET_PARAM,
                  Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + clusterName
                        + " does not exist.");
            return;
         }

         //validate the node group type for elasticity params
         if (elasticityMode != null || minComputeNodeNum != null
               || maxComputeNodeNum != null || targetComputeNodeNum != null) {
            if (!cluster.validateSetManualElasticity()) {
               CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                     clusterName, Constants.OUTPUT_OP_SET_PARAM,
                     Constants.OUTPUT_OP_RESULT_FAIL,
                     Constants.PARAM_SHOULD_HAVE_COMPUTE_ONLY_GROUP);
               return;
            }
         } else if (ioShares == null) {
            // in this case, no parameter is specified excpet "cluster name", return directly
            System.out
                  .println("There is nothing to adjust, please specify more parameters.");
            return;
         }

         ElasticityMode mode = null;
         //validate the input of elasticityMode
         if (elasticityMode != null) {
            try {
               mode = ElasticityMode.valueOf(elasticityMode.toUpperCase());
            } catch (IllegalArgumentException e) {
               CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                     clusterName, Constants.OUTPUT_OP_SET_PARAM,
                     Constants.OUTPUT_OP_RESULT_FAIL, Constants.INVALID_VALUE
                           + " elasticityMode = " + elasticityMode);
               return;
            }
         }
         Boolean enableAuto = null;
         if (mode != null) {
            enableAuto = (mode == ElasticityMode.AUTO) ? true : false;
         }

         //validate the input parameters
         try {
            if (!cluster.validateSetParamParameters(targetComputeNodeNum,
                  minComputeNodeNum, maxComputeNodeNum)) {
               return;
            }
         } catch (Exception e) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

         @CliOption(key = { "maxComputeNodeNum" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "reset maxComputeNodeNum to -1") final boolean maxComputeNodeNum,
         @CliOption(key = { "targetComputeNodeNum" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "reset targetComputeNodeNum to -1(activate all compute nodes)") final boolean targetComputeNodeNum,
         @CliOption(key = { "ioShares" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "reset disk I/O priorities to LOW") final boolean ioShares) {
      try {
         //validate if the cluster exists
         ClusterRead cluster = restClient.get(clusterName, false);
         if (cluster == null) {
            CommandsUtils.printCmdFailure(Constants.OUTPUT_OBJECT_CLUSTER,
                  clusterName, Constants.OUTPUT_OP_RESET_PARAM,
                  Constants.OUTPUT_OP_RESULT_FAIL, "cluster " + clusterName
                        + " does not exist.");
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

   @CliCommand(value = "cluster target", help = "Set or query target cluster to run commands")
   public void targetCluster(
         @CliOption(key = { "name" }, mandatory = false, help = "The cluster name") final String name,
         @CliOption(key = { "info" }, mandatory = false, specifiedDefaultValue = "true", unspecifiedDefaultValue = "false", help = "flag to show target information") final boolean info) {

      ClusterRead cluster = null;
      boolean noCluster = false;
      try {
         if (info) {
            if (name != null) {
               System.out
                     .println("Warning: can't specify option --name and --info at the same time");
               return;
            }
            String fsUrl = hadoopConfiguration.get("fs.default.name");
            String jtUrl = hadoopConfiguration.get("mapred.job.tracker");
            if ((fsUrl == null || fsUrl.length() == 0)
                  && (jtUrl == null || jtUrl.length() == 0)) {
               System.out
                     .println("There is no targeted cluster. Run \"cluster target --name\" command first.");
               return;
            }
            if (targetClusterName != null && targetClusterName.length() > 0) {
               System.out.println("Cluster         : " + targetClusterName);
            }
            if (fsUrl != null && fsUrl.length() > 0) {
               System.out.println("HDFS url        : " + fsUrl);
            }
            if (jtUrl != null && jtUrl.length() > 0) {
               System.out.println("Job Tracker url : " + jtUrl);
            }
            if (hiveServerUrl != null && hiveServerUrl.length() > 0) {
               System.out.println("Hive server info: " + hiveServerUrl);
            }
         } else {
            if (name == null) {
               ClusterRead[] clusters = restClient.getAll(false);
               if (clusters != null && clusters.length > 0) {
                  cluster = clusters[0];
               } else {
                  noCluster = true;
               }
            } else {
               cluster = restClient.get(name, false);
            }

            if (cluster == null) {
               if (noCluster) {
                  System.out
                        .println("There is no available cluster for targeting.");
               } else {
                  System.out.println("Failed to target cluster: The cluster "
                        + name + " not found");
               }
               setFsURL("");
               setJobTrackerURL("");
               this.setHiveServerUrl("");
            } else {
               targetClusterName = cluster.getName();
               boolean hasHDFS = false;
               boolean hasHiveServer = false;
               for (NodeGroupRead nodeGroup : cluster.getNodeGroups()) {
                  for (String role : nodeGroup.getRoles()) {
                     if ("hadoop_namenode".equals(role)) {
                        List<NodeRead> nodes = nodeGroup.getInstances();
                        if (nodes != null && nodes.size() > 0) {
                           String nameNodeIP = nodes.get(0).fetchMgtIp();
                           setNameNode(nameNodeIP);
                           hasHDFS = true;
                        } else {
                           throw new CliRestException("no name node available");
                        }
                     }
                     if ("hadoop_jobtracker".equals(role)) {
                        List<NodeRead> nodes = nodeGroup.getInstances();
                        if (nodes != null && nodes.size() > 0) {
                           String jobTrackerIP = nodes.get(0).fetchMgtIp();
                           setJobTracker(jobTrackerIP);
                        } else {
                           throw new CliRestException(
                                 "no job tracker available");
                        }
                     }
                     if ("hive_server".equals(role)) {
                        List<NodeRead> nodes = nodeGroup.getInstances();
                        if (nodes != null && nodes.size() > 0) {
                           String hiveServerIP = nodes.get(0).fetchMgtIp();
                           setHiveServerAddress(hiveServerIP);
                           hasHiveServer = true;
                        } else {
                           throw new CliRestException(
                                 "no hive server available");
                        }
                     }
                  }
               }
               if (cluster.getExternalHDFS() != null
                     && !cluster.getExternalHDFS().isEmpty()) {
                  setFsURL(cluster.getExternalHDFS());
                  hasHDFS = true;
               }
               if (!hasHDFS) {
                  setFsURL("");
               }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

               Constants.PARAM_CLUSTER
                     + Constants.PARAM_NOT_CONTAIN_HORIZONTAL_LINE);
         return;
      }
      try {
         ClusterRead clusterRead = restClient.get(name, false);
         // build ClusterCreate object
         ClusterCreate clusterConfig = new ClusterCreate();
         clusterConfig.setName(clusterRead.getName());
         ClusterCreate clusterSpec =
               CommandsUtils.getObjectByJsonString(ClusterCreate.class,
                     CommandsUtils.dataFromFile(specFilePath));
         clusterConfig.setNodeGroups(clusterSpec.getNodeGroups());
         clusterConfig.setConfiguration(clusterSpec.getConfiguration());
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

      }
   }

   private void printClusterFixReport(TaskRead taskRead, String clusterName)
         throws Exception {
      ClusterRead cluster = restClient.get(clusterName, true);
      List<NodeGroupRead> nodeGroups = cluster.getNodeGroups();
      List<NodeStatus> succeedNodes = taskRead.getSucceedNodes();
      List<NodeStatus> failedNodes = taskRead.getFailNodes();
      setNodeStatusInfo(succeedNodes, nodeGroups);
      System.out.println("The fixed nodes: " + succeedNodes.size());
      LinkedHashMap<String, List<String>> columnNamesWithGetMethodNames =
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

      ngr1.setInstanceNum(6);
      ngr1.setRoles(roles1);
      ngr1.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(ngr1.getRoles()));
      List<NodeGroupRead> nodeGroupRead = new LinkedList<NodeGroupRead>();
      nodeGroupRead.add(ngr1);
      ClusterRead cluster = new ClusterRead();
      cluster.setNodeGroups(nodeGroupRead);
      cluster.setVhmMinNum(-1);
      cluster.setVhmMaxNum(-1);

      try {
         cluster.validateSetParamParameters(null, -2, null);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: minComputeNodeNum=-2. Value must be less than or equal to the number of compute-only nodes (6) and less than or equal to maxComputeNodeNum.", e.getMessage());
      }

      try {
         cluster.validateSetParamParameters(null, null, -2);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: maxComputeNodeNum=-2. Value must be less than or equal to the number of compute-only nodes (6) and greater than or equal to minComputeNodeNum.", e.getMessage());
      }

      try {
         cluster.validateSetParamParameters(9, null, null);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: targetComputeNodeNum=9. Value must be less than or equal to the number of compute-only nodes (6).", e.getMessage());
      }

      try {
         cluster.validateSetParamParameters(null, 6, 1);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: minComputeNodeNum=6. Value must be less than or equal to the number of compute-only nodes (6) and less than or equal to maxComputeNodeNum (1).", e.getMessage());
      }

      cluster.setVhmMinNum(6);
      try {
         cluster.validateSetParamParameters(null, null, 5);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: maxComputeNodeNum=5. Value must be less than or equal to the number of compute-only nodes (6) and greater than or equal to minComputeNodeNum (6).", e.getMessage());
      }

      cluster.setVhmMaxNum(1);
      try {
         cluster.validateSetParamParameters(null, 6, null);
         fail();
      } catch (BddException e) {
         assertEquals("Invalid value: minComputeNodeNum=6. Value must be less than or equal to the number of compute-only nodes (6) and less than or equal to maxComputeNodeNum (1).", e.getMessage());
      }

      //test will fail if Exception is thrown out
      cluster.setVhmMinNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, null, -1));
      assertEquals(true, cluster.validateSetParamParameters(null, null, 0));
      assertEquals(true, cluster.validateSetParamParameters(null, null, 1));

      cluster.setVhmMinNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, null, -1));
      assertEquals(true, cluster.validateSetParamParameters(null, null, 0));
      assertEquals(true, cluster.validateSetParamParameters(null, null, 1));

      cluster.setVhmMinNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, -1, null));
      cluster.setVhmMinNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, 0, null));
      cluster.setVhmMinNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, 1, null));

      cluster.setVhmMinNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, -1, null));
      cluster.setVhmMinNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, 0, null));
      cluster.setVhmMinNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, 1, null));


      cluster.setVhmMaxNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, null, -1));
      cluster.setVhmMaxNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, null, 0));
      cluster.setVhmMaxNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, null, 1));

      cluster.setVhmMaxNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, null, -1));
      cluster.setVhmMaxNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, null, 0));
      cluster.setVhmMaxNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, null, 1));

      cluster.setVhmMaxNum(0);
      assertEquals(true, cluster.validateSetParamParameters(null, -1, null));
      assertEquals(true, cluster.validateSetParamParameters(null, 0, null));
      //assertEquals(true, cluster.validateSetParamParameters(null, 1, null));

      cluster.setVhmMaxNum(-1);
      assertEquals(true, cluster.validateSetParamParameters(null, -1, null));
      assertEquals(true, cluster.validateSetParamParameters(null, 0, null));
      assertEquals(true, cluster.validateSetParamParameters(null, 1, null));

      cluster.validateSetParamParameters(null, null, null);

      cluster.validateSetParamParameters(2, null, null);
      cluster.validateSetParamParameters(null, 1, 5);
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

         System.out.println(c.getId());
      }
      cluster = clusterEntityMgr.findByName("my-cluster-external-hdfs");
      Assert.assertTrue(cluster != null);
      Assert.assertEquals(cluster.getAdvancedProperties(), "{\"ExternalHDFS\":\"hdfs://168.192.0.70:8020\"}");
      ClusterRead clusterRead = clusterEntityMgr.toClusterRead("my-cluster-external-hdfs");
      Assert.assertEquals(clusterRead.getExternalHDFS(), "hdfs://168.192.0.70:8020");

      ClusterCreate attrs =
            clusterConfigMgr.getClusterConfig("my-cluster-external-hdfs");
      String manifest = gson.toJson(attrs);
      System.out.println(manifest);
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

      Assert.assertTrue(cluster != null);
      Assert.assertEquals(
            cluster.getAdvancedProperties(),
            "{\"ExternalMapReduce\":\"192.168.0.1:8021\",\"ExternalHDFS\":\"hdfs://192.168.0.2:8020\"}");
      ClusterRead clusterRead =
            clusterEntityMgr.toClusterRead("my-cluster-external-mr");
      Assert.assertEquals(clusterRead.getExternalHDFS(),
            "hdfs://192.168.0.2:8020");
      Assert.assertEquals(clusterRead.getExternalMapReduce(),
            "192.168.0.1:8021");
   }
View Full Code Here

Examples of com.vmware.bdd.apitypes.ClusterRead

   }

   @SuppressWarnings("unchecked")
   @Test
   public void testValidateSetManualElasticity() {
      ClusterRead cluster = new ClusterRead();
      cluster.setDistroVendor(Constants.MAPR_VENDOR);
      NodeGroupRead compute = new NodeGroupRead();
      compute.setName("compute");
      compute.setRoles(Arrays.asList("mapr_tasktracker"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
      compute.setRoles(Arrays.asList(
            "mapr_tasktracker",
      "mapr_nfs"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(false,
            cluster.validateSetManualElasticity(Arrays.asList("compute")));
      cluster.setDistroVendor(Constants.APACHE_VENDOR);
      compute.setRoles(Arrays.asList("hadoop_tasktracker"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
      compute.setRoles(Arrays.asList("hadoop_tasktracker",
      "tempfs_client"));
      compute.setComputeOnly(defaultSoftwareManager.isComputeOnlyRoles(compute.getRoles()));
      cluster.setNodeGroups(Arrays.asList(compute));
      assertEquals(true, cluster.validateSetManualElasticity());
   }
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.