Examples of org.apache.ambari.server.state.ServiceInfo

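The snippets below come from the Ambari server and its stack metainfo unit tests. As a quick orientation, here is a minimal sketch of the lookup pattern they all share: resolve a ServiceInfo from AmbariMetaInfo for a stack name, stack version and service name, then read its properties and config dependencies. The stack coordinates ("HDP", "2.0.6", "HDFS") and the injected AmbariMetaInfo instance are assumptions for illustration only, and the import paths are assumed from the Ambari server module layout.

import java.util.List;

import org.apache.ambari.server.AmbariException;
import org.apache.ambari.server.api.services.AmbariMetaInfo;
import org.apache.ambari.server.state.PropertyInfo;
import org.apache.ambari.server.state.ServiceInfo;

public class ServiceInfoLookupExample {

  private final AmbariMetaInfo ambariMetaInfo;

  // AmbariMetaInfo is normally provided by the server's dependency injection;
  // here it is simply passed in for the sake of the sketch.
  public ServiceInfoLookupExample(AmbariMetaInfo ambariMetaInfo) {
    this.ambariMetaInfo = ambariMetaInfo;
  }

  public void printServiceDefinition() throws AmbariException {
    // Resolve the service definition for a stack name, stack version and service name
    // (an AmbariException / StackAccessException is raised when the service is not defined).
    ServiceInfo serviceInfo = ambariMetaInfo.getService("HDP", "2.0.6", "HDFS");

    // Each PropertyInfo carries a property name, its default value and the
    // configuration file (e.g. hdfs-site.xml, global.xml) it belongs to.
    for (PropertyInfo property : serviceInfo.getProperties()) {
      System.out.println(property.getFilename() + ": "
          + property.getName() + " = " + property.getValue());
    }

    // Config types the service declares a dependency on; may be null for
    // services without declared dependencies.
    List<String> configDependencies = serviceInfo.getConfigDependencies();
    System.out.println("Config dependencies: " + configDependencies);
  }
}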

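      // For every service recorded for the cluster, resolve its ServiceInfo from the
      // stack metadata and seed any missing log4j-style config type with the default
      // properties shipped in the service definition.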
      String stackVersion = clusterInfo.get("stackVersion");

      List<ClusterServiceEntity> clusterServiceEntities = clusterServiceDAO.findAll();
      for (final ClusterServiceEntity clusterServiceEntity : clusterServiceEntities) {
        String serviceName = clusterServiceEntity.getServiceName();
        ServiceInfo serviceInfo = null;
        try {
          serviceInfo = ambariMetaInfo.getService(stackName, stackVersion, serviceName);
        } catch (AmbariException e) {
          LOG.error("Service " + serviceName + " not found for " + stackName + stackVersion);
          continue;
        }
        List<String> configTypes = serviceInfo.getConfigDependencies();
        if (configTypes != null) {
          for (String configType : configTypes) {
            if (configType.contains(log4jConfigTypeContains)) {
              ClusterConfigEntityPK configEntityPK = new ClusterConfigEntityPK();
              configEntityPK.setClusterId(clusterId);
              configEntityPK.setType(configType);
              configEntityPK.setTag(defaultVersionTag);
              ClusterConfigEntity configEntity = clusterDAO.findConfig(configEntityPK);

              if (configEntity == null) {
                String filename = configType + ".xml";
                Map<String, String> properties = new HashMap<String, String>();
                for (PropertyInfo propertyInfo : serviceInfo.getProperties()) {
                  if (filename.equals(propertyInfo.getFilename())) {
                    properties.put(propertyInfo.getName(), propertyInfo.getValue());
                  }
                }
                if (!properties.isEmpty()) {
View Full Code Here


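    // Building an ExecutionCommand for a host component: the ServiceInfo, ComponentInfo and
    // StackInfo resolved from AmbariMetaInfo supply the schema version, command script,
    // service package folder and stack hooks folder that are sent to the agent.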
    String serviceName = scHost.getServiceName();
    String componentName = event.getServiceComponentName();
    String hostname = scHost.getHostName();
    String osType = clusters.getHost(hostname).getOsType();
    StackId stackId = cluster.getDesiredStackVersion();
    ServiceInfo serviceInfo = ambariMetaInfo.getServiceInfo(stackId.getStackName(),
      stackId.getStackVersion(), serviceName);
    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
      stackId.getStackName(), stackId.getStackVersion(),
      serviceName, componentName);
    StackInfo stackInfo = ambariMetaInfo.getStackInfo(stackId.getStackName(),
      stackId.getStackVersion());

    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
      scHost.getServiceComponentName()).getExecutionCommand();

    Host host = clusters.getHost(scHost.getHostName());

    // Hack - Remove passwords from configs
    if (event.getServiceComponentName().equals(Role.HIVE_CLIENT.toString())) {
      configHelper.applyCustomConfig(configurations, Configuration.HIVE_CONFIG_TAG,
        Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "", true);
    }

    String jobtrackerHost = getJobTrackerHost(cluster);
    if (!scHost.getHostName().equals(jobtrackerHost)) {
      if (configTags.get(Configuration.GLOBAL_CONFIG_TAG) != null) {
        configHelper.applyCustomConfig(
          configurations, Configuration.GLOBAL_CONFIG_TAG,
          Configuration.RCA_ENABLED_PROPERTY, "false", false);
      }
    }

    execCmd.setConfigurations(configurations);
    execCmd.setConfigurationTags(configTags);
    if (commandParams == null) { // if not defined
      commandParams = new TreeMap<String, String>();
    }
    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());


    // Get command script info for custom command/custom action
    /*
     * TODO: Custom actions are not supported yet, so we just pass the
     * component's main commandScript to the agent. This script is only used
     * for default commands like INSTALL/STOP/START/CONFIGURE.
     */
    String commandTimeout = configs.getDefaultAgentTaskTimeout();
    CommandScriptDefinition script = componentInfo.getCommandScript();
    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
      if (script != null) {
        commandParams.put(SCRIPT, script.getScript());
        commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
        if (script.getTimeout() > 0) {
          commandTimeout = String.valueOf(script.getTimeout());
        }
      } else {
        String message = String.format("Component %s of service %s has no " +
          "command script defined", componentName, serviceName);
        throw new AmbariException(message);
      }
    }
    commandParams.put(COMMAND_TIMEOUT, commandTimeout);
    commandParams.put(SERVICE_PACKAGE_FOLDER,
      serviceInfo.getServicePackageFolder());
    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());

    execCmd.setCommandParams(commandParams);

    String repoInfo = customCommandExecutionHelper.getRepoInfo(cluster, host);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Sending repo information to agent"
        + ", hostname=" + scHost.getHostName()
        + ", clusterName=" + cluster.getClusterName()
        + ", stackInfo=" + stackId.getStackId()
        + ", repoInfo=" + repoInfo);
    }

    Map<String, String> hostParams = new TreeMap<String, String>();
    hostParams.put(REPO_INFO, repoInfo);
    hostParams.put(JDK_LOCATION, getJdkResourceUrl());
    hostParams.put(JAVA_HOME, getJavaHome());
    hostParams.put(JDK_NAME, getJDKName());
    hostParams.put(JCE_NAME, getJCEName());
    hostParams.put(STACK_NAME, stackId.getStackName());
    hostParams.put(STACK_VERSION, stackId.getStackVersion());
    hostParams.put(DB_NAME, getServerDB());
    hostParams.put(MYSQL_JDBC_URL, getMysqljdbcUrl());
    hostParams.put(ORACLE_JDBC_URL, getOjdbcUrl());
    hostParams.putAll(getRcaParameters());

    // Write down os specific info for the service
    ServiceOsSpecific anyOs = null;
    if (serviceInfo.getOsSpecifics().containsKey(AmbariMetaInfo.ANY_OS)) {
      anyOs = serviceInfo.getOsSpecifics().get(AmbariMetaInfo.ANY_OS);
    }
    ServiceOsSpecific hostOs = null;
    if (serviceInfo.getOsSpecifics().containsKey(osType)) {
      hostOs = serviceInfo.getOsSpecifics().get(osType);
      // Choose repo that is relevant for host
      ServiceOsSpecific.Repo serviceRepo = hostOs.getRepo();
      if (serviceRepo != null) {
        String serviceRepoInfo = gson.toJson(serviceRepo);
        hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo);
View Full Code Here

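    // Answering a stack service request: a single named service is converted to one
    // StackServiceResponse, otherwise every ServiceInfo of the stack version is returned.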
    String stackName = request.getStackName();
    String stackVersion = request.getStackVersion();
    String serviceName = request.getServiceName();

    if (serviceName != null) {
      ServiceInfo service = this.ambariMetaInfo.getService(stackName, stackVersion, serviceName);
      response = Collections.singleton(service.convertToResponse());
    } else {
      Map<String, ServiceInfo> services = this.ambariMetaInfo.getServices(stackName, stackVersion);
      response = new HashSet<StackServiceResponse>();
      for (ServiceInfo service : services.values()) {
        response.add(service.convertToResponse());
      }
    }
    return response;
  }
View Full Code Here

   * Method: getServiceInfo(String stackName, String version, String
   * serviceName)
   */
  @Test
  public void getServiceInfo() throws Exception {
    ServiceInfo si = metaInfo.getServiceInfo(STACK_NAME_HDP, STACK_VERSION_HDP,
        SERVICE_NAME_HDFS);
    assertNotNull(si);
  }
View Full Code Here

    assertNotNull(si);
  }

  @Test
  public void testConfigDependencies() throws Exception {
    ServiceInfo serviceInfo = metaInfo.getServiceInfo(STACK_NAME_HDP, EXT_STACK_NAME,
      SERVICE_NAME_MAPRED2);
    assertNotNull(serviceInfo);
    assertTrue(!serviceInfo.getConfigDependencies().isEmpty());
  }
View Full Code Here

   * Make sure the global mapping is available when global.xml is
   * in the path.
   * @throws Exception
   */
  @Test
  public void testGlobalMapping() throws Exception {
    ServiceInfo sinfo = metaInfo.getServiceInfo("HDP",
        "0.2", "HDFS");
    List<PropertyInfo> pinfo = sinfo.getProperties();
    // check all the config knobs and make sure the global one is there
    boolean checkforglobal = false;
   
    for (PropertyInfo pinfol: pinfo) {
      if ("global.xml".equals(pinfol.getFilename())) {
        checkforglobal = true;
      }
    }
    Assert.assertTrue(checkforglobal);
    sinfo = metaInfo.getServiceInfo("HDP",
        "0.2", "MAPREDUCE");
    boolean checkforhadoopheapsize = false;
    pinfo = sinfo.getProperties();
    for (PropertyInfo pinfol: pinfo) {
      if ("global.xml".equals(pinfol.getFilename())) {
        if ("hadoop_heapsize".equals(pinfol.getName()))
          checkforhadoopheapsize = true;
      }
View Full Code Here

    }
  }
 
  @Test
  public void testGetService() throws Exception {
    ServiceInfo service = metaInfo.getService(STACK_NAME_HDP, STACK_VERSION_HDP, SERVICE_NAME_HDFS);
    Assert.assertEquals(service.getName(), SERVICE_NAME_HDFS);
    try {
      metaInfo.getService(STACK_NAME_HDP, STACK_VERSION_HDP, NON_EXT_VALUE);
      Assert.fail("Expected a StackAccessException for a service that is not defined in the stack");
    } catch (StackAccessException e) {
      // expected
    }
View Full Code Here

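    // Verifying stack extension: deleted services (SQOOP) and components (YARN_CLIENT)
    // are absent from the extended stack, while the redefined YARN service keeps
    // inherited properties, overrides others and adds new ones.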
    StackInfo stackInfo = metaInfo.getStackInfo(STACK_NAME_HDP, EXT_STACK_NAME);
    Assert.assertNotNull(stackInfo);
    List<ServiceInfo> serviceInfos = stackInfo.getServices();
    Assert.assertFalse(serviceInfos.isEmpty());
    Assert.assertTrue(serviceInfos.size() > 1);
    ServiceInfo deletedService = null;
    ServiceInfo redefinedService = null;
    for (ServiceInfo serviceInfo : serviceInfos) {
      if (serviceInfo.getName().equals("SQOOP")) {
        deletedService = serviceInfo;
      }
      if (serviceInfo.getName().equals("YARN")) {
        redefinedService = serviceInfo;
      }
    }
    Assert.assertNull("SQOOP is a deleted service, should not be a part of " +
      "the extended stack.", deletedService);
    Assert.assertNotNull(redefinedService);
    // Components
    Assert.assertEquals("YARN service is expected to be defined with 3 active" +
      " components.", 3, redefinedService.getComponents().size());
    Assert.assertEquals("TEZ is expected to be a part of extended stack " +
      "definition", "TEZ", redefinedService.getClientComponent().getName());
    Assert.assertFalse("YARN CLIENT is a deleted component.",
      redefinedService.getClientComponent().getName().equals("YARN_CLIENT"));
    // Properties
    Assert.assertNotNull(redefinedService.getProperties());
    Assert.assertTrue(redefinedService.getProperties().size() > 4);
    PropertyInfo deleteProperty1 = null;
    PropertyInfo deleteProperty2 = null;
    PropertyInfo redefinedProperty1 = null;
    PropertyInfo redefinedProperty2 = null;
    PropertyInfo inheritedProperty = null;
    PropertyInfo newProperty = null;
    PropertyInfo originalProperty = null;

    for (PropertyInfo propertyInfo : redefinedService.getProperties()) {
      if (propertyInfo.getName().equals("yarn.resourcemanager" +
        ".resource-tracker.address")) {
        deleteProperty1 = propertyInfo;
      } else if (propertyInfo.getName().equals("yarn.resourcemanager" +
        ".scheduler.address")) {
        deleteProperty2 = propertyInfo;
      } else if (propertyInfo.getName().equals("yarn.resourcemanager" +
        ".address")) {
        redefinedProperty1 = propertyInfo;
      } else if (propertyInfo.getName().equals("yarn.resourcemanager.admin" +
        ".address")) {
        redefinedProperty2 = propertyInfo;
      } else if (propertyInfo.getName().equals("yarn.nodemanager.address")) {
        inheritedProperty = propertyInfo;
      } else if (propertyInfo.getName().equals("new-yarn-property")) {
        newProperty = propertyInfo;
      } else if (propertyInfo.getName().equals("yarn.nodemanager.aux-services")) {
        originalProperty = propertyInfo;
      }
    }

    Assert.assertNull(deleteProperty1);
    Assert.assertNull(deleteProperty2);
    Assert.assertNotNull(redefinedProperty1);
    Assert.assertNotNull(redefinedProperty2);
    Assert.assertNotNull("yarn.nodemanager.address expected to be inherited " +
      "from parent", inheritedProperty);
    Assert.assertEquals("localhost:100009", redefinedProperty1.getValue());
    // Parent property value will result in property being present in the
    // child stack
    Assert.assertEquals("localhost:8141", redefinedProperty2.getValue());
    // New property
    Assert.assertNotNull(newProperty);
    Assert.assertEquals("some-value", newProperty.getValue());
    Assert.assertEquals("some description.", newProperty.getDescription());
    Assert.assertEquals("yarn-site.xml", newProperty.getFilename());
    // Original property
    Assert.assertNotNull(originalProperty);
    Assert.assertEquals("mapreduce.shuffle", originalProperty.getValue());
    Assert.assertEquals("Auxilliary services of NodeManager",
      originalProperty.getDescription());
    Assert.assertEquals(3, redefinedService.getConfigDependencies().size());
  }
View Full Code Here

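    // Comparing properties across the extended stack: a helper collects all applicable
    // services, then specific PropertyInfo entries are located for YARN and MAPREDUCE2.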
      metaInfo.getStackRoot());
    helper.fillInfo();
    List<ServiceInfo> allServices = helper.getAllApplicableServices(metaInfo
      .getStackInfo(STACK_NAME_HDP, EXT_STACK_NAME));

    ServiceInfo testService = null;
    ServiceInfo existingService = null;
    for (ServiceInfo serviceInfo : allServices) {
      if (serviceInfo.getName().equals("YARN")) {
        testService = serviceInfo;
      } else if (serviceInfo.getName().equals("MAPREDUCE2")) {
        existingService = serviceInfo;
      }
    }

    Assert.assertNotNull(testService);
    Assert.assertNotNull(existingService);

    PropertyInfo testProperty = null;
    PropertyInfo existingProperty = null;
    for (PropertyInfo property : testService.getProperties()) {
      if (property.getName().equals("new-yarn-property")) {
        testProperty = property;
      }
    }
    for (PropertyInfo property : existingService.getProperties()) {
      if (property.getName().equals("mapreduce.map.log.level")) {
        existingProperty = property;
      }
    }
View Full Code Here

    }
  }
 
  @Test
  public void testMetricsJson() throws Exception {
    ServiceInfo svc = metaInfo.getService(STACK_NAME_HDP, "2.0.5", "HDFS");
    Assert.assertNotNull(svc);
    Assert.assertNotNull(svc.getMetricsFile());
   
    svc = metaInfo.getService(STACK_NAME_HDP, "2.0.6", "HDFS");
    Assert.assertNotNull(svc);
    Assert.assertNotNull(svc.getMetricsFile());
   
    List<MetricDefinition> list = metaInfo.getMetrics(STACK_NAME_HDP, "2.0.5", "HDFS", "NAMENODE", "Component");
    Assert.assertNotNull(list);
   
    list = metaInfo.getMetrics(STACK_NAME_HDP, "2.0.5", "HDFS", "DATANODE", "Component");
View Full Code Here
