Package org.apache.ambari.server.agent

Examples of org.apache.ambari.server.agent.ExecutionCommand


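    // Excerpt from a controller test: a custom action request is submitted and the
    // ExecutionCommand stored for the resulting task is checked for the expected
    // configurations and configuration tags (hdfs-site, hdfs-exclude-file).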
    RequestStatusResponse response = controller.createAction(request,
      requestProperties);

    List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
    Assert.assertNotNull(storedTasks);
    Assert.assertEquals(1, storedTasks.size());
    ExecutionCommand execCmd = storedTasks.get(0).getExecutionCommandWrapper().getExecutionCommand();
    Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-site"));
    Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-exclude-file"));
    Assert.assertEquals(host2, execCmd.getConfigurations().get("hdfs-exclude-file").get("datanodes"));
    Assert.assertEquals("c", execCmd.getConfigurations().get("hdfs-site").get("a"));
  }


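      // Excerpt from an upgrade test: each HostRoleCommand in the stage is inspected;
      // server actions must carry the CURRENT_STACK_VERSION and CLUSTER_NAME payload with
      // RoleCommand.EXECUTE, while host commands must carry source/target stack versions
      // with RoleCommand.UPGRADE.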
      int currRoleOrder = -1;
      for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
        if (command.getRole() == Role.AMBARI_SERVER_ACTION) {
          Assert.assertTrue(command.toString(), expectedTasks.isTaskExpected(command.getRole()));
          currRoleOrder = expectedTasks.getRoleOrder(command.getRole());
          ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
          Assert.assertTrue(
              execCommand.getCommandParams().containsKey(ServerAction.PayloadName.CURRENT_STACK_VERSION));
          Assert.assertTrue(
              execCommand.getCommandParams().containsKey(ServerAction.PayloadName.CLUSTER_NAME));
          Assert.assertEquals(RoleCommand.EXECUTE, execCommand.getRoleCommand());
        } else {
          Assert.assertTrue(command.toString(), expectedTasks.isTaskExpected(command.getRole(), command.getHostName()));
          currRoleOrder = expectedTasks.getRoleOrder(command.getRole());
          ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
          Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
          Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
          Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
        }
      }

      List<HostRoleCommand> commands = stage.getOrderedHostRoleCommands();
      Assert.assertTrue(commands.size() > 0);

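      // Building the ExecutionCommand for a custom action: configurations, configuration
      // tags, host-level and command parameters, and the cluster host info are attached.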
      Map<String, Map<String, String>> configTags = null;
      if (!actionContext.getServiceName().isEmpty()) {
        configTags = amcImpl.findConfigurationTagsWithOverrides(cluster, hostName);
      }

      ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
          actionContext.getActionName()).getExecutionCommand();

      execCmd.setConfigurations(configurations);
      execCmd.setConfigurationTags(configTags);
      execCmd.setHostLevelParams(hostLevelParams);
      execCmd.setCommandParams(actionContext.getParameters());
      execCmd.setServiceName(serviceName);
      execCmd.setComponentName(componentName);

      // Generate cluster host info
      execCmd.setClusterHostInfo(
          StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster, hostsMap, configuration));
    }
  }

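    // Test helper: adds a NAMENODE INSTALL command to a stage and fills its
    // ExecutionCommand with sample configurations, host-level and role parameters.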
    s.setStageId(stageId);
    long now = System.currentTimeMillis();
    s.addHostRoleExecutionCommand(hostname, Role.NAMENODE, RoleCommand.INSTALL,
        new ServiceComponentHostInstallEvent("NAMENODE", hostname, now, "HDP-1.2.0"),
        "cluster1", "HDFS");
    ExecutionCommand execCmd = s.getExecutionCommandWrapper(hostname, "NAMENODE").getExecutionCommand();
    execCmd.setCommandId(s.getActionId());
    List<String> slaveHostList = new ArrayList<String>();
    slaveHostList.add(hostname);
    slaveHostList.add("host2");
    Map<String, String> hdfsSite = new TreeMap<String, String>();
    hdfsSite.put("dfs.block.size", "2560000000");
    Map<String, Map<String, String>> configurations =
        new TreeMap<String, Map<String, String>>();
    configurations.put("hdfs-site", hdfsSite);
    execCmd.setConfigurations(configurations);
    Map<String, String> params = new TreeMap<String, String>();
    params.put("jdklocation", "/x/y/z");
    execCmd.setHostLevelParams(params);
    Map<String, String> roleParams = new TreeMap<String, String>();
    roleParams.put("format", "false");
    execCmd.setRoleParams(roleParams);
    return s;
  }

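    // Building the ExecutionCommand for a service component host: configurations and tags
    // are set (with passwords stripped for HIVE_CLIENT), and repository, JDK and database
    // information is passed down as host-level parameters.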
    stage.addHostRoleExecutionCommand(scHost.getHostName(),
        Role.valueOf(scHost.getServiceComponentName()), command,
        event, scHost.getClusterName(), scHost.getServiceName());
    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
        scHost.getServiceComponentName()).getExecutionCommand();

    Host host = clusters.getHost(scHost.getHostName());

    // Hack - Remove passwords from configs
    if (event.getServiceComponentName().equals(Role.HIVE_CLIENT.toString())) {
      configHelper.applyCustomConfig(configurations, Configuration.HIVE_CONFIG_TAG,
          Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "", true);
    }

    execCmd.setConfigurations(configurations);
    execCmd.setConfigurationTags(configTags);
    execCmd.setCommandParams(commandParams);

    // send stack info to agent
    StackId stackId = scHost.getDesiredStackVersion();
    Map<String, List<RepositoryInfo>> repos = ambariMetaInfo.getRepository(
        stackId.getStackName(), stackId.getStackVersion());
    String repoInfo = "";
    if (!repos.containsKey(host.getOsType())) {
      // FIXME should this be an error?
      LOG.warn("Could not retrieve repo information for host"
          + ", hostname=" + scHost.getHostName()
          + ", clusterName=" + cluster.getClusterName()
          + ", stackInfo=" + stackId.getStackId());
    } else {
      repoInfo = gson.toJson(repos.get(host.getOsType()));
    }

    if (LOG.isDebugEnabled()) {
      LOG.debug("Sending repo information to agent"
          + ", hostname=" + scHost.getHostName()
          + ", clusterName=" + cluster.getClusterName()
          + ", stackInfo=" + stackId.getStackId()
          + ", repoInfo=" + repoInfo);
    }

    Map<String, String> params = new TreeMap<String, String>();
    params.put("repo_info",       repoInfo);
    params.put("jdk_location",    jdkResourceUrl);
    params.put("stack_version",   stackId.getStackVersion());
    params.put("db_name",         serverDB);
    params.put("mysql_jdbc_url" , mysqljdbcUrl);
    params.put("oracle_jdbc_url", ojdbcUrl);

    if (configs.getServerDBName().equalsIgnoreCase(Configuration.ORACLE_DB_NAME)) {
      params.put("db_driver_filename", configs.getOjdbcJarName());
    } else if (configs.getServerDBName().equalsIgnoreCase(Configuration.MYSQL_DB_NAME)) {
      params.put("db_driver_filename", configs.getMySQLJarName());
    }
    execCmd.setHostLevelParams(params);

    Map<String, String> roleParams = new TreeMap<String, String>();
    execCmd.setRoleParams(roleParams);
  }

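    // Setting the stage id: the command id of every ExecutionCommand already wrapped in
    // this stage is updated to the action id derived from the request id and stage id.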
      throw new RuntimeException("Attempt to set stageId again! Not allowed.");
    }
    this.stageId = stageId;
    for (String host: this.commandsToSend.keySet()) {
      for (ExecutionCommandWrapper wrapper : this.commandsToSend.get(host)) {
        ExecutionCommand cmd = wrapper.getExecutionCommand();
        cmd.setCommandId(StageUtils.getActionId(requestId, stageId));
      }
    }
  }

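  // addHostRoleExecutionCommand: creates the HostRoleCommand and a new ExecutionCommand,
  // wires them together through an ExecutionCommandWrapper, and seeds hostname, cluster,
  // service, command id, role and role command.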
   * for a given stage.
   */
  public synchronized void addHostRoleExecutionCommand(String host, Role role, RoleCommand command,
      ServiceComponentHostEvent event, String clusterName, String serviceName) {
    HostRoleCommand hrc = new HostRoleCommand(host, role, event, command);
    ExecutionCommand cmd = new ExecutionCommand();
    ExecutionCommandWrapper wrapper = new ExecutionCommandWrapper(cmd);
    hrc.setExecutionCommandWrapper(wrapper);
    cmd.setHostname(host);
    cmd.setClusterName(clusterName);
    cmd.setServiceName(serviceName);
    cmd.setCommandId(this.getActionId());
    cmd.setRole(role.name());
    cmd.setRoleCommand(command);
   
    Map<String, HostRoleCommand> hrcMap = this.hostRoleCommands.get(host);
    if (hrcMap == null) {
      hrcMap = new TreeMap<String, HostRoleCommand>();
      this.hostRoleCommands.put(host, hrcMap);

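  // addServerActionCommand: same wiring as above, but with an empty service name and the
  // server action name passed through the role parameters (ServerAction.ACTION_NAME).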
  public synchronized void addServerActionCommand(
      String actionName, Role role, RoleCommand command, String clusterName,
      ServiceComponentHostUpgradeEvent event, String hostName) {
    HostRoleCommand hrc = new HostRoleCommand(hostName, role, event, command);
    ExecutionCommand cmd = new ExecutionCommand();
    ExecutionCommandWrapper wrapper = new ExecutionCommandWrapper(cmd);
    hrc.setExecutionCommandWrapper(wrapper);
    cmd.setHostname(hostName);
    cmd.setClusterName(clusterName);
    cmd.setServiceName("");
    cmd.setCommandId(this.getActionId());
    cmd.setRole(role.name());
    cmd.setRoleCommand(command);

    Map<String, String> roleParams = new HashMap<String, String>();
    roleParams.put(ServerAction.ACTION_NAME, actionName);
    cmd.setRoleParams(roleParams);
    Map<String, HostRoleCommand> hrcMap = this.hostRoleCommands.get(hostName);
    if (hrcMap == null) {
      hrcMap = new TreeMap<String, HostRoleCommand>();
      this.hostRoleCommands.put(hostName, hrcMap);
    }

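    // Excerpt from an upgrade test: after updating host components to a new desired stack,
    // every generated ExecutionCommand must carry source_stack_version and
    // target_stack_version parameters and use RoleCommand.UPGRADE.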
    sch1.refresh();
    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
    sch2.refresh();
    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
    for (HostRoleCommand command : stages.get(0).getOrderedHostRoleCommands()) {
      ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
      Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
      Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
      Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
    }

    sch1.setState(State.INSTALLED);
    sch1.setDesiredState(State.INSTALLED);
    sch2.setState(State.UPGRADING);
    sch2.setDesiredState(State.INSTALLED);
    sch3.setState(State.UPGRADING);
    sch3.setDesiredState(State.INSTALLED);

    sch3.setStackVersion(oldStack);
    sch3.setDesiredStackVersion(newStack);

    reqs.clear();
    req1 = new ServiceComponentHostRequest(clusterName, serviceName1,
        componentName1, host1,
        null, State.INSTALLED.toString());
    req1.setDesiredStackId("HDP-0.2");
    reqs.add(req1);
    req2 = new ServiceComponentHostRequest(clusterName, serviceName1,
        componentName1, host2,
        null, State.INSTALLED.toString());
    req2.setDesiredStackId("HDP-0.2");
    reqs.add(req2);
    req3 = new ServiceComponentHostRequest(clusterName, serviceName1,
        componentName2, host1,
        null, State.INSTALLED.toString());
    req3.setDesiredStackId("HDP-0.2");
    reqs.add(req3);

    resp = controller.updateHostComponents(reqs, Collections.<String, String>emptyMap(), true);
    stages = actionDB.getAllStages(resp.getRequestId());
    Assert.assertEquals(2, stages.size());
    Assert.assertEquals(2, stages.get(0).getOrderedHostRoleCommands().size());
    Assert.assertEquals("", stages.get(0).getRequestContext());
    Assert.assertEquals(State.UPGRADING, sch1.getState());
    Assert.assertEquals(State.UPGRADING, sch2.getState());
    Assert.assertEquals(State.UPGRADING, sch3.getState());
    sch1.refresh();
    Assert.assertTrue(sch1.getDesiredStackVersion().compareTo(newStack) == 0);
    sch2.refresh();
    Assert.assertTrue(sch2.getDesiredStackVersion().compareTo(newStack) == 0);
    sch3.refresh();
    Assert.assertTrue(sch3.getDesiredStackVersion().compareTo(newStack) == 0);
    for (Stage stage : stages) {
      for (HostRoleCommand command : stage.getOrderedHostRoleCommands()) {
        ExecutionCommand execCommand = command.getExecutionCommandWrapper().getExecutionCommand();
        Assert.assertTrue(execCommand.getCommandParams().containsKey("source_stack_version"));
        Assert.assertTrue(execCommand.getCommandParams().containsKey("target_stack_version"));
        Assert.assertEquals("{\"stackName\":\"HDP\",\"stackVersion\":\"0.2\"}",
            execCommand.getCommandParams().get("target_stack_version"));
        Assert.assertEquals(RoleCommand.UPGRADE, execCommand.getRoleCommand());
      }
    }
  }

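    // Excerpt from a test: checks the configuration tags and values on the HDFS command
    // for host3 and that MapReduce client commands were also generated.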
        }
      }
    }
    Assert.assertNotNull(hdfsCmdHost3);
    Assert.assertNotNull(hdfsCmdHost2);
    ExecutionCommand execCmd = hdfsCmdHost3.getExecutionCommandWrapper().getExecutionCommand();
    Assert.assertEquals(2, execCmd.getConfigurationTags().size());
    Assert.assertEquals("version122", execCmd.getConfigurationTags().get("core-site").get("tag"));
    Assert.assertEquals("d", execCmd.getConfigurations().get("core-site").get("c"));
    // Check if MapReduce client is reinstalled
    Assert.assertNotNull(mapRedCmdHost2);
    Assert.assertNotNull(mapRedCmdHost3);
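The excerpts above share one producer-side pattern: add a host role execution command to a Stage, retrieve the ExecutionCommand through its ExecutionCommandWrapper, then populate configurations and parameters before the command is dispatched to the agent. The following is a minimal sketch of that pattern, assuming the same APIs as in the excerpts; the helper name buildDatanodeInstallCommand, its arguments, and the surrounding class and imports are hypothetical.

  // Hypothetical helper condensing the producer-side pattern from the excerpts above.
  private ExecutionCommand buildDatanodeInstallCommand(Stage stage, String hostname,
      String clusterName, String jdkResourceUrl) {
    // Register the command on the stage; this creates the HostRoleCommand and its
    // ExecutionCommandWrapper (see addHostRoleExecutionCommand above).
    stage.addHostRoleExecutionCommand(hostname, Role.DATANODE, RoleCommand.INSTALL,
        new ServiceComponentHostInstallEvent("DATANODE", hostname,
            System.currentTimeMillis(), "HDP-1.2.0"),
        clusterName, "HDFS");

    // Pull the ExecutionCommand back out of its wrapper and populate it.
    ExecutionCommand cmd = stage.getExecutionCommandWrapper(hostname, "DATANODE")
        .getExecutionCommand();
    cmd.setCommandId(stage.getActionId());

    // Attach configurations keyed by config type, as in the test helper above.
    Map<String, String> hdfsSite = new TreeMap<String, String>();
    hdfsSite.put("dfs.block.size", "134217728");
    Map<String, Map<String, String>> configurations =
        new TreeMap<String, Map<String, String>>();
    configurations.put("hdfs-site", hdfsSite);
    cmd.setConfigurations(configurations);

    // Host-level parameters such as the JDK location travel alongside the configs.
    Map<String, String> hostLevelParams = new TreeMap<String, String>();
    hostLevelParams.put("jdk_location", jdkResourceUrl);
    cmd.setHostLevelParams(hostLevelParams);

    cmd.setRoleParams(new TreeMap<String, String>());
    return cmd;
  }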
