Package org.apache.slider.core.conf

Examples of org.apache.slider.core.conf.MapOperations

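MapOperations is Slider's string-keyed option map: it behaves like a Map<String, String> but adds typed getters, defaulted lookups, mandatory-option checks, and a non-destructive merge. Before the real-world excerpts, here is a minimal sketch of that API assembled from the calls visible on this page; the option keys are invented for illustration, and the exact signatures and exception type (BadConfigException) are assumptions inferred from how the excerpts use them.

    import org.apache.slider.core.conf.MapOperations;
    import org.apache.slider.core.exceptions.BadConfigException;

    public class MapOperationsSketch {
      public static void main(String[] args) throws BadConfigException {
        // an empty option map, built up key by key
        MapOperations options = new MapOperations();
        options.put("internal.provider.name", "agent");
        options.set("am.rpc.port", 8030);                   // set() stringifies non-String values

        // fold in defaults without clobbering anything already set
        MapOperations defaults = new MapOperations();
        defaults.put("internal.provider.name", "docker");   // ignored: key already present
        defaults.put("internal.am.tmp.dir", "/tmp/slider"); // merged in
        options.mergeWithoutOverwrite(defaults);

        // mandatory, typed, and defaulted reads
        String provider = options.getMandatoryOption("internal.provider.name"); // throws if absent
        int port = options.getOptionInt("am.rpc.port", 0);
        String tmpDir = options.getOption("internal.am.tmp.dir", "/tmp");

        // prints: provider=agent port=8030 tmpDir=/tmp/slider
        System.out.printf("provider=%s port=%d tmpDir=%s%n", provider, port, tmpDir);
      }
    }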

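The first and longest excerpt appears to come from the Slider application master's launch sequence: a fresh MapOperations, appInformation, is filled in step by step (container and attempt IDs, RPC endpoint, web URL and port, RM-imposed resource limits) and finally handed to appState.buildInstance so the details surface in the cluster status.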
    Path clusterDirPath = new Path(sliderClusterURI);
    SliderFileSystem fs = getClusterFS();

    // build up information about the running application - this
    // will be passed down to the cluster status
    MapOperations appInformation = new MapOperations();

    AggregateConf instanceDefinition =
      InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath);
    instanceDefinition.setName(clustername);

    log.info("Deploying cluster {}:", instanceDefinition);

    stateForProviders.setApplicationName(clustername);
   
    // triggers resolution and snapshotting in agent
    appState.updateInstanceDefinition(instanceDefinition);
    File confDir = getLocalConfDir();
    if (!confDir.exists() || !confDir.isDirectory()) {
      log.info("Conf dir {} does not exist.", confDir);
      File parentFile = confDir.getParentFile();
      log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile));
    }

    Configuration serviceConf = getConfig();
    // Try to get the proper filtering of static resources through the yarn proxy working
    serviceConf.set("hadoop.http.filter.initializers",
                    SliderAmFilterInitializer.NAME);
    serviceConf.set(SliderAmIpFilter.WS_CONTEXT_ROOT, WS_CONTEXT_ROOT);
   
    conf = new YarnConfiguration(serviceConf);
    //get our provider
    MapOperations globalInternalOptions =
      instanceDefinition.getInternalOperations().getGlobalOptions();
    String providerType = globalInternalOptions.getMandatoryOption(
      OptionKeys.INTERNAL_PROVIDER_NAME);
    log.info("Cluster provider type is {}", providerType);
    SliderProviderFactory factory =
      SliderProviderFactory.createSliderProviderFactory(
          providerType);
    providerService = factory.createServerProvider();
    // init the provider BUT DO NOT START IT YET
    initAndAddService(providerService);
    // create a slider AM provider
    sliderAMProvider = new SliderAMProviderService();
    initAndAddService(sliderAMProvider);
   
    InetSocketAddress address = SliderUtils.getRmSchedulerAddress(conf);
    log.info("RM is at {}", address);
    yarnRPC = YarnRPC.create(conf);

    /*
     * Extract the container ID. This is then
     * turned into an (incomplete) container
     */
    appMasterContainerID = ConverterUtils.toContainerId(
      SliderUtils.mandatoryEnvVariable(
          ApplicationConstants.Environment.CONTAINER_ID.name())
                                                       );
    appAttemptID = appMasterContainerID.getApplicationAttemptId();

    ApplicationId appid = appAttemptID.getApplicationId();
    log.info("AM for ID {}", appid.getId());

    appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID,
                       appMasterContainerID.toString());
    appInformation.put(StatusKeys.INFO_AM_APP_ID,
                       appid.toString());
    appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID,
                       appAttemptID.toString());

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    Credentials credentials =
      currentUser.getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    dob.close();
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
      Token<?> token = iter.next();
      log.info("Token {}", token.getKind());
      if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        iter.remove();
      }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
   
    // set up secret manager
    secretManager = new ClientToAMTokenSecretManager(appAttemptID, null);

    // if not a secure cluster, extract the username - it will be
    // propagated to workers
    if (!UserGroupInformation.isSecurityEnabled()) {
      hadoop_user_name = System.getenv(HADOOP_USER_NAME);
      service_user_name = hadoop_user_name;
      log.info(HADOOP_USER_NAME + "='{}'", hadoop_user_name);
    } else {
      service_user_name = UserGroupInformation.getCurrentUser().getUserName();
    }

    Map<String, String> envVars;

    /*
     * It is critical this section is synchronized, to stop async AM events
     * arriving while registering a restarting AM.
     */
    synchronized (appState) {
      int heartbeatInterval = HEARTBEAT_INTERVAL;

      // add the RM client - this brings the callbacks in
      asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval,
                                                            this);
      addService(asyncRMClient);
      //wrap it for the app state model
      rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient);
      //now bring it up
      deployChildService(asyncRMClient);


      //nmclient relays callbacks back to this class
      nmClientAsync = new NMClientAsyncImpl("nmclient", this);
      deployChildService(nmClientAsync);

      //bring up the Slider RPC service
      startSliderRPCServer();

      rpcServiceAddress = rpcService.getConnectAddress();
      appMasterHostname = rpcServiceAddress.getHostName();
      appMasterRpcPort = rpcServiceAddress.getPort();
      appMasterTrackingUrl = null;
      log.info("AM Server is listening at {}:{}", appMasterHostname,
               appMasterRpcPort);
      appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname);
      appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort);

     
      //registry
      registry = startRegistrationService();

      //build the role map
      List<ProviderRole> providerRoles =
        new ArrayList<>(providerService.getRoles());
      providerRoles.addAll(SliderAMClientProvider.ROLES);

      // Start up the WebApp and track the URL for it
      webApp = new SliderAMWebApp(registry);
      WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class,
          new WebAppApiImpl(this, stateForProviders, providerService),
          RestPaths.WS_CONTEXT)
                      .with(serviceConf)
                      .start(webApp);
      appMasterTrackingUrl = "http://" + appMasterHostname + ":" + webApp.port();
      WebAppService<SliderAMWebApp> webAppService =
        new WebAppService<>("slider", webApp);

      webAppService.init(conf);
      webAppService.start();
      addService(webAppService);

      appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/");
      appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port());     

      // Register self with ResourceManager
      // This will start heartbeating to the RM
      // address = SliderUtils.getRmSchedulerAddress(asyncRMClient.getConfig());
      log.info("Connecting to RM at {},address tracking URL={}",
               appMasterRpcPort, appMasterTrackingUrl);
      RegisterApplicationMasterResponse response = asyncRMClient
        .registerApplicationMaster(appMasterHostname,
                                   appMasterRpcPort,
                                   appMasterTrackingUrl);
      Resource maxResources =
        response.getMaximumResourceCapability();
      containerMaxMemory = maxResources.getMemory();
      containerMaxCores = maxResources.getVirtualCores();
      appState.setContainerLimits(maxResources.getMemory(),
                                  maxResources.getVirtualCores());
      // set the RM-defined maximum cluster values
      appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores));
      appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory));
     
      boolean securityEnabled = UserGroupInformation.isSecurityEnabled();
      if (securityEnabled) {
        secretManager.setMasterKey(
          response.getClientToAMTokenMasterKey().array());
        applicationACLs = response.getApplicationACLs();

        //tell the server what the ACLs are
        rpcService.getServer().refreshServiceAcl(conf, new SliderAMPolicyProvider());
      }

      // extract container list
      List<Container> liveContainers =
          response.getContainersFromPreviousAttempts();

      //now validate the installation
      Configuration providerConf =
        providerService.loadProviderConfigurationInformation(confDir);

      providerService.validateApplicationConfiguration(instanceDefinition,
                                                       confDir,
                                                       securityEnabled);

      //determine the location for the role history data
      Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME);

      //build the instance
      appState.buildInstance(instanceDefinition,
                             providerConf,
                             providerRoles,
                             fs.getFileSystem(),
                             historyDir,
                             liveContainers,
                             appInformation);

      // add the AM to the list of nodes in the cluster
     
      appState.buildAppMasterNode(appMasterContainerID,
                                  appMasterHostname,
                                  webApp.port(),
                                  appMasterHostname + ":" + webApp.port());

      // build up environment variables that the AM wants set in every container
      // irrespective of provider and role.
      envVars = new HashMap<>();
      if (hadoop_user_name != null) {
        envVars.put(HADOOP_USER_NAME, hadoop_user_name);
      }
    }
    String rolesTmpSubdir = appMasterContainerID.toString() + "/roles";

    String amTmpDir = globalInternalOptions.getMandatoryOption(OptionKeys.INTERNAL_AM_TMP_DIR);

    Path tmpDirPath = new Path(amTmpDir);
    Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir);
    fs.getFileSystem().mkdirs(launcherTmpDirPath);
   

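This fragment from the application state model shows the per-component side of the API: resources.getComponent(name) yields a MapOperations for each newly declared role, from which a dynamic ProviderRole is built, and the internal global options then supply container-failure thresholds through typed, defaulted reads.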

    Set<String> roleNames = resources.getComponentNames();
    for (String name : roleNames) {
      if (!roles.containsKey(name)) {
        // this is a new value
        log.info("Adding new role {}", name);
        MapOperations resComponent = resources.getComponent(name);
        ProviderRole dynamicRole = createDynamicProviderRole(name, resComponent);
        buildRole(dynamicRole);
        providerRoles.add(dynamicRole);
      }
    }
    //then pick up the requirements
    buildRoleRequirementsFromResources();


    // set the container lifespan and failure thresholds
    MapOperations globalInternalOpts =
      instanceDefinition.getInternalOperations().getGlobalOptions();
    startTimeThreshold = globalInternalOpts.getOptionInt(
      OptionKeys.INTERNAL_CONTAINER_FAILURE_SHORTLIFE,
      OptionKeys.DEFAULT_CONTAINER_FAILURE_SHORTLIFE);
   
    failureThreshold = globalInternalOpts.getOptionInt(
      OptionKeys.INTERNAL_CONTAINER_FAILURE_THRESHOLD,
      OptionKeys.DEFAULT_CONTAINER_FAILURE_THRESHOLD);
    initClusterStatus();


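initClusterStatus demonstrates the wrapping constructor: MapOperations("info", status.info) is a write-through view over the ClusterDescription's info map, so mergeWithoutOverwrite folds applicationInfo in without clobbering entries that are already present.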
  public void initClusterStatus() {
    //copy into cluster status.
    ClusterDescription status = ClusterDescription.copy(clusterSpec);
    status.state = ClusterDescription.STATE_CREATED;
    MapOperations infoOps = new MapOperations("info", status.info);
    infoOps.mergeWithoutOverwrite(applicationInfo);
    SliderUtils.addBuildInfo(infoOps, "status");

    long now = now();
    status.setInfoTime(StatusKeys.INFO_LIVE_TIME_HUMAN,
                       StatusKeys.INFO_LIVE_TIME_MILLIS,
                       now);

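Here MapOperations backs per-role lookups while the status is being built: getComponentOptInt reads each role's desired instance count, defaulting to 0, with a warning logged when no instances are requested.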
    // Add all the existing roles
    for (RoleStatus roleStatus : getRoleStatusMap().values()) {
      int currentDesired = roleStatus.getDesired();
      String role = roleStatus.getName();
      MapOperations comp =
        resources.getComponent(role);
      int desiredInstanceCount =
        resources.getComponentOptInt(role, ResourceKeys.COMPONENT_INSTANCES, 0);
      if (desiredInstanceCount == 0) {
        log.warn("Role {} has 0 instances specified", role);

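The cluster-status refresh path uses the same merge pattern: provider-supplied entries are copied into the ClusterDescription first, then the wrapped info map absorbs applicationInfo without overwriting.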
    if (providerStatus != null) {
      for (Map.Entry<String, String> entry : providerStatus.entrySet()) {
        cd.setInfo(entry.getKey(), entry.getValue());
      }
    }
    MapOperations infoOps = new MapOperations("info",cd.info);
    infoOps.mergeWithoutOverwrite(applicationInfo);
    SliderUtils.addBuildInfo(infoOps, "status");
    cd.statistics = new HashMap<>();

    // build the map of node -> container IDs
    Map<String, List<String>> instanceMap = createRoleToInstanceMap();

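On the client side, the resolved instance definition's global internal options drive launch: getMandatoryOption fetches the generated-conf path, the snapshot path, and the provider name, while getOrAddComponent returns (creating if needed) the AM's own resource component.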
    instanceDefinition.resolve();
    launchedInstanceDefinition = instanceDefinition;

    ConfTreeOperations internalOperations =
      instanceDefinition.getInternalOperations();
    MapOperations internalOptions = internalOperations.getGlobalOptions();
    ConfTreeOperations resourceOperations =
      instanceDefinition.getResourceOperations();
    ConfTreeOperations appOperations =
      instanceDefinition.getAppConfOperations();
    Path generatedConfDirPath =
      createPathThatMustExist(internalOptions.getMandatoryOption(
        OptionKeys.INTERNAL_GENERATED_CONF_PATH));
    Path snapshotConfPath =
      createPathThatMustExist(internalOptions.getMandatoryOption(
        OptionKeys.INTERNAL_SNAPSHOT_CONF_PATH));


    // cluster provider
    AbstractClientProvider provider = createClientProvider(
      internalOptions.getMandatoryOption(
        OptionKeys.INTERNAL_PROVIDER_NAME));
    // make sure the conf dir is valid
   
    // now build up the image path
    // TODO: consider supporting apps that don't have an image path
    Path imagePath =
      SliderUtils.extractImagePath(sliderFileSystem, internalOptions);
    if (log.isDebugEnabled()) {
      log.debug(instanceDefinition.toString());
    }
    MapOperations sliderAMResourceComponent =
      resourceOperations.getOrAddComponent(SliderKeys.COMPONENT_AM);

    // add the tags if available
    Set<String> applicationTags = provider.getApplicationTags(sliderFileSystem,
        appOperations.getGlobalOptions().get(AgentKeys.APP_DEF));

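MapOperations also works as a lightweight fixture in tests: empty resource and app-component maps are handed to the code under test alongside EasyMock-style mocks of SliderFileSystem and ContainerLauncher.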
    Container container = createNiceMock(Container.class);
    String role = "HBASE_MASTER";
    SliderFileSystem sliderFileSystem = createNiceMock(SliderFileSystem.class);
    ContainerLauncher launcher = createNiceMock(ContainerLauncher.class);
    Path generatedConfPath = new Path(".", "test");
    MapOperations resourceComponent = new MapOperations();
    MapOperations appComponent = new MapOperations();
    Path containerTmpDirPath = new Path(".", "test");
    FileSystem mockFs = new MockFileSystem();
    expect(sliderFileSystem.getFileSystem())
        .andReturn(new FilterFileSystem(mockFs)).anyTimes();
    expect(sliderFileSystem.createAmResource(anyObject(Path.class),

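A two-role variant of the same mock setup, exercising both HBASE_MASTER and HBASE_REGIONSERVER: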
    String role_hm = "HBASE_MASTER";
    String role_hrs = "HBASE_REGIONSERVER";
    SliderFileSystem sliderFileSystem = createNiceMock(SliderFileSystem.class);
    ContainerLauncher launcher = createNiceMock(ContainerLauncher.class);
    Path generatedConfPath = new Path(".", "test");
    MapOperations resourceComponent = new MapOperations();
    MapOperations appComponent = new MapOperations();
    Path containerTmpDirPath = new Path(".", "test");
    FileSystem mockFs = new MockFileSystem();
    expect(sliderFileSystem.getFileSystem())
        .andReturn(new FilterFileSystem(mockFs)).anyTimes();
    expect(sliderFileSystem.createAmResource(anyObject(Path.class),

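From the Accumulo provider's container launch path: getOption reads a HADOOP_HOME override from the global app options, defaulting to the YARN HADOOP_COMMON_HOME expansion, while getMandatoryOption insists that a ZooKeeper home is configured.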
    launcher.setEnv(ACCUMULO_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    ConfTreeOperations appConf =
      instanceDefinition.getAppConfOperations();
    String hadoop_home =
      ApplicationConstants.Environment.HADOOP_COMMON_HOME.$();
    MapOperations appConfGlobal = appConf.getGlobalOptions();
    hadoop_home = appConfGlobal.getOption(OPTION_HADOOP_HOME, hadoop_home);
    launcher.setEnv(HADOOP_HOME, hadoop_home);
    launcher.setEnv(HADOOP_PREFIX, hadoop_home);
   
    // ACCUMULO_HOME is deliberately left unset: the Accumulo script will then
    // compute it on its own as an absolute path.

    launcher.setEnv(ACCUMULO_CONF_DIR,
            ProviderUtils.convertToAppRelativePath(
              SliderKeys.PROPAGATED_CONF_DIR_NAME));
    launcher.setEnv(ZOOKEEPER_HOME, appConfGlobal.getMandatoryOption(OPTION_ZK_HOME));

    //local resources


    //add the configuration resources

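The same provider's command-building path (the excerpt opens mid-signature; the elided parameters carry the instance definition, conf dir, and env map) layers environment defaults, config overrides, and local-directory validation: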
      String... commands) throws IOException, SliderException {
    env.put(ACCUMULO_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    String hadoop_home = System.getenv(HADOOP_HOME);
    MapOperations globalOptions =
      instance.getAppConfOperations().getGlobalOptions();
    hadoop_home = globalOptions.getOption(OPTION_HADOOP_HOME, hadoop_home);
    if (hadoop_home == null) {
      throw new BadConfigException(
        "Undefined env variable/config option: " + HADOOP_HOME);
    }
    ProviderUtils.validatePathReferencesLocalDir("HADOOP_HOME", hadoop_home);
    env.put(HADOOP_HOME, hadoop_home);
    env.put(HADOOP_PREFIX, hadoop_home);
    // build up the ACCUMULO_HOME env variable to be absolute or relative
    String accumulo_home = providerUtils.buildPathToHomeDir(instance,
      "bin", "accumulo");
    File image = new File(accumulo_home);
    String accumuloPath = image.getAbsolutePath();
    env.put(ACCUMULO_HOME, accumuloPath);
    ProviderUtils.validatePathReferencesLocalDir("ACCUMULO_HOME", accumuloPath);
    env.put(ACCUMULO_CONF_DIR, confDir.getAbsolutePath());
    String zkHome = globalOptions.getMandatoryOption(OPTION_ZK_HOME);
    ProviderUtils.validatePathReferencesLocalDir("ZOOKEEPER_HOME", zkHome);

    env.put(ZOOKEEPER_HOME, zkHome);

