Examples of YarnClientApplication


Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

            + ", userAcl=" + userAcl.name());
      }
    }   

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
      LOG.info("AM memory specified above max threshold of cluster. Using max value."
          + ", specified=" + amMemory
          + ", max=" + maxMem);
      amMemory = maxMem;
    }       

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);
   
    if (amVCores > maxVCores) {
      LOG.info("AM virtual cores specified above max threshold of cluster. "
          + "Using max value." + ", specified=" + amVCores
          + ", max=" + maxVCores);
      amVCores = maxVCores;
    }
   
    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);
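
A consolidated sketch of the same flow may help tie the fragments on this page together. It is illustrative only: the class name MinimalYarnSubmitter, the "default" queue, and the appName/amCommand/requestedMemMb/requestedVCores parameters are assumptions, not code from any of the projects quoted here.

    import java.util.Collections;

    import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.Resource;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.client.api.YarnClientApplication;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.util.Records;

    public class MinimalYarnSubmitter {

      public static ApplicationId submit(String appName, String amCommand,
          int requestedMemMb, int requestedVCores) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration());
        yarnClient.start();

        // Ask the RM for a new application and read back the per-container limits.
        YarnClientApplication app = yarnClient.createApplication();
        GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
        Resource max = appResponse.getMaximumResourceCapability();

        // Clamp the AM ask so it never exceeds what a single container may hold.
        int amMemory = Math.min(requestedMemMb, max.getMemory());
        int amVCores = Math.min(requestedVCores, max.getVirtualCores());

        // The AM container here simply runs the supplied command.
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        amContainer.setCommands(Collections.singletonList(amCommand));

        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        appContext.setApplicationName(appName);
        appContext.setAMContainerSpec(amContainer);
        appContext.setResource(Resource.newInstance(amMemory, amVCores));
        appContext.setQueue("default");

        yarnClient.submitApplication(appContext);
        return appContext.getApplicationId();
      }
    }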

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

  }

  @Override
  public ProcessLauncher<ApplicationId> createLauncher(TwillSpecification twillSpec) throws Exception {
    // Request for new application
    YarnClientApplication application = yarnClient.createApplication();
    final GetNewApplicationResponse response = application.getNewApplicationResponse();
    final ApplicationId appId = response.getApplicationId();

    // Setup the context for application submission
    final ApplicationSubmissionContext appSubmissionContext = application.getApplicationSubmissionContext();
    appSubmissionContext.setApplicationId(appId);
    appSubmissionContext.setApplicationName(twillSpec.getName());

    ApplicationSubmitter submitter = new ApplicationSubmitter() {
      @Override

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

    // init our connection to YARN ResourceManager RPC
    LOG.info("Running Client");
    yarnClient.start();
    // request a new application id from the RM
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse getNewAppResponse = app.
      getNewApplicationResponse();
    checkPerNodeResourcesAvailable(getNewAppResponse);
    // configure our request for an exec container for GiraphApplicationMaster
    ApplicationSubmissionContext appContext = app.
      getApplicationSubmissionContext();
    appId = appContext.getApplicationId();
    //createAppSubmissionContext(appContext);
    appContext.setApplicationId(appId);
    appContext.setApplicationName(jobName);
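
The checkPerNodeResourcesAvailable(...) helper called above is not shown on this page. A plausible sketch of such a check follows; the field names heapMbPerTask and coresPerTask are hypothetical placeholders for the client's per-container ask, not the actual Giraph fields.

    // Hypothetical sketch of a per-node resource check; field names are illustrative.
    private void checkPerNodeResourcesAvailable(GetNewApplicationResponse response) {
      Resource max = response.getMaximumResourceCapability();
      if (heapMbPerTask > max.getMemory() || coresPerTask > max.getVirtualCores()) {
        throw new IllegalStateException("Requested container size (" + heapMbPerTask
            + " MB, " + coresPerTask + " vcores) exceeds the cluster's per-container maximum ("
            + max.getMemory() + " MB, " + max.getVirtualCores() + " vcores).");
      }
    }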

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

    System.out.println("\tJobManager memory = "+jmMemory);
    System.out.println("\tTaskManager memory = "+tmMemory);
    System.out.println("\tTaskManager cores = "+tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if(tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
      LOG.error("The cluster does not have the requested resources for the TaskManagers available!\n"
          + "Maximum Memory: "+maxRes.getMemory() +", Maximum Cores: "+tmCores);
      yarnClient.stop();
      System.exit(1);
    }
    if(jmMemory > maxRes.getMemory() ) {
      LOG.error("The cluster does not have the requested resources for the JobManager available!\n"
          + "Maximum Memory: "+maxRes.getMemory());
      yarnClient.stop();
      System.exit(1);
    }
    int totalMemoryRequired = jmMemory + tmMemory * taskManagerCount;
    ClusterResourceDescription freeClusterMem = getCurrentFreeClusterResources(yarnClient);
    if(freeClusterMem.totalFreeMemory < totalMemoryRequired) {
      LOG.error("This YARN session requires "+totalMemoryRequired+"MB of memory in the cluster. "
          + "There are currently only "+freeClusterMem.totalFreeMemory+"MB available.");
      yarnClient.stop();
      System.exit(1);
    }
    if( tmMemory > freeClusterMem.containerLimit) {
      LOG.error("The requested amount of memory for the TaskManagers ("+tmMemory+"MB) is more than "
          + "the largest possible YARN container: "+freeClusterMem.containerLimit);
      yarnClient.stop();
      System.exit(1);
    }
    if( jmMemory > freeClusterMem.containerLimit) {
      LOG.error("The requested amount of memory for the JobManager ("+jmMemory+"MB) is more than "
          + "the largest possible YARN container: "+freeClusterMem.containerLimit);
      yarnClient.stop();
      System.exit(1);
    }

    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.FLINK_JVM_OPTIONS, "");

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records
        .newRecord(ContainerLaunchContext.class);

    String amCommand = "$JAVA_HOME/bin/java"
          + " -Xmx"+Utils.calculateHeapSize(jmMemory)+"M " +javaOpts;
    if(hasLogback || hasLog4j) {
      amCommand += " -Dlog.file=\""+ApplicationConstants.LOG_DIR_EXPANSION_VAR +"/jobmanager-main.log\"";
    }
    if(hasLogback) {
      amCommand += " -Dlogback.configurationFile=file:logback.xml";
    }
    if(hasLog4j) {
      amCommand += " -Dlog4j.configuration=file:log4j.properties";
    }
   
    amCommand   += " "+ApplicationMaster.class.getName()+" "
          + " 1>"
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log"
          + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));

    System.err.println("amCommand="+amCommand);

    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();
    /**
     * All network ports are offset by the application number
     * to avoid port clashes when running multiple Flink sessions
     * in parallel
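
The Utils.calculateHeapSize(jmMemory) call above derives the JVM -Xmx value from the container size; its exact formula is not shown here. The sketch below only illustrates the general idea, with an assumed headroom ratio and minimum, so the heap plus off-heap overhead stays inside the YARN container limit.

    // Sketch only: reserve part of the container for JVM/off-heap overhead so -Xmx
    // stays safely below the YARN container size. The ratio and minimum are
    // assumptions, not Flink's actual constants.
    static int calculateHeapSize(int containerMemoryMb) {
      final double cutoffRatio = 0.25;   // assumed headroom fraction
      final int minCutoffMb = 384;       // assumed minimum headroom in MB
      int cutoff = Math.max((int) (containerMemoryMb * cutoffRatio), minCutoffMb);
      return containerMemoryMb - cutoff;
    }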

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
   
    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer =
        Records.newRecord(ContainerLaunchContext.class);
    amContainer.setCommands(
        Collections.singletonList(
            "$JAVA_HOME/bin/java" +
            " -Xmx256M" +
            " com.hortonworks.simpleyarnapp.ApplicationMaster" +
            " " + command +
            " " + String.valueOf(n) +
            " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" +
            " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"
            )
        );
   
    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    setupAppMasterJar(jarPath, appMasterJar);
    amContainer.setLocalResources(
        Collections.singletonMap("simpleapp.jar", appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    setupAppMasterEnv(appMasterEnv);
    amContainer.setEnvironment(appMasterEnv);
   
    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext =
    app.getApplicationSubmissionContext();
    appContext.setApplicationName("simple-yarn-app"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue
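
The snippet is cut off before submission. A typical continuation, shown here only as a sketch, submits the prepared context and polls the application report until a terminal state is reached.

    // Sketch of the usual continuation: submit, then poll until the app terminates.
    ApplicationId appId = appContext.getApplicationId();
    System.out.println("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    ApplicationReport report = yarnClient.getApplicationReport(appId);
    YarnApplicationState state = report.getYarnApplicationState();
    while (state != YarnApplicationState.FINISHED
        && state != YarnApplicationState.KILLED
        && state != YarnApplicationState.FAILED) {
      Thread.sleep(1000);                              // simple polling interval
      report = yarnClient.getApplicationReport(appId);
      state = report.getYarnApplicationState();
    }
    System.out.println("Application " + appId + " finished with state " + state);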

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

    System.out.println("\tJobManager memory = "+jmMemory);
    System.out.println("\tTaskManager memory = "+tmMemory);
    System.out.println("\tTaskManager cores = "+tmCores);

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    Resource maxRes = appResponse.getMaximumResourceCapability();
    if(tmMemory > maxRes.getMemory() || tmCores > maxRes.getVirtualCores()) {
      LOG.fatal("The cluster does not have the requested resources for the TaskManagers available!\n"
          + "Maximum Memory: "+maxRes.getMemory() +", Maximum Cores: "+tmCores);
      yarnClient.stop();
      System.exit(1);
    }
    if(jmMemory > maxRes.getMemory() ) {
      LOG.fatal("The cluster does not have the requested resources for the JobManager available!\n"
          + "Maximum Memory: "+maxRes.getMemory());
      yarnClient.stop();
      System.exit(1);
    }
   
    // respect custom JVM options in the YAML file
    final String javaOpts = GlobalConfiguration.getString(ConfigConstants.STRATOSPHERE_JVM_OPTIONS, "");
   
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records
        .newRecord(ContainerLaunchContext.class);
   
    String amCommand = "$JAVA_HOME/bin/java"
          + " -Xmx"+jmMemory+"M " +javaOpts;
    if(hasLog4j) {
      amCommand   += " -Dlog.file=\""+ApplicationConstants.LOG_DIR_EXPANSION_VAR +"/jobmanager-log4j.log\" -Dlog4j.configuration=file:log4j.properties";
    }
    amCommand   += " eu.stratosphere.yarn.ApplicationMaster" + " "
          + " 1>"
          + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stdout.log"
          + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager-stderr.log";
    amContainer.setCommands(Collections.singletonList(amCommand));
   
    System.err.println("amCommand="+amCommand);
   
    // Set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    final ApplicationId appId = appContext.getApplicationId();
   
    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    LocalResource stratosphereConf = Records.newRecord(LocalResource.class);
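
The snippet stops right after the LocalResource records are created. A hedged sketch of the next step, describing a jar that already sits on HDFS so the NodeManager localizes it into the AM container, follows; the HDFS path, the "app.jar" link name, and the assumption that conf is the client's Hadoop Configuration are illustrative, not the project's actual code.

    // Sketch: describe a jar on HDFS as a LocalResource for the AM container.
    Path jarOnHdfs = new Path("hdfs:///apps/myapp/app.jar");   // assumed location
    FileSystem fs = FileSystem.get(conf);
    FileStatus jarStatus = fs.getFileStatus(jarOnHdfs);

    LocalResource appJar = Records.newRecord(LocalResource.class);
    appJar.setResource(ConverterUtils.getYarnUrlFromPath(jarOnHdfs));
    appJar.setSize(jarStatus.getLen());
    appJar.setTimestamp(jarStatus.getModificationTime());
    appJar.setType(LocalResourceType.FILE);
    appJar.setVisibility(LocalResourceVisibility.APPLICATION);

    amContainer.setLocalResources(Collections.singletonMap("app.jar", appJar));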

Examples of org.apache.hadoop.yarn.client.api.YarnClientApplication

  @SuppressWarnings({ "unchecked", "deprecation" })
  private void launchApp(String appName, String queue, Integer amMB,
      String storm_zip_location) throws Exception {
    LOG.debug("JStormOnYarn:launchApp() ...");
    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();
    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    if (amMB > app.getMaximumResourceCapability().getMemory()) {
      // TODO need some sanity checks
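
The TODO above hints at a missing sanity check. A minimal sketch of such a check, clamping the ApplicationMaster memory ask to the cluster's per-container maximum, might look like the following (LOG and amMB come from the surrounding method; the clamping policy itself is an assumption).

    // Sketch of the sanity check hinted at by the TODO: clamp the AM memory ask.
    int maxMem = app.getMaximumResourceCapability().getMemory();
    if (amMB > maxMem) {
      LOG.warn("Requested AM memory " + amMB + " MB exceeds cluster maximum "
          + maxMem + " MB; using " + maxMem + " MB instead.");
      amMB = maxMem;
    }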