Package org.apache.hadoop.yarn

Examples of org.apache.hadoop.yarn.YarnException

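The snippets below share one pattern: a checked exception (usually an IOException, sometimes an InterruptedException) is caught and rethrown wrapped in a YarnException. In this version of the YARN API, org.apache.hadoop.yarn.YarnException extends RuntimeException (later releases renamed it YarnRuntimeException), so callers do not have to declare or catch it. A minimal, self-contained sketch of the pattern follows; the class and method names are illustrative, not taken from Hadoop:

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.YarnException;

public class YarnExceptionWrapSketch {

  // Resolve the current user's name, converting the checked IOException from
  // UserGroupInformation.getCurrentUser() into an unchecked YarnException.
  public static String currentUserName() {
    try {
      return UserGroupInformation.getCurrentUser().getUserName();
    } catch (IOException e) {
      // Callers see an unchecked exception that still carries the original cause.
      throw new YarnException("Failed to determine the current user", e);
    }
  }
}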

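      // Excerpt: the current user is looked up via UserGroupInformation.getCurrentUser();
      // the checked IOException is rethrown as a YarnException before the TestJob is built.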
      String diagnostic) {
    UserGroupInformation currentUser = null;
    try {
      currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
      throw new YarnException(e);
    }
    Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
        getDispatcher().getEventHandler(),
            getTaskAttemptListener(), getContext().getClock(),
            isNewApiCommitter(), currentUser.getUserName(), getContext(),


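      // Excerpt: a manually driven scheduler heartbeat; any failure is logged and wrapped
      // in a YarnException, after which the queued container-assignment events are drained.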
      // run the scheduler
      try {
        super.heartbeat();
      } catch (Exception e) {
        LOG.error("Error in heartbeat", e);
        throw new YarnException(e);
      }

      List<TaskAttemptContainerAssignedEvent> result
        = new ArrayList<TaskAttemptContainerAssignedEvent>(events);
      events.clear();

      }

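      // Excerpt: the RPC server is started and its connect address recorded; an
      // IOException during setup is rethrown as a YarnException.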
      server.start();
      this.address = NetUtils.getConnectAddress(server);
    } catch (IOException e) {
      throw new YarnException(e);
    }
  }

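      // Excerpt from job-history setup: the "done" and "intermediate done" directories are
      // created up front; an IOException is rethrown as a YarnException naming the
      // directory that could not be created.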
      doneDirFc = FileContext.getFileContext(doneDirPrefixPath.toUri(), conf);
      doneDirFc.setUMask(JobHistoryUtils.HISTORY_DONE_DIR_UMASK);
      mkdir(doneDirFc, doneDirPrefixPath, new FsPermission(
          JobHistoryUtils.HISTORY_DONE_DIR_PERMISSION));
    } catch (IOException e) {
      throw new YarnException("Error creating done directory: ["
          + doneDirPrefixPath + "]", e);
    }

    String intermediateDoneDirPrefix = JobHistoryUtils
        .getConfiguredHistoryIntermediateDoneDirPrefix(conf);
    try {
      intermediateDoneDirPath = FileContext.getFileContext(conf).makeQualified(
          new Path(intermediateDoneDirPrefix));
      intermediateDoneDirFc = FileContext.getFileContext(
          intermediateDoneDirPath.toUri(), conf);
      mkdir(intermediateDoneDirFc, intermediateDoneDirPath, new FsPermission(
          JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
    } catch (IOException e) {
      LOG.error("Error creating intermediate done directory on DFS", e);
      throw new YarnException("Error creating intermediate done directory: ["
          + intermediateDoneDirPath + "]", e);
    }

    this.aclsMgr = new JobACLsManager(conf);
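
The mkdir(...) helper called above is not part of this excerpt. A minimal sketch of what such a helper might look like (hypothetical, not the actual Hadoop implementation; it assumes FileContext, FileStatus, FileAlreadyExistsException and Path from org.apache.hadoop.fs, plus org.apache.hadoop.fs.permission.FsPermission, are imported):

  // Hypothetical helper: create the directory (and any missing parents) with the
  // requested permission, treating an already-existing directory as success.
  private void mkdir(FileContext fc, Path path, FsPermission fsp) throws IOException {
    try {
      fc.mkdir(path, fsp, true);
      // The process umask may have narrowed the permission; reset it explicitly.
      FileStatus status = fc.getFileStatus(path);
      if (!status.getPermission().equals(fsp)) {
        fc.setPermission(path, fsp);
      }
    } catch (FileAlreadyExistsException e) {
      // Created earlier or by another process; nothing more to do.
    }
  }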

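        // Excerpt: the task split meta-info is read from the job submit directory; an
        // IOException from SplitMetaInfoReader is rethrown as a YarnException.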
        allTaskSplitMetaInfo = SplitMetaInfoReader.readSplitMetaInfo(
            job.oldJobId, job.fs,
            job.conf,
            job.remoteJobSubmitDir);
      } catch (IOException e) {
        throw new YarnException(e);
      }
      return allTaskSplitMetaInfo;
    }

          + "of RMContainerAllocator: " + remCapacity);
    }
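    // Excerpt from RMContainerAllocator: after reporting the event queue's remaining
    // capacity, the event is enqueued; an InterruptedException from put() is rethrown
    // as a YarnException.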
    try {
      eventQueue.put(event);
    } catch (InterruptedException e) {
      throw new YarnException(e);
    }
  }
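
For context, the consuming side of such a queue is typically a dedicated dispatcher thread that blocks on take(). A generic sketch of that side (illustrative only, not the actual allocator code; eventQueue and handleEvent are assumed names):

  // Generic event-loop sketch for the consuming side of a BlockingQueue.
  Runnable eventLoop = new Runnable() {
    @Override
    public void run() {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          handleEvent(eventQueue.take());      // blocks until an event is available
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();  // restore the interrupt flag and exit
          return;
        }
      }
    }
  };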

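      // Excerpt from the heartbeat to the ResourceManager: if the RM cannot be contacted
      // within the retry interval, or if the RM asks the application to reboot (e.g. after
      // an RM restart), the job receives an INTERNAL_ERROR event and a YarnException is thrown.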
      // re-trying until the retryInterval has expired.
      if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
        LOG.error("Could not contact RM after " + retryInterval + " milliseconds.");
        eventHandler.handle(new JobEvent(this.getJob().getID(),
                                         JobEventType.INTERNAL_ERROR));
        throw new YarnException("Could not contact RM after " +
                                retryInterval + " milliseconds.");
      }
      // Throw this up to the caller, which may decide to ignore it and
      // continue to attempt to contact the RM.
      throw e;
    }
    if (response.getReboot()) {
      // This can happen if the RM has been restarted. If it is in that state,
      // this application must clean itself up.
      eventHandler.handle(new JobEvent(this.getJob().getID(),
                                       JobEventType.INTERNAL_ERROR));
      throw new YarnException("Resource Manager doesn't recognize AttemptId: " +
                               this.getContext().getApplicationID());
    }
    int newHeadRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;
    List<Container> newContainers = response.getAllocatedContainers();
    List<ContainerStatus> finishedContainers = response.getCompletedContainersStatuses();

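      // Excerpt: the initial classpath is added to the container launch environment; an
      // IOException while computing it is rethrown as a YarnException.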
      Apps.addToEnvironment(
          environment, 
          Environment.CLASSPATH.name(),
          getInitialClasspath(conf));
    } catch (IOException e) {
      throw new YarnException(e);
    }

    // Shell
    environment.put(
        Environment.SHELL.name(),

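    // Excerpt from job submission: the job's security tokens are written to the
    // application tokens file in the job submit directory; an IOException is rethrown
    // as a YarnException before the ApplicationSubmissionContext for the MR AM is built.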
    Path applicationTokensFile =
        new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
    try {
      ts.writeTokenStorageFile(applicationTokensFile, conf);
    } catch (IOException e) {
      throw new YarnException(e);
    }

    // Construct necessary information to start the MR AM
    ApplicationSubmissionContext appContext =
      createApplicationSubmissionContext(conf, jobSubmitDir, ts);
