Package org.apache.hadoop.security

Examples of org.apache.hadoop.security.UnixUserGroupInformation
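
The excerpts below come from the pre-security Hadoop APIs (job clients, HDFS servlets, and tests) and share one basic pattern: obtain a UnixUserGroupInformation, read its user and group names, and stash it in a Configuration. A minimal sketch of that pattern, assuming the boolean passed to login() means "save the resulting UGI into conf"; the class name and the "someuser"/"somegroup" values are placeholders:

import java.io.IOException;
import java.util.Arrays;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UnixUserGroupInformation;

public class UgiExample {
  public static void main(String[] args) throws IOException, LoginException {
    Configuration conf = new Configuration();

    // Log in as the current Unix user; true mirrors the flag passed in the
    // excerpts below (assumed to mean "cache the UGI in conf").
    UnixUserGroupInformation ugi = UnixUserGroupInformation.login(conf, true);
    System.out.println("user   = " + ugi.getUserName());
    System.out.println("groups = " + Arrays.toString(ugi.getGroupNames()));

    // Or build a UGI explicitly and store it under the standard property name
    // so later FileSystem/JobClient calls using the same conf pick it up.
    UnixUserGroupInformation other =
        new UnixUserGroupInformation("someuser", new String[] { "somegroup" });
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, other);
  }
}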


    return jobSubmitClient.jobsToComplete();
  }

  private UnixUserGroupInformation getUGI(Configuration conf)
      throws IOException {
    UnixUserGroupInformation ugi = null;
    try {
      ugi = UnixUserGroupInformation.login(conf, true);
    } catch (LoginException e) {
      throw (IOException) (new IOException(
          "Failed to get the current user's information.").initCause(e));
    }
    return ugi;
  }
View Full Code Here


    /*
     * set this user's id in job configuration, so later job files can be
     * accessed using this user's id
     */
    UnixUserGroupInformation ugi = getUGI(job.getConf());

    ClusterStatus clusterStatus = getClusterStatus(true);

    // check the number of BSP tasks
    int tasks = job.getNumBspTask();
    int maxTasks = clusterStatus.getMaxTasks();

    if (tasks <= 0 || tasks > maxTasks) {
      LOG.warn("The number of tasks you've entered was invalid. Using default value of "
          + maxTasks + "!");
      job.setNumBspTask(maxTasks);
    }

    // Create a number of filenames in the BSPMaster's fs namespace
    FileSystem fs = getFs();
    fs.delete(submitJobDir, true);
    submitJobDir = fs.makeQualified(submitJobDir);
    submitJobDir = new Path(submitJobDir.toUri().getPath());
    FsPermission bspSysPerms = new FsPermission(JOB_DIR_PERMISSION);
    FileSystem.mkdirs(fs, submitJobDir, bspSysPerms);
    fs.mkdirs(submitJobDir);
    short replication = (short) job.getInt("bsp.submit.replication", 10);

    String originalJarPath = job.getJar();

    if (originalJarPath != null) { // copy jar to BSPMaster's fs
      // use jar name if job is not named.
      if ("".equals(job.getJobName())) {
        job.setJobName(new Path(originalJarPath).getName());
      }
      job.setJar(submitJarFile.toString());
      fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);

      fs.setReplication(submitJarFile, replication);
      fs.setPermission(submitJarFile, new FsPermission(JOB_FILE_PERMISSION));
    } else {
      LOG.warn("No job jar file set.  User classes may not be found. "
          + "See BSPJob#setJar(String) or check Your jar file.");
    }

    // Set the user's name and working directory
    job.setUser(ugi.getUserName());
    if (ugi.getGroupNames().length > 0) {
      job.set("group.name", ugi.getGroupNames()[0]);
    }
    if (job.getWorkingDirectory() == null) {
      job.setWorkingDirectory(fs.getWorkingDirectory());
    }

View Full Code Here
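
JOB_DIR_PERMISSION and JOB_FILE_PERMISSION above are constants of the enclosing client class and are not part of the excerpt. A hedged sketch of how they might be declared, assuming the conventional 0700 staging-directory / 0644 job-file split (the octal values are an assumption, not taken from this code):

  // Hypothetical declarations for the constants referenced above; the real
  // values live in the enclosing class and may differ.
  static final FsPermission JOB_DIR_PERMISSION =
      FsPermission.createImmutable((short) 0700);   // staging dir: owner-only rwx
  static final FsPermission JOB_FILE_PERMISSION =
      FsPermission.createImmutable((short) 0644);   // submitted files: world-readable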

   * GET http://<nn>:<port>/data[/<path>] HTTP/1.1
   * }
   */
  public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws IOException {
    final UnixUserGroupInformation ugi = getUGI(request);
    final ClientProtocol nnproxy = createNameNodeProxy(ugi);

    try {
      final String path = request.getPathInfo() != null
        ? request.getPathInfo() : "/";
View Full Code Here

  /** Get {@link UserGroupInformation} from request */
  protected UnixUserGroupInformation getUGI(HttpServletRequest request) {
    String ugi = request.getParameter("ugi");
    try {
      return new UnixUserGroupInformation(ugi.split(","));
    }
    catch(Exception e) {
      LOG.warn("Invalid ugi (= " + ugi + ")");
    }
    return JspHelper.webUGI;
  }
View Full Code Here
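
getUGI(request) above splits a comma-separated "ugi" query parameter of the form user,group1,group2. A small sketch of the matching client side, assuming an HFTP-style /data URL; the host, port, path, and names are placeholders:

import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLEncoder;

class HftpUgiUrl {
  // Hypothetical helper: encode "user,group" into the ugi parameter that the
  // servlet's getUGI(request) splits on "," above.
  static URL dataUrl(String host, int port, String path, String user, String group)
      throws MalformedURLException, UnsupportedEncodingException {
    String ugi = URLEncoder.encode(user + "," + group, "UTF-8");
    return new URL("http://" + host + ":" + port + "/data" + path + "?ugi=" + ugi);
  }
}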

        final UserGroupInformation superuser = UserGroupInformation.getCurrentUGI();
        String username = "testappenduser";
        String group = "testappendgroup";
        assertFalse(superuser.getUserName().equals(username));
        assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
        UnixUserGroupInformation appenduser = UnixUserGroupInformation.createImmutable(
            new String[]{username, group});
        UnixUserGroupInformation.saveToConf(conf,
            UnixUserGroupInformation.UGI_PROPERTY_NAME, appenduser);
        fs = FileSystem.get(conf);
View Full Code Here
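
In the string array handed to createImmutable() (and to the UnixUserGroupInformation constructor), the first element is the user name and the remaining elements are the groups. A short illustration with placeholder names:

  // Placeholder names: "alice" is the user, "staff" and "hdfs" are her groups.
  UnixUserGroupInformation ugi = UnixUserGroupInformation.createImmutable(
      new String[] { "alice", "staff", "hdfs" });
  // ugi.getUserName()   -> "alice"
  // ugi.getGroupNames() -> { "staff", "hdfs" }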

  }

  public void testHftpAccessControl() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true);
      final UnixUserGroupInformation USER_UGI = createUGI("user", false);

      //start cluster by DFS_UGI
      final Configuration dfsConf = new Configuration();
      UnixUserGroupInformation.saveToConf(dfsConf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
View Full Code Here
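
createUGI() above is a helper defined elsewhere in the test class; its body is not part of the excerpt. One plausible reconstruction, consistent with the createImmutable() usage shown earlier (the timestamp suffix and the "supergroup" name are assumptions):

  // Hypothetical reconstruction of the test helper; the real method may differ.
  static final long NOW = System.currentTimeMillis();

  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
    String username = name + NOW;                        // unique per test run
    String group = issuper ? "supergroup" : username;    // superusers get "supergroup"
    return UnixUserGroupInformation.createImmutable(new String[] { username, group });
  }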

   
    /*
     * set this user's id in job configuration, so later job files can be
     * accessed using this user's id
     */
    UnixUserGroupInformation ugi = getUGI(job);
     
    //
    // Figure out what fs the JobTracker is using.  Copy the
    // job to it, under a temporary name.  This allows DFS to work,
    // and under the local fs also provides UNIX-like object loading
    // semantics.  (that is, if the job file is deleted right after
    // submission, we can still run the submission to completion)
    //

    // Create a number of filenames in the JobTracker's fs namespace
    FileSystem fs = getFs();
    LOG.debug("default FileSystem: " + fs.getUri());
    fs.delete(submitJobDir, true);
    submitJobDir = fs.makeQualified(submitJobDir);
    submitJobDir = new Path(submitJobDir.toUri().getPath());
    FsPermission mapredSysPerms = new FsPermission(JOB_DIR_PERMISSION);
    FileSystem.mkdirs(fs, submitJobDir, mapredSysPerms);
    Path filesDir = new Path(submitJobDir, "files");
    Path archivesDir = new Path(submitJobDir, "archives");
    Path libjarsDir = new Path(submitJobDir, "libjars");
    short replication = (short)job.getInt("mapred.submit.replication", 10);
    // add all the command line files/ jars and archive
    // first copy them to jobtrackers filesystem
   
    if (files != null) {
      FileSystem.mkdirs(fs, filesDir, mapredSysPerms);
      String[] fileArr = files.split(",");
      for (String tmpFile: fileArr) {
        Path tmp = new Path(tmpFile);
        Path newPath = copyRemoteFiles(fs,filesDir, tmp, job, replication);
        try {
          URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName());
          DistributedCache.addCacheFile(pathURI, job);
        } catch(URISyntaxException ue) {
          // should not throw a URI exception
          throw new IOException("Failed to create uri for " + tmpFile);
        }
        DistributedCache.createSymlink(job);
      }
    }
   
    if (libjars != null) {
      FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms);
      String[] libjarsArr = libjars.split(",");
      for (String tmpjars: libjarsArr) {
        Path tmp = new Path(tmpjars);
        Path newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication);
        DistributedCache.addArchiveToClassPath(newPath, job);
      }
    }
   
   
    if (archives != null) {
     FileSystem.mkdirs(fs, archivesDir, mapredSysPerms);
     String[] archivesArr = archives.split(",");
     for (String tmpArchives: archivesArr) {
       Path tmp = new Path(tmpArchives);
       Path newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication);
       try {
         URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName());
         DistributedCache.addCacheArchive(pathURI, job);
       } catch(URISyntaxException ue) {
         // should not throw a URI exception
         throw new IOException("Failed to create uri for " + tmpArchives);
       }
       DistributedCache.createSymlink(job);
     }
    }
   
    //  set the timestamps of the archives and files
    URI[] tarchives = DistributedCache.getCacheArchives(job);
    if (tarchives != null) {
      StringBuffer archiveTimestamps =
        new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tarchives[0])));
      for (int i = 1; i < tarchives.length; i++) {
        archiveTimestamps.append(",");
        archiveTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tarchives[i])));
      }
      DistributedCache.setArchiveTimestamps(job, archiveTimestamps.toString());
    }

    URI[] tfiles = DistributedCache.getCacheFiles(job);
    if (tfiles != null) {
      StringBuffer fileTimestamps =
        new StringBuffer(String.valueOf(DistributedCache.getTimestamp(job, tfiles[0])));
      for (int i = 1; i < tfiles.length; i++) {
        fileTimestamps.append(",");
        fileTimestamps.append(String.valueOf(DistributedCache.getTimestamp(job, tfiles[i])));
      }
      DistributedCache.setFileTimestamps(job, fileTimestamps.toString());
    }
      
    String originalJarPath = job.getJar();

    if (originalJarPath != null) {           // copy jar to JobTracker's fs
      // use jar name if job is not named.
      if ("".equals(job.getJobName())){
        job.setJobName(new Path(originalJarPath).getName());
      }
      job.setJar(submitJarFile.toString());
      fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
      fs.setReplication(submitJarFile, replication);
      fs.setPermission(submitJarFile, new FsPermission(JOB_FILE_PERMISSION));
    } else {
      LOG.warn("No job jar file set.  User classes may not be found. "+
               "See JobConf(Class) or JobConf#setJar(String).");
    }

    // Set the user's name and working directory
    job.setUser(ugi.getUserName());
    if (ugi.getGroupNames().length > 0) {
      job.set("group.name", ugi.getGroupNames()[0]);
    }
    if (job.getWorkingDirectory() == null) {
      job.setWorkingDirectory(fs.getWorkingDirectory());         
    }

View Full Code Here
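
The "#" fragment appended to each cache URI above becomes the symlink name that DistributedCache creates in the task working directory once createSymlink(job) has been called. The same idea in isolation (the HDFS path is a placeholder):

  // Hypothetical standalone version of the pattern used above; the part after
  // "#" names the symlink each task sees in its working directory.
  // (new URI(...) throws URISyntaxException; handle it as in the excerpt above.)
  URI cached = new URI("hdfs://namenode:9000/user/alice/lookup.txt#lookup.txt");
  DistributedCache.addCacheFile(cached, job);   // job is the JobConf being submitted
  DistributedCache.createSymlink(job);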

    }

  }

  private UnixUserGroupInformation getUGI(Configuration job) throws IOException {
    UnixUserGroupInformation ugi = null;
    try {
      ugi = UnixUserGroupInformation.login(job, true);
    } catch (LoginException e) {
      throw (IOException)(new IOException(
          "Failed to get the current user's information.").initCause(e));
    }
    return ugi;
  }
View Full Code Here

      {
        Configuration conf2 = new Configuration(conf);
        String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
        UnixUserGroupInformation.saveToConf(conf2,
            UnixUserGroupInformation.UGI_PROPERTY_NAME,
            new UnixUserGroupInformation(username, new String[]{"supergroup"}));
        FileSystem dfs2 = FileSystem.get(conf2);
 
        boolean done = false;
        for(int i = 0; i < 10 && !done; i++) {
          AppendTestUtil.LOG.info("i=" + i);
View Full Code Here

                 (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
     
      // 17:  setQuota by a non-administrator
      UnixUserGroupInformation.saveToConf(conf,
          UnixUserGroupInformation.UGI_PROPERTY_NAME,
          new UnixUserGroupInformation(new String[]{"userxx\n", "groupyy\n"}));
      DFSAdmin userAdmin = new DFSAdmin(conf);
      args[1] = "100";
      runCommand(userAdmin, args, true);
      runCommand(userAdmin, true, "-setSpaceQuota", "1g", args[2]);
     
View Full Code Here
