Package org.apache.hadoop.security

Examples of org.apache.hadoop.security.UserGroupInformation

The excerpts below, drawn from the Hadoop codebase, show the most common UserGroupInformation (UGI) patterns: logging in from a Kerberos keytab, running code under another identity with doAs(), creating users for tests, and attaching security tokens for RPC authentication.


    // Checkpoint tuning: period in seconds, size threshold in bytes.
    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);

    // Initialize the web server for uploading files.
    // Kerberized SSL servers must run as the host principal, so log in a
    // dedicated UGI from the HTTPS keytab rather than reusing the login user.
    UserGroupInformation httpUGI =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            SecurityUtil.getServerPrincipal(
                conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
                infoBindAddress),
            conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
    try {
      // Start the HTTP server inside doAs so it binds with those credentials.
      infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
        @Override
        public HttpServer run() throws IOException, InterruptedException {
          LOG.info("Starting web server as: " +
              UserGroupInformation.getCurrentUser().getUserName());
          // ... construct and start the HttpServer (excerpt truncated)
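
The core pattern above is loginUserFromKeytabAndReturnUGI() followed by doAs(). Below is a minimal, self-contained sketch of that pattern; the principal and keytab path are hypothetical placeholders, not values from the excerpt.

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginExample {
      public static void main(String[] args) throws Exception {
        // Log in from a keytab without changing the process-wide login user.
        UserGroupInformation ugi =
            UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "service/host.example.com@EXAMPLE.COM",  // hypothetical principal
                "/etc/security/keytabs/service.keytab"); // hypothetical keytab
        // Anything inside run() executes with the keytab identity.
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            System.out.println("Running as "
                + UserGroupInformation.getCurrentUser().getUserName());
            return null;
          }
        });
      }
    }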


  public void run() {
    if (UserGroupInformation.isSecurityEnabled()) {
      UserGroupInformation ugi = null;
      try {
        ugi = UserGroupInformation.getLoginUser();
      } catch (IOException e) {
        // No login user available: log the failure and abort the daemon.
        LOG.error(StringUtils.stringifyException(e));
        Runtime.getRuntime().exit(-1);
      }
      // Run the thread's work with the login user's credentials.
      ugi.doAs(new PrivilegedAction<Object>() {
        @Override
        public Object run() {
          doWork();
          return null;
        }
      });  // ... (excerpt truncated)
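
A condensed sketch of the same daemon pattern, assuming a hypothetical doWork() method: fetch the login user once, then wrap the thread's work in doAs so it runs with those credentials.

    import java.io.IOException;
    import java.security.PrivilegedAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class SecureDaemon implements Runnable {
      public void run() {
        try {
          UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
          // PrivilegedAction (not PrivilegedExceptionAction) suffices when
          // the wrapped work throws no checked exceptions.
          loginUser.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
              doWork();  // hypothetical worker method
              return null;
            }
          });
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
      }

      private void doWork() { /* ... */ }
    }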

    // Pull the request parameters and the NameNode's live Configuration
    // out of the servlet context.
    final Map<String, String[]> pmap = request.getParameterMap();
    final PrintWriter out = response.getWriter();
    final ServletContext context = getServletContext();
    final Configuration conf =
        (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
    // Resolve the caller's identity from the HTTP request.
    final UserGroupInformation ugi = getUGI(request, conf);
    try {
      // Perform the namesystem queries as the calling user.
      ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          final NameNode nn = (NameNode) context.getAttribute("name.node");
          final int totalDatanodes =
              nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
          final short minReplication = nn.namesystem.getMinReplication();
          // ... (excerpt truncated)
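
When Kerberos is off, resolving a servlet caller's UGI typically reduces to constructing one from the remote user name. A hedged sketch of that building block; the user name and path below are made up for illustration.

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class RemoteUserExample {
      public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();
        // Build a UGI for a named remote caller (no credentials attached).
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("webuser");
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
          @Override
          public Boolean run() throws Exception {
            FileSystem fs = FileSystem.get(conf);
            return fs.exists(new Path("/user/webuser"));  // hypothetical path
          }
        });
        System.out.println("home exists: " + exists);
      }
    }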

  /**
   * Verify the method isUserAllowed()
   */
  public void testIsUserAllowed() {
    AccessControlList acl;

    // createUserForTesting builds a UGI with a fixed group list and no
    // real login, which keeps security tests hermetic.
    UserGroupInformation drwho =
        UserGroupInformation.createUserForTesting("drwho@APACHE.ORG",
            new String[] { "aliens", "humanoids", "timelord" });
    UserGroupInformation susan =
        UserGroupInformation.createUserForTesting("susan@APACHE.ORG",
            new String[] { "aliens", "humanoids", "timelord" });
    UserGroupInformation barbara =
        UserGroupInformation.createUserForTesting("barbara@APACHE.ORG",
            new String[] { "humans", "teachers" });
    UserGroupInformation ian =
        UserGroupInformation.createUserForTesting("ian@APACHE.ORG",
            new String[] { "humans", "teachers" });

    // ACL format: "users groups" - allows the user "drwho" or any member
    // of the "humanoids" group.
    acl = new AccessControlList("drwho humanoids");
    assertUserAllowed(drwho, acl);
    // ... (excerpt truncated)
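
For reference, a self-contained sketch of the check these assertions exercise: an AccessControlList is built from a "users groups" string (each part comma-separated), and isUserAllowed() matches the UGI's short name against the users and its group list against the groups.

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    public class AclCheckExample {
      public static void main(String[] args) {
        AccessControlList acl = new AccessControlList("drwho humanoids");
        UserGroupInformation drwho =
            UserGroupInformation.createUserForTesting("drwho@APACHE.ORG",
                new String[] { "aliens", "humanoids", "timelord" });
        // Allowed: drwho is a member of the "humanoids" group.
        System.out.println(acl.isUserAllowed(drwho));  // expected: true
      }
    }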

    // Start an RPC server backed by the job-token secret manager.
    final Server server = RPC.getServer(mockTT,
        ADDRESS, 0, 5, true, conf, sm);

    server.start();

    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    // Build a job token keyed by the user name and register it.
    String jobId = current.getUserName();
    JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
    Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
    sm.addTokenForJob(jobId, token);

    // The token's service must match the server's host:port.
    Text host = new Text(addr.getAddress().getHostAddress() + ":"
        + addr.getPort());
    token.setService(host);
    LOG.info("Service address for token is " + host);

    // Attach the token to the current UGI and connect inside doAs,
    // which forces DIGEST (token) authentication on the connection.
    current.addToken(token);
    current.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        TaskUmbilicalProtocol proxy = null;
        try {
          proxy = (TaskUmbilicalProtocol) RPC.getProxy(
              // ... (excerpt truncated)
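
A stripped-down sketch of the token plumbing above, with a hypothetical job id and service address; package paths follow the Hadoop 1.x source this excerpt appears to come from.

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
    import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class JobTokenExample {
      public static void main(String[] args) throws Exception {
        JobTokenSecretManager sm = new JobTokenSecretManager();
        String jobId = "job_201801010000_0001";  // hypothetical job id

        // Create a token for the job and register it with the secret manager.
        JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
        Token<JobTokenIdentifier> token =
            new Token<JobTokenIdentifier>(tokenId, sm);
        sm.addTokenForJob(jobId, token);

        // The service must match the host:port the client will connect to.
        token.setService(new Text("127.0.0.1:54321"));  // hypothetical address

        // Attach to the current UGI so the IPC client can select it.
        UserGroupInformation.getCurrentUser().addToken(token);
      }
    }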

      this.pingInterval = remoteId.getPingInterval();
      if (LOG.isDebugEnabled()) {
        LOG.debug("The ping interval is " + this.pingInterval + " ms.");
      }
     
      UserGroupInformation ticket = remoteId.getTicket();
      Class<?> protocol = remoteId.getProtocol();
      this.useSasl = UserGroupInformation.isSecurityEnabled();
      if (useSasl && protocol != null) {
        // If the protocol carries a @TokenInfo annotation, use its selector
        // to pick a matching token from the caller's UGI for this address.
        TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class);
        if (tokenInfo != null) {
          TokenSelector<? extends TokenIdentifier> tokenSelector = null;
          try {
            tokenSelector = tokenInfo.value().newInstance();
          } catch (InstantiationException e) {
            throw new IOException(e.toString());
          } catch (IllegalAccessException e) {
            throw new IOException(e.toString());
          }
          InetSocketAddress addr = remoteId.getAddress();
          token = tokenSelector.selectToken(
              new Text(addr.getAddress().getHostAddress() + ":" + addr.getPort()),
              ticket.getTokens());
        }
        // If the protocol carries a @KerberosInfo annotation, remember the
        // server principal so SASL/GSSAPI can authenticate against it.
        KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class);
        if (krbInfo != null) {
          serverPrincipal = remoteId.getServerPrincipal();
          if (LOG.isDebugEnabled()) {
            LOG.debug("RPC Server's Kerberos principal name for protocol="
                + protocol.getCanonicalName() + " is " + serverPrincipal);
          }
        }
      }
     
      // Pick the auth method: SIMPLE when security is off, DIGEST when a
      // token was selected, KERBEROS otherwise.
      if (!useSasl) {
        authMethod = AuthMethod.SIMPLE;
      } else if (token != null) {
        authMethod = AuthMethod.DIGEST;
      } else {
        authMethod = AuthMethod.KERBEROS;
      }
     
      header = new ConnectionHeader(
          protocol == null ? null : protocol.getName(), ticket, authMethod);

      if (LOG.isDebugEnabled()) {
        LOG.debug("Use " + authMethod + " authentication for protocol "
            + protocol.getSimpleName());
      }

      this.setName("IPC Client (" + socketFactory.hashCode() + ") connection to "
          + remoteId.getAddress().toString() + " from "
          + ((ticket == null) ? "an unknown user" : ticket.getUserName()));
      this.setDaemon(true);
    }
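
The TokenInfo and KerberosInfo annotations read above are declared on the RPC protocol interface itself. A sketch of such a declaration, modeled on HDFS's ClientProtocol; the selector class and configuration key are assumptions, not values verified against this excerpt's exact Hadoop version.

    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
    import org.apache.hadoop.ipc.VersionedProtocol;
    import org.apache.hadoop.security.KerberosInfo;
    import org.apache.hadoop.security.token.TokenInfo;

    // serverPrincipal names the conf key that holds the server's principal.
    @KerberosInfo(serverPrincipal = "dfs.namenode.kerberos.principal")
    // @TokenInfo names the TokenSelector the IPC client instantiates above.
    @TokenInfo(DelegationTokenSelector.class)
    public interface MySecureProtocol extends VersionedProtocol {
      // getProtocolVersion() is inherited from VersionedProtocol.
    }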

    private synchronized boolean shouldAuthenticateOverKrb() throws IOException {
      UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
      UserGroupInformation currentUser =
          UserGroupInformation.getCurrentUser();
      UserGroupInformation realUser = currentUser.getRealUser();
      if (authMethod == AuthMethod.KERBEROS &&
          loginUser != null &&
          // make sure the user logged in via Kerberos (keytab or TGT)
          loginUser.hasKerberosCredentials() &&
          // re-login only when the caller is the login user (e.g. the JT)
          // ... (excerpt truncated)
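
When this check passes, the client attempts a Kerberos re-login before retrying the connection. A minimal sketch of the re-login calls themselves; this is an illustrative fragment, not the Client's exact retry code.

    import java.io.IOException;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ReloginExample {
      static void reloginIfPossible() throws IOException {
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        if (loginUser.isFromKeytab()) {
          loginUser.reloginFromKeytab();       // refresh the TGT from the keytab
        } else {
          loginUser.reloginFromTicketCache();  // refresh from an external kinit cache
        }
      }
    }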

          OutputStream outStream = NetUtils.getOutputStream(socket);
          writeRpcHeader(outStream);
          if (useSasl) {
            final InputStream in2 = inStream;
            final OutputStream out2 = outStream;
            UserGroupInformation ticket = remoteId.getTicket();
            // For Kerberos, a proxy UGI must authenticate as its real user.
            if (authMethod == AuthMethod.KERBEROS) {
              if (ticket.getRealUser() != null) {
                ticket = ticket.getRealUser();
              }
            }
            boolean continueSasl = false;
            try {
              // Run the SASL handshake inside doAs so the subject's Kerberos
              // credentials are visible to the GSSAPI layer.
              continueSasl =
                  ticket.doAs(new PrivilegedExceptionAction<Boolean>() {
                    @Override
                    public Boolean run() throws IOException {
                      return setupSaslConnection(in2, out2);
                    }
                  });
              // ... (excerpt truncated)
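
The getRealUser() handling above exists because a proxy (impersonating) UGI must authenticate over Kerberos as its real user. A hedged sketch of creating such a proxy UGI; "alice" is a made-up name.

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    public class ProxyUserExample {
      public static void main(String[] args) throws Exception {
        // The real (credentialed) identity of this process.
        UserGroupInformation realUser = UserGroupInformation.getLoginUser();
        // A UGI that acts as "alice" but authenticates as realUser.
        UserGroupInformation proxy =
            UserGroupInformation.createProxyUser("alice", realUser);
        proxy.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() {
            // Work here runs as "alice"; getRealUser() returns realUser.
            return null;
          }
        });
      }
    }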

  public void testHftpAccessControl() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      // Two test identities: a superuser for the cluster and a plain user.
      final UserGroupInformation DFS_UGI = createUGI("dfs", true);
      final UserGroupInformation USER_UGI = createUGI("user", false);

      // Start the cluster as DFS_UGI.
      final Configuration dfsConf = new Configuration();
      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
      cluster.waitActive();

      final String httpAdd = dfsConf.get("dfs.http.address");
      final URI nnURI = FileSystem.getDefaultUri(dfsConf);
      final String nnUri = nnURI.toString();
      FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
          return FileSystem.get(nnURI, dfsConf);
        }
      });
      final Path home = createHomeDirectory(fs1, USER_UGI);

      // Now act as USER_UGI.
      final Configuration userConf = new Configuration();
      final FileSystem fs =
          USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
          return FileSystem.get(nnURI, userConf);
        }
      });

      final Path srcrootpath = new Path(home, "src_root");
      final String srcrootdir = srcrootpath.toString();
      final Path dstrootpath = new Path(home, "dst_root");
      final String dstrootdir = dstrootpath.toString();
      final DistCp distcp =
          USER_UGI.doAs(new PrivilegedExceptionAction<DistCp>() {
        public DistCp run() {
          return new DistCp(userConf);
        }
      });

      FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
      final String[] args = {"hftp://" + httpAdd + srcrootdir, nnUri + dstrootdir};

      { // Copy with source permission 000: distcp should fail with -3.
        fs.setPermission(srcrootpath, new FsPermission((short)0));
        USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
          public Void run() throws Exception {
            assertEquals(-3, ToolRunner.run(distcp, args));
            return null;
          }
        });
      }
      // ... (excerpt truncated)

      cluster.setLeasePeriod(softLease, hardLease);

      // Try to re-open the file before closing the previous handle. This
      // should fail but will trigger lease recovery.
      {
        UserGroupInformation ugi =
            UserGroupInformation.createUserForTesting(fakeUsername,
                new String[] { fakeGroup });

        // Get a FileSystem handle bound to the fake user's credentials.
        FileSystem dfs2 = DFSTestUtil.getFileSystemAs(ugi, conf);
        // ... (excerpt truncated)
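
DFSTestUtil.getFileSystemAs is, judging from its use here, a doAs wrapper around FileSystem.get. A sketch of the equivalent written directly against UGI; treating the helper as such a wrapper is an assumption based on this excerpt.

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class FileSystemAsUser {
      static FileSystem get(UserGroupInformation ugi, final Configuration conf)
          throws Exception {
        // The returned FileSystem is bound to ugi's credentials.
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
          @Override
          public FileSystem run() throws IOException {
            return FileSystem.get(conf);
          }
        });
      }
    }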
