Package: org.apache.hadoop.hdfs

Usage examples of org.apache.hadoop.hdfs.HdfsConfiguration.addResource()


  /**
   * {@inheritDoc}
   *
   * Builds the proxy's configuration from the HdfsProxy resource files and
   * fails fast when the source-cluster NameNode address is not configured.
   * NOTE(review): fragment is truncated below the null check.
   */
  public void init(FilterConfig filterConfig) throws ServletException {
    ServletContext context = filterConfig.getServletContext();
    // false presumably suppresses loading the default resources so only the
    // explicitly added files below contribute — confirm against
    // HdfsConfiguration(boolean).
    Configuration conf = new HdfsConfiguration(false);
    conf.addResource("hdfsproxy-default.xml");
    conf.addResource("ssl-server.xml");
    conf.addResource("hdfsproxy-site.xml");
    // The proxy cannot operate without the source cluster's NameNode address.
    String nn = conf.get("hdfsproxy.dfs.namenode.address");
    if (nn == null) {
      throw new ServletException("Proxy source cluster name node address not speficied");
View Full Code Here


  /**
   * {@inheritDoc}
   *
   * Builds the proxy's configuration from the HdfsProxy resource files and
   * fails fast when the source-cluster NameNode address is not configured.
   * NOTE(review): fragment is truncated after the null check.
   */
  public void init(FilterConfig filterConfig) throws ServletException {
    ServletContext context = filterConfig.getServletContext();
    // false presumably suppresses loading the default resources so only the
    // explicitly added files below contribute — confirm against
    // HdfsConfiguration(boolean).
    Configuration conf = new HdfsConfiguration(false);
    conf.addResource("hdfsproxy-default.xml");
    conf.addResource("ssl-server.xml");
    conf.addResource("hdfsproxy-site.xml");
    // The proxy cannot operate without the source cluster's NameNode address.
    String nn = conf.get("hdfsproxy.dfs.namenode.address");
    if (nn == null) {
      throw new ServletException("Proxy source cluster name node address not speficied");
    }
View Full Code Here

  /**
   * Initializes the filter: loads the HdfsProxy resource files, requires the
   * source-cluster NameNode address, and resolves it to a socket address.
   * NOTE(review): fragment is truncated after the address resolution.
   */
  public void init(FilterConfig filterConfig) throws ServletException {
    ServletContext context = filterConfig.getServletContext();
    // false presumably suppresses loading the default resources — confirm
    // against HdfsConfiguration(boolean).
    Configuration conf = new HdfsConfiguration(false);
    conf.addResource("hdfsproxy-default.xml");
    conf.addResource("ssl-server.xml");
    conf.addResource("hdfsproxy-site.xml");
    // The proxy cannot operate without the source cluster's NameNode address.
    String nn = conf.get("hdfsproxy.dfs.namenode.address");
    if (nn == null) {
      throw new ServletException("Proxy source cluster name node address not speficied");
    }
    // Resolve the configured host:port string into an InetSocketAddress.
    InetSocketAddress nAddr = NetUtils.createSocketAddr(nn);
View Full Code Here

    // Loads the user-permissions file into a user -> allowed-paths map.
    // NOTE(review): fragment is truncated at both edges; enclosing method
    // signature (likely getPermMap) is not visible here.
    if (conf.getResource(permLoc) == null) {
      // Missing permissions file is reported as null, not an exception —
      // the caller (see reload handler) treats null as a reload failure.
      LOG.warn("HdfsProxy user permissions file not found");
      return null;
    }
    Configuration permConf = new HdfsConfiguration(false);
    permConf.addResource(permLoc);
    Map<String, Set<Path>> map = new HashMap<String, Set<Path>>();
    // Iterate every key/value property in the permissions file.
    for (Map.Entry<String, String> e : permConf) {
      String k = e.getKey();
      String v = e.getValue();
      // Skip entries with an empty key or empty value.
      if (k != null && k.length() != 0 && v != null && v.length() != 0) {
View Full Code Here

    // Loads the user-certificates file into a user -> certificate-serials map.
    // NOTE(review): fragment is truncated at both edges; enclosing method
    // signature (likely getCertsMap) is not visible here.
    if (conf.getResource(certsLoc) == null) {
      // Missing certs file is reported as null, not an exception — the
      // caller (see reload handler) treats null as a reload failure.
      LOG.warn("HdfsProxy user certs file not found");
      return null;
    }
    Configuration certsConf = new HdfsConfiguration(false);
    certsConf.addResource(certsLoc);
    Map<String, Set<BigInteger>> map = new HashMap<String, Set<BigInteger>>();
    // Iterate every key/value property in the certs file.
    for (Map.Entry<String, String> e : certsConf) {
      String k = e.getKey();
      // Trim the value here (unlike the perms loader) since certificate
      // serial numbers must be parsed exactly.
      String v = e.getValue().trim();
      // Skip entries with an empty key or empty value.
      if (k != null && k.length() != 0 && v != null && v.length() != 0) {
View Full Code Here

              .getParameter("filename"));
        }
      } else if (RELOAD_PATTERN.matcher(servletPath).matches()
          && checkUser("Admin", certs[0])) {
        // Admin-only branch: rebuild the permission and certificate maps
        // from hdfsproxy-default.xml on request.
        // NOTE(review): fragment is truncated at both edges.
        Configuration conf = new HdfsConfiguration(false);
        conf.addResource("hdfsproxy-default.xml");
        Map<String, Set<Path>> permsMap = getPermMap(conf);
        Map<String, Set<BigInteger>> certsMap = getCertsMap(conf);
        if (permsMap == null || certsMap == null) {
          // Either loader returns null when its backing file is not found;
          // surface that as a 500 rather than installing partial state.
          LOG.warn("Permission files reloading failed");
          rsp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
View Full Code Here

      // In case of IMPORT this will get rid of default directories
      // but will retain directories specified in hdfs-site.xml
      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      // Build a configuration that deliberately omits hdfs-site.xml, so
      // dirNames2 contains only default values for propertyName; removing
      // them from dirNames leaves the user-specified directories.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      // NOTE(review): fragment truncated — the isEmpty branch body is cut off.
      if(dirNames.isEmpty())
View Full Code Here

      // but will retain directories specified in hdfs-site.xml
      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      // Build a configuration that deliberately omits hdfs-site.xml, so
      // dirNames2 contains only default values for propertyName; removing
      // them from dirNames leaves the user-specified directories.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      // NOTE(review): fragment truncated mid-warning message.
      if(dirNames.isEmpty())
        LOG.warn("!!! WARNING !!!" +
View Full Code Here

      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      // Build a configuration that deliberately omits hdfs-site.xml, so
      // dirNames2 contains only default values for propertyName; removing
      // them from dirNames leaves the user-specified directories.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      // NOTE(review): fragment truncated mid-warning message.
      if(dirNames.isEmpty())
        LOG.warn("!!! WARNING !!!" +
          "\n\tThe NameNode currently runs without persistent storage." +

      // In case of IMPORT this will get rid of default directories
      // but will retain directories specified in hdfs-site.xml
      // When importing image from a checkpoint, the name-node can
      // start with empty set of storage directories.
      // Build a configuration that deliberately omits hdfs-site.xml, so
      // dirNames2 contains only default values for propertyName; removing
      // them from dirNames leaves the user-specified directories.
      Configuration cE = new HdfsConfiguration(false);
      cE.addResource("core-default.xml");
      cE.addResource("core-site.xml");
      cE.addResource("hdfs-default.xml");
      Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
      dirNames.removeAll(dirNames2);
      // NOTE(review): fragment truncated — the isEmpty branch body is cut off.
      if(dirNames.isEmpty())
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.