Package org.apache.hadoop.conf

Examples of org.apache.hadoop.conf.Configuration.writeXml()
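
Configuration.writeXml() serializes every property set on a Configuration into Hadoop's standard *-site.xml format. The snippets below, drawn from various projects, show common ways to use it. As a starting point, a minimal self-contained sketch (the class name, file name, and property are illustrative, not taken from the examples below):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;

    public class WriteXmlExample {
      public static void main(String[] args) throws Exception {
        // false: do not load core-default.xml / core-site.xml defaults
        Configuration conf = new Configuration(false);
        conf.set("example.key", "example.value");  // illustrative property
        // Close the stream explicitly so the XML is fully flushed to disk.
        try (OutputStream os = new FileOutputStream(new File("example-site.xml"))) {
          conf.writeXml(os);
        }
      }
    }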


        dfsCluster.getFileSystem().getConf());

    File testConfFile = new File(TEST_ROOT_DIR, "test.xml");
    try {
      testConfFile.createNewFile();
      // Close the stream explicitly so the XML is fully flushed to disk.
      try (OutputStream os = new FileOutputStream(testConfFile)) {
        testConf.writeXml(os);
      }
      testConfFile.deleteOnExit();
    } catch (IOException e) {
      throw new RuntimeException(e);
View Full Code Here
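
writeXml() is not documented to close the stream it is given, so the caller is responsible for closing it; wrapping the FileOutputStream in try-with-resources, as here and in the next example, ensures the XML is flushed and the file descriptor released even if serialization fails.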


    confFilePath = new Path(workDir.getAbsolutePath(), YARN_CLUSTER_CONFIG);
    File confFile = new File(confFilePath.toString());
    try {
      confFile.createNewFile();
      // Close the stream explicitly so the XML is fully flushed to disk.
      try (OutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
      }
      confFile.deleteOnExit();
    } catch (IOException e) {
      throw new RuntimeException(e);
View Full Code Here

        .setHistoryText(iReduceStageHistoryText), 2);
    ivertex.addTaskLocalFiles(commonLocalResources);
    vertices.add(ivertex);

    // Serialize the final-reduce stage Configuration to XML, kept as UTF-8
    // history text for the vertex created below.
    ByteArrayOutputStream finalReduceOutputStream = new ByteArrayOutputStream(4096);
    finalReduceConf.writeXml(finalReduceOutputStream);
    String finalReduceStageHistoryText = new String(finalReduceOutputStream.toByteArray(), "UTF-8");
    UserPayload finalReducePayload = TezUtils.createUserPayloadFromConf(finalReduceConf);
    Vertex finalReduceVertex = Vertex.create("finalreduce",
        ProcessorDescriptor.create(
            ReduceProcessor.class.getName())
View Full Code Here
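
Reduced to its essence, this is the in-memory variant of writeXml(): targeting a ByteArrayOutputStream yields the configuration as a UTF-8 XML string (here used as Tez vertex history text) rather than a file. A minimal sketch, assuming a Configuration conf is in scope:

    // Sketch: render a Configuration as a UTF-8 XML string in memory.
    ByteArrayOutputStream buf = new ByteArrayOutputStream(4096);
    conf.writeXml(buf);
    String xml = new String(buf.toByteArray(), "UTF-8");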

      String key = (String) e.nextElement();
      LOG.debug("Adding " + key + schedulerConfProps.getProperty(key));
      config.set(key, schedulerConfProps.getProperty(key));
    }

    config.writeXml(out);
    out.close();

    LOG.info("setting resource path where capacity-scheduler's config file "
        + "is placed to " + myResourcePath);
    System.setProperty(MY_SCHEDULER_CONF_PATH_PROPERTY, myResourcePath);
View Full Code Here
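
The loop above copies java.util.Properties entries into a Configuration before serializing it. A condensed sketch of that pattern (the helper name propsToXml is hypothetical, not from the code above):

    // Sketch: copy java.util.Properties into a Configuration, then serialize.
    static void propsToXml(Properties props, File file) throws IOException {
      Configuration config = new Configuration(false);
      for (String key : props.stringPropertyNames()) {
        config.set(key, props.getProperty(key));
      }
      try (OutputStream out = new FileOutputStream(file)) {
        config.writeXml(out);
      }
    }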

      Configuration yarnClusterConfig = yarnCluster.getConfig();
      yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
      // Write the document to a buffer first, not directly to the file: the
      // half-written file could otherwise be read concurrently, which would fail.
      ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
      yarnClusterConfig.writeXml(bytesOut);
      bytesOut.close();
      // Write the bytes to the file in the classpath
      OutputStream os = new FileOutputStream(new File(url.getPath()));
      os.write(bytesOut.toByteArray());
      os.close();
View Full Code Here
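
The intermediate buffer is the point of this snippet: the classpath copy of the config file is being overwritten in place, so rendering the XML fully in memory first shrinks the window in which the file is empty or truncated to a single bulk write, instead of holding the file open for the whole serialization.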

        // (.staging/jobid/files - RecoveryService will need to be patched)
        FSDataOutputStream jobFileOut = null;
        try {
          if (logDirConfPath != null) {
            jobFileOut = stagingDirFS.create(logDirConfPath, true);
            conf.writeXml(jobFileOut);
            jobFileOut.close();
          }
        } catch (IOException e) {
          LOG.error("Failed to write the job configuration file", e);
          throw e;
View Full Code Here
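
Because FSDataOutputStream is an ordinary OutputStream, writeXml() can also persist a configuration directly into HDFS, as this snippet does. A minimal sketch, with an illustrative staging path:

    // Sketch: persist a job Configuration into HDFS (path is illustrative).
    FileSystem fs = FileSystem.get(conf);
    Path jobConfPath = new Path("/staging/job_0001/job.xml");
    try (FSDataOutputStream out = fs.create(jobConfPath, true)) {  // true = overwrite
      conf.writeXml(out);
    }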

            "Could not find 'yarn-site.xml' dummy file in classpath");
      }
      // Write the document to a buffer first, not directly to the file: the
      // half-written file could otherwise be read concurrently, which would fail.
      ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
      yarnClusterConfig.writeXml(bytesOut);
      bytesOut.close();
      // Write the bytes to the file in the classpath
      OutputStream os = new FileOutputStream(new File(url.getPath()));
      os.write(bytesOut.toByteArray());
      os.close();
View Full Code Here

    String fsDefaultName = getProxiedFSURI();
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
    File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
    OutputStream os = new FileOutputStream(hdfsSite);
    conf.writeXml(os);
    os.close();

    //HTTPFS configuration
    conf = new Configuration(false);
    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
View Full Code Here
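
The generated hdfs-site.xml can be loaded back the same way Hadoop loads its own resources, via Configuration.addResource(). A short sketch building on the hdfsSite file above:

    // Sketch: read the generated file back into a fresh Configuration.
    Configuration readBack = new Configuration(false);
    readBack.addResource(new Path(hdfsSite.toURI()));
    String fsUri = readBack.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);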

    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
             HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
    conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
    os = new FileOutputStream(httpfsSite);
    conf.writeXml(os);
    os.close();

    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    URL url = cl.getResource("webapp");
    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
View Full Code Here

    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
    OutputStream os = new FileOutputStream(hdfsSite);
    Configuration hadoopConf = new Configuration(false);
    hadoopConf.set("foo", "BAR");
    hadoopConf.writeXml(os);
    os.close();

    Server server = new Server("server", dir, dir, dir, dir, conf);
    server.init();
    FileSystemAccessService fsAccess = (FileSystemAccessService) server.get(FileSystemAccess.class);
View Full Code Here
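
All of the examples above use the OutputStream overload, but Configuration also provides writeXml(Writer), which is convenient for capturing the XML as an in-memory string:

    // Sketch: capture the configuration XML via the Writer overload.
    StringWriter sw = new StringWriter();
    hadoopConf.writeXml(sw);
    String xml = sw.toString();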
