Examples of MiniMRCluster
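
The snippets below are excerpts from various projects' test harnesses that start an in-process MapReduce cluster against a mini HDFS. As a point of reference, a minimal, self-contained sketch of the typical lifecycle looks roughly like this (assuming the Hadoop 1.x-era org.apache.hadoop.hdfs.MiniDFSCluster and org.apache.hadoop.mapred.MiniMRCluster APIs used throughout the examples; the class and variable names here are illustrative and not taken from any of the projects quoted below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRClusterLifecycleSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Several of the examples below set hadoop.log.dir; the mini cluster's
    // task trackers write their logs there.
    System.setProperty("hadoop.log.dir", ".");

    // Start an in-process HDFS with two datanodes (format = true, default racks).
    MiniDFSCluster dfs = new MiniDFSCluster(conf, 2, true, null);
    FileSystem fs = dfs.getFileSystem();

    // Start an in-process MapReduce cluster with two task trackers, pointed at
    // the mini DFS; the final argument is the number of directories per tracker.
    MiniMRCluster mr = new MiniMRCluster(2, fs.getUri().toString(), 1);

    // createJobConf() returns a JobConf already wired to the mini cluster's
    // job tracker and file system; hand it to JobClient.runJob(...) or a Job.
    JobConf jobConf = mr.createJobConf();
    System.out.println("mapred.job.tracker = " + jobConf.get("mapred.job.tracker"));

    // Shut both clusters down when the test is done.
    mr.shutdown();
    dfs.shutdown();
  }
}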


Examples of org.apache.hadoop.mapred.MiniMRCluster

      e.printStackTrace();
      fail("Fail to startup HDFS cluster.");     
    }
    // Startup MR Cluster
    try {
      mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
          .toString(), 1);
    } catch(Exception e) {
      fail("Fail to startup Map/reduce cluster.");
    }
    // Startup collector

Examples of org.apache.hadoop.mapred.MiniMRCluster

    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Allow the user to override FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();

Examples of org.apache.hadoop.mapred.MiniMRCluster

      // Builds and starts the mini dfs and mapreduce clusters
      System.setProperty("hadoop.log.dir", ".");
      m_dfs = new MiniDFSCluster(config, dataNodes, true, null);

      m_fileSys = m_dfs.getFileSystem();
      m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

      // Create the configuration hadoop-site.xml file
      File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
      conf_dir.mkdirs();
      File conf_file = new File(conf_dir, "hadoop-site.xml");
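The excerpt above ends just before the generated configuration is written into the hadoop-site.xml file it creates. A hedged sketch of how that step is commonly done (not necessarily the exact code of the project excerpted above) is to serialize the mini cluster's JobConf with Configuration.writeXml:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

final class MiniClusterConfWriter {
  // Serializes the mini cluster's JobConf into a hadoop-site.xml style file so
  // that client code started by the test picks up the test cluster's settings.
  static void writeHadoopSiteXml(MiniMRCluster mr, File confFile) throws IOException {
    JobConf jobConf = mr.createJobConf();
    try (OutputStream out = new FileOutputStream(confFile)) {
      jobConf.writeXml(out); // Configuration.writeXml writes all key/value pairs as XML
    }
  }
}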

Examples of org.apache.hadoop.mapred.MiniMRCluster

    qSkipSet = new HashSet<String>();

    if (miniMr) {
      dfs = ShimLoader.getHadoopShims().getMiniDfs(conf, 4, true, null);
      FileSystem fs = dfs.getFileSystem();
      mr = new MiniMRCluster(4, fs.getUri().toString(), 1);
    }

    initConf();

    testFiles = conf.get("test.data.files").replace('\\', '/')

Examples of org.apache.hadoop.mapred.MiniMRCluster

            Configuration config = new Configuration();
           
            // Builds and starts the mini dfs and mapreduce clusters
            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
            m_fileSys = m_dfs.getFileSystem();
            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
           
            // Create the configuration hadoop-site.xml file
            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
            conf_dir.mkdirs();
            File conf_file = new File(conf_dir, "hadoop-site.xml");

Examples of org.apache.hadoop.mapred.MiniMRCluster

            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
            Runtime.getRuntime().addShutdownHook(new Thread() {
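The snippet is cut off at the shutdown hook registration; a typical hook body (a sketch only, not necessarily this project's exact code, and assuming mrCluster is a field visible to the anonymous class) stops the mini MapReduce cluster when the JVM exits:

            // Sketch of a shutdown hook body continuing the excerpt above; the real
            // hook may also stop the MiniDFSCluster and clean up temporary directories.
            Runtime.getRuntime().addShutdownHook(new Thread() {
                @Override
                public void run() {
                    if (mrCluster != null) {
                        mrCluster.shutdown();   // stops the job tracker and all task trackers
                    }
                }
            });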

Examples of org.apache.hadoop.mapred.MiniMRCluster

    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
      new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();

Examples of org.apache.hadoop.mapred.MiniMRCluster

    fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
      new JobConf(conf));
    mrConf = mrCluster.createJobConf();
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

    fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(fs.getWorkingDirectory()
        .toString(), "/logs").getAbsolutePath());
    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
        new JobConf(conf));
    mrConf = mrCluster.createJobConf();

    if (isServerRunning) {
      return;
