Examples of MiniMRCluster
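MiniMRCluster, from the Hadoop 1.x-era test jars, starts an in-process job tracker and task trackers so MapReduce code can be exercised in unit tests without a real cluster. The snippets below, collected from different projects, show the recurring patterns: constructing the cluster against a MiniDFSCluster or the local file system, copying the resulting mapred.job.tracker address into a test Configuration, and shutting everything down afterwards. As an orientation, here is a minimal life-cycle sketch; the class name is illustrative and the MiniDFSCluster import assumes the Hadoop 1.x package layout.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // One-datanode HDFS instance to back the MapReduce cluster.
    MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
    MiniMRCluster mr = null;
    try {
      FileSystem fs = dfs.getFileSystem();
      // Two task trackers, one local directory per tracker.
      mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
      // The returned JobConf already points at the in-process job tracker.
      JobConf jobConf = mr.createJobConf();
      // ... submit test jobs against jobConf here ...
    } finally {
      if (mr != null) { mr.shutdown(); }
      dfs.shutdown();
    }
  }
}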


Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (logDir == null) {
      logDir = tmpDir;
    }
    System.setProperty("hadoop.log.dir", logDir);
    c.set("mapred.output.dir", tmpDir);
    mrCluster = new MiniMRCluster(servers,
      FileSystem.get(conf).getUri().toString(), 1);
    LOG.info("Mini mapreduce cluster started");
    JobConf mrClusterJobConf = mrCluster.createJobConf();
    c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
    /* this is for MRv2 support */
 

Examples of org.apache.hadoop.mapred.MiniMRCluster

      fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
      String nnURI = fileSystem.getUri().toString();
      int numDirs = 1;
      String[] racks = null;
      String[] hosts = null;
      MR_CLUSTER = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
      return MR_CLUSTER.createJobConf(conf);
    }
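The nine-argument constructor used above is the most explicit form. A hedged, annotated restatement of the call, using the same variable names as the snippet and parameter meanings that follow the Hadoop 1.x MiniMRCluster signature:

      MR_CLUSTER = new MiniMRCluster(
          0,             // job tracker port; 0 = pick a free ephemeral port
          0,             // task tracker port; 0 = pick a free ephemeral port
          taskTrackers,  // number of in-process task trackers to start
          nnURI,         // URI of the namenode backing the cluster
          numDirs,       // local mapred directories per task tracker
          racks,         // optional rack names; null = single default rack
          hosts,         // optional host names for the trackers; null = defaults
          null,          // optional UGI to run the trackers as
          conf);         // configuration whose settings seed the cluster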

Examples of org.apache.hadoop.mapred.MiniMRCluster

    b.close();
    return result.toString();
  }

  public void testWithLocal() throws Exception {
    MiniMRCluster mr = null;
    try {
      mr = new MiniMRCluster(2, "file:///", 3);
      Configuration conf = mr.createJobConf();
      runWordCount(conf);
      runMultiFileWordCount(conf);
    } finally {
      if (mr != null) { mr.shutdown(); }
    }
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

 
  protected void setUp() throws Exception {
    super.setUp();
    dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
    fs = dfscluster.getFileSystem();
    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
    inputPath = new Path(fs.getHomeDirectory(), "test");
    inputrelPath = new Path(fs.getHomeDirectory().toUri().
        getPath().substring(1), "test");
    filea = new Path(inputPath,"a");
    fileb = new Path(inputPath,"b");
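The excerpt ends before the matching teardown. A typical companion tearDown, sketched under the assumption that dfscluster and mapred are the fields initialized above, shuts the clusters down in reverse order of startup:

  protected void tearDown() throws Exception {
    // Stop MapReduce first, then the DFS it was running against.
    if (mapred != null) {
      mapred.shutdown();
    }
    if (dfscluster != null) {
      dfscluster.shutdown();
    }
    super.tearDown();
  }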

Examples of org.apache.hadoop.mapred.MiniMRCluster

  }

  public void testMultipleCachefiles() throws Exception
  {
    boolean mayExit = false;
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    try{
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fileSys = dfs.getFileSystem();
      String namenode = fileSys.getName();
      mr  = new MiniMRCluster(1, namenode, 3);
      // During tests, the default Configuration will use a local mapred
      // So don't specify -config or -cluster
      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
      String strNamenode = "fs.default.name=" + namenode;
      String argv[] = new String[] {
        "-input", INPUT_FILE,
        "-output", OUTPUT_DIR,
        "-mapper", map,
        "-reducer", reduce,
        //"-verbose",
        //"-jobconf", "stream.debug=set"
        "-jobconf", strNamenode,
        "-jobconf", strJobtracker,
        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
        "-jobconf",
          JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-jobconf",
          JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#" + mapString,
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE_2 + "#" + mapString2
      };

      fileSys.delete(new Path(OUTPUT_DIR));
     
      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
      file.writeBytes(mapString + "\n");
      file.writeBytes(mapString2 + "\n");
      file.close();
      file = fileSys.create(new Path(CACHE_FILE));
      file.writeBytes(cacheString);
      file.close();
      file = fileSys.create(new Path(CACHE_FILE_2));
      file.writeBytes(cacheString2);
      file.close();
       
      job = new StreamJob(argv, mayExit);    
      job.go();

      fileSys = dfs.getFileSystem();
      String line = null;
      String line2 = null;
      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
                                   new Path(OUTPUT_DIR),
                                   new Utils.OutputFileUtils
                                     .OutputFilesFilter()));
      for (int i = 0; i < fileList.length; i++){
        System.out.println(fileList[i].toString());
        BufferedReader bread =
          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
        line = bread.readLine();
        System.out.println(line);
        line2 = bread.readLine();
        System.out.println(line2);
      }
      assertEquals(cacheString + "\t", line);
      assertEquals(cacheString2 + "\t", line2);
    } finally{
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown();}
    }
   
  }
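Two details in the argument list above carry the whole test: the pair of -jobconf overrides (fs.default.name and mapred.job.tracker) point the streaming job at the mini clusters, and each -cacheFile value is an HDFS URI with a "#name" fragment giving the symlink the file appears under in the task's working directory. A condensed sketch of just that wiring, with illustrative paths and commands:

      String[] argv = new String[] {
        "-input",   "/testing/input.txt",
        "-output",  "/testing/out",
        "-mapper",  "xargs cat",
        "-reducer", "cat",
        // Point the streaming job at the mini DFS and mini MapReduce clusters.
        "-jobconf", "fs.default.name=" + namenode,
        "-jobconf", "mapred.job.tracker=localhost:" + mr.getJobTrackerPort(),
        // Ship an HDFS file into each task's working dir under the name "lookup".
        "-cacheFile", "hdfs://" + namenode + "/testing/cache.txt#lookup"
      };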

Examples of org.apache.hadoop.mapred.MiniMRCluster

  }

  public void testSymLink() throws Exception
  {
    boolean mayExit = false;
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    try{
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fileSys = dfs.getFileSystem();
      String namenode = fileSys.getName();
      mr  = new MiniMRCluster(1, namenode, 3);
      // During tests, the default Configuration will use a local mapred
      // So don't specify -config or -cluster
      String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
      String strNamenode = "fs.default.name=" + namenode;
      String argv[] = new String[] {
        "-input", INPUT_FILE,
        "-output", OUTPUT_DIR,
        "-mapper", map,
        "-reducer", reduce,
        //"-verbose",
        //"-jobconf", "stream.debug=set"
        "-jobconf", strNamenode,
        "-jobconf", strJobtracker,
        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
        "-jobconf",
          JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-jobconf",
          JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
            "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
            "-Dbuild.test=" + System.getProperty("build.test") + " " +
            conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS,
                     conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
        "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
      };

      fileSys.delete(new Path(OUTPUT_DIR), true);
     
      DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
      file.writeBytes(mapString);
      file.close();
      file = fileSys.create(new Path(CACHE_FILE));
      file.writeBytes(cacheString);
      file.close();
       
      job = new StreamJob(argv, mayExit);     
      job.go();

      fileSys = dfs.getFileSystem();
      String line = null;
      Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(
                                              new Path(OUTPUT_DIR),
                                              new Utils.OutputFileUtils
                                              .OutputFilesFilter()));
      for (int i = 0; i < fileList.length; i++){
        System.out.println(fileList[i].toString());
        BufferedReader bread =
          new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
        line = bread.readLine();
        System.out.println(line);
      }
      assertEquals(cacheString + "\t", line);
    } finally{
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown();}
    }
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

    conf = new Configuration();
    dfs = new MiniDFSCluster(conf, 1, true, null);
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().getAuthority();
    mr  = new MiniMRCluster(1, namenode, 1);
    strJobTracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
    strNamenode = "fs.default.name=" + namenode;
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

  @BeforeClass
  public static void setUp() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
    jConf = new JobConf(conf);
    mrCluster = new MiniMRCluster(0, 0, numSlaves,
        dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
        jConf);
   
    createTokenFileJson();
    verifySecretKeysInJSONFile();
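Because the @BeforeClass method creates the clusters as static fields, the matching cleanup belongs in a static @AfterClass method; a sketch, assuming mrCluster and dfsCluster are the fields initialized above:

  @AfterClass
  public static void tearDown() throws Exception {
    // Shut down MapReduce before the DFS it depends on.
    if (mrCluster != null) { mrCluster.shutdown(); }
    if (dfsCluster != null) { dfsCluster.shutdown(); }
  }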

Examples of org.apache.hadoop.mapred.MiniMRCluster

        for (int i = 0; i < numTaskTrackers; i++) {
          hosts[i] = "127.0.1." + (i + 1);
        }
      }

      mrc = new MiniMRCluster(
        jobTrackerPort, taskTrackerPort, numTaskTrackers,
        fs, 1 /* numDir */, null /* racks */, hosts, null /*ugi*/,
        conf);
      LOG.info("Started MiniMRCluster  -- jobtracker on port "
          + mrc.getJobTrackerPort());

Examples of org.apache.hadoop.mapred.MiniMRCluster

    cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null,
        null, null);
    cluster.waitActive();
    URI uri = cluster.getFileSystem().getUri();
   
    MiniMRCluster miniMRCluster = new MiniMRCluster(0, uri.toString() ,
      3, null, null, config);
   
    config.set("mapred.job.tracker", "localhost:"+miniMRCluster.getJobTrackerPort());
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  }