Examples of MiniMRCluster


Examples of org.apache.hadoop.mapred.MiniMRCluster

      final int numSlaves = 2;
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, numSlaves, true, null);
      fs = dfs.getFileSystem();
     
      mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
      writeInputFile(fs, inputPath);
      map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{})
      runProgram(SET_MEMORY_LIMIT);
      fs.delete(outputPath, true);
      assertFalse("output not cleaned up", fs.exists(outputPath));
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (System.getProperty("compile.c++") == null) {
      LOG.info("compile.c++ is not defined, so skipping TestPipes");
      return;
    }
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    Path cppExamples = new Path(System.getProperty("install.c++.examples"));
    Path inputPath = new Path("/testing/in");
    Path outputPath = new Path("/testing/out");
    try {
      final int numSlaves = 2;
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, numSlaves, true, null);
      mr = new MiniMRCluster(numSlaves, dfs.getFileSystem().getName(), 1);
      writeInputFile(dfs.getFileSystem(), inputPath);
      runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-simple"),
                 inputPath, outputPath, 3, 2, twoSplitOutput);
      cleanup(dfs.getFileSystem(), outputPath);

      runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-simple"),
                 inputPath, outputPath, 3, 0, noSortOutput);
      cleanup(dfs.getFileSystem(), outputPath);

      runProgram(mr, dfs, new Path(cppExamples, "bin/wordcount-part"),
                 inputPath, outputPath, 3, 2, fixedPartitionOutput);
      runNonPipedProgram(mr, dfs, new Path(cppExamples,"bin/wordcount-nopipe"));
      mr.waitUntilIdle();
    } finally {
      mr.shutdown();
      dfs.shutdown();
    }
  }
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

 
  protected void setUp() throws Exception {
    super.setUp();
    dfscluster = new MiniDFSCluster(new JobConf(), 2, true, null);
    fs = dfscluster.getFileSystem();
    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
    inputPath = new Path(fs.getHomeDirectory(), "test");
    filea = new Path(inputPath,"a");
    fileb = new Path(inputPath,"b");
    filec = new Path(inputPath,"c");
    // check for har containing escape worthy characters
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    b.close();
    return result.toString();
  }

  public void testWithLocal() throws Exception {
    MiniMRCluster mr = null;
    try {
      mr = new MiniMRCluster(2, "file:///", 3);
      Configuration conf = mr.createJobConf();
      runWordCount(conf);
    } finally {
      if (mr != null) { mr.shutdown(); }
    }
  }
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (options.localFS) {
      Path localFSRootPath = new Path(System.getProperty("test.build.data",
          "build/test/data/work-dir"));
      fileSys = localFSRootPath.getFileSystem(conf);
      rootPath = new Path(localFSRootPath, options.rootPath);
      mr = new MiniMRCluster(options.taskTrackers, "file:///", 3);
    } else {
      dfs = new MiniDFSCluster(conf, options.dataNodes, true, null);
      fileSys = dfs.getFileSystem();
      rootPath = new Path(options.rootPath);
      mr = new MiniMRCluster(options.taskTrackers, fileSys.getUri().toString(),
          1);
    }
    conf = getJobConf("TestBasicTableIOFormat");
    srcPath = new Path(rootPath, options.srcPath);
    fwdIndexRootPath = new Path(rootPath, options.fwdIndexRootPath);
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (printResults) {
      LOG.info("Print table contents before map/reduce");
    }
    scanTable(printResults);

    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    // set configuration parameter for index build
    conf.set("hbase.index.conf", createIndexConfContent());

    try {
      jobConf = new JobConf(conf, TestTableIndex.class);
      jobConf.setJobName("index column contents");
      jobConf.setNumMapTasks(2);
      // number of indexes to partition into
      jobConf.setNumReduceTasks(1);

      // use identity map (a waste, but just as an example)
      IdentityTableMap.initJob(TABLE_NAME, INPUT_COLUMN,
          IdentityTableMap.class, jobConf);

      // use IndexTableReduce to build a Lucene index
      jobConf.setReducerClass(IndexTableReduce.class);
      FileOutputFormat.setOutputPath(jobConf, new Path(INDEX_DIR));
      jobConf.setOutputFormat(IndexOutputFormat.class);

      JobClient.runJob(jobConf);

    } finally {
      mrCluster.shutdown();
    }

    if (printResults) {
      LOG.info("Print table contents after map/reduce");
    }
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

            Configuration config = new Configuration();
           
            // Builds and starts the mini dfs and mapreduce clusters
            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
            m_fileSys = m_dfs.getFileSystem();
            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
           
            // Create the configuration hadoop-site.xml file
            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
            conf_dir.mkdirs();
            File conf_file = new File(conf_dir, "hadoop-site.xml");
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

      Configuration config = new Configuration();
     
            // Builds and starts the mini dfs and mapreduce clusters
            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
            m_fileSys = m_dfs.getFileSystem();
            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getName(), 1);
           
            // Create the configuration hadoop-site.xml file
            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
            conf_dir.mkdirs();
            File conf_file = new File(conf_dir, "hadoop-site.xml");
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Allow the user to override FS URI for this map-reduce cluster to use.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();
View Full Code Here

Examples of org.apache.hadoop.mapred.MiniMRCluster

    fileSys = (DistributedFileSystem) dfs.getFileSystem();
    namenode = dfs.getNameNode();
    String namenodeRoot = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenodeRoot);
    mr = new MiniMRCluster(4, namenodeRoot, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

    //FileSystem.setDefaultUri(conf, namenodeRoot);
    conf.set("mapred.job.tracker", jobTrackerName);
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.