Examples of MiniMRCluster
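
The snippets below are collected from several open-source projects, apparently including Hadoop's own DistCp tests, HBase, Pig, and Chukwa, and show how org.apache.hadoop.mapred.MiniMRCluster is started, used, and shut down in tests.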


Examples of org.apache.hadoop.mapred.MiniMRCluster
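In this first example, the mini cluster hosts a DistCp run: the test derives distcp.bytes.per.map from the total source size, copies /srcdat to /destdat twice with different -m limits, and asserts the expected number of entries in the log directory.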


  public void testMapCount() throws Exception {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();
      // Start a 3-datanode mini DFS ('true' formats it) and a matching
      // 3-tracker MR cluster pointed at its namenode.
      dfs = new MiniDFSCluster(conf, 3, true, null);
      FileSystem fs = dfs.getFileSystem();
      final FsShell shell = new FsShell(conf);
      namenode = fs.getUri().toString();
      mr = new MiniMRCluster(3, namenode, 1);
      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
      long totsize = 0;
      for (MyFile f : files) {
        totsize += f.getSize();
      }
      Configuration job = mr.createJobConf();
      // Cap the per-map byte count at a third of the total so DistCp plans
      // about three maps, even though -m allows up to 100.
      job.setLong("distcp.bytes.per.map", totsize / 3);
      ToolRunner.run(new DistCp(job),
          new String[] {"-m", "100",
                        "-log",
                        namenode+"/logs",
                        namenode+"/srcdat",
                        namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(fs, "/destdat", files));

      String logdir = namenode + "/logs";
      System.out.println(execCmd(shell, "-lsr", logdir));
      FileStatus[] logs = fs.listStatus(new Path(logdir));
      // In the rare case where the splits come out exact, logs.length
      // can be 4 instead of the expected 5.
      assertTrue("Unexpected map count, logs.length=" + logs.length,
          logs.length == 5 || logs.length == 4);

      deldir(fs, "/destdat");
      deldir(fs, "/logs");
      // Run the copy again, this time forced to a single map.
      ToolRunner.run(new DistCp(job),
          new String[] {"-m", "1",
                        "-log",
                        namenode+"/logs",
                        namenode+"/srcdat",
                        namenode+"/destdat"});

      System.out.println(execCmd(shell, "-lsr", logdir));
      logs = fs.listStatus(new Path(logdir));
      assertTrue("Unexpected map count, logs.length=" + logs.length,
          logs.length == 2);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
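
Every example on this page follows the same skeleton: start a MiniDFSCluster (or point at the local file system), start a MiniMRCluster against it, build a job configuration with createJobConf(), run the job, and shut everything down. Below is a minimal sketch of that lifecycle, assuming the Hadoop 1.x test jars (hadoop-test) and the 0.20+ package layout; the class name and cluster sizes are illustrative, not taken from any one example.

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniClusterLifecycleSketch extends TestCase {

  public void testLifecycle() throws Exception {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    try {
      Configuration conf = new Configuration();
      // Start a 2-datanode DFS; 'true' formats it first.
      dfs = new MiniDFSCluster(conf, 2, true, null);
      FileSystem fs = dfs.getFileSystem();
      // Start 2 task trackers against the mini DFS; the last argument
      // is the number of local directories per task tracker.
      mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
      // Jobs configured from this JobConf run on the mini cluster.
      JobConf job = mr.createJobConf();
      // ... configure and run a job against 'job' here ...
    } finally {
      // Shut down in a finally block so a failing test does not
      // leak cluster daemons into later tests.
      if (mr != null) { mr.shutdown(); }
      if (dfs != null) { dfs.shutdown(); }
    }
  }
}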

Examples of org.apache.hadoop.mapred.MiniMRCluster
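The next setup starts four task trackers against a mini DFS, records the job tracker and HFTP addresses, and pushes the job tracker address into the configuration as mapred.job.tracker.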

    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

    conf.set("mapred.job.tracker", jobTrackerName);

Examples of org.apache.hadoop.mapred.MiniMRCluster
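A close variant of the previous setup: a 6-datanode DFS with a configurable number of task trackers.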

    dfs = new MiniDFSCluster(conf, 6, true, null);
    dfs.waitActive();
    fileSys = dfs.getFileSystem();
    namenode = fileSys.getUri().toString();
    // Start the MR cluster against the 6-datanode DFS started above.
    mr = new MiniMRCluster(taskTrackers, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
    conf.set("mapred.job.tracker", jobTrackerName);

Examples of org.apache.hadoop.mapred.MiniMRCluster
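This setup (note the MapreduceTestingShim below) raises the YARN virtual-memory ratio before starting the cluster through the long constructor overload, which also takes explicit job tracker and task tracker ports.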

    // Tests were failing because this process used 6GB of virtual memory
    // and was getting killed, so we raise the allowed virtual-memory
    // ratio to keep test processes from being killed.
    conf.setFloat("yarn.nodemanager.vmem-pmem-ratio", 8.0f);

    // The leading 0, 0 arguments let the job tracker and task trackers
    // bind to free ports chosen at runtime.
    mrCluster = new MiniMRCluster(0, 0, servers,
      FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));

    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      // Fall back to the cluster's own JobConf when the shim has none.
      jobConf = mrCluster.createJobConf();

Examples of org.apache.hadoop.mapred.MiniMRCluster
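Here the test chooses between the local file system (the "file:///" URI) and a freshly started MiniDFSCluster, depending on its options.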

    if (options.localFS) {
      // Run against the local file system, rooted under the test build dir.
      Path localFSRootPath = new Path(System.getProperty("test.build.data",
          "build/test/data/work-dir"));
      fileSys = localFSRootPath.getFileSystem(conf);
      rootPath = new Path(localFSRootPath, options.rootPath);
      mr = new MiniMRCluster(options.taskTrackers, "file:///", 3);
    } else {
      // Otherwise stand up a mini DFS and point the MR cluster at it.
      dfs = new MiniDFSCluster(conf, options.dataNodes, true, null);
      fileSys = dfs.getFileSystem();
      rootPath = new Path(options.rootPath);
      mr = new MiniMRCluster(options.taskTrackers, fileSys.getUri().toString(),
          1);
    }
    conf = getJobConf("TestBasicTableIOFormat");
    srcPath = new Path(rootPath, options.srcPath);
    fwdIndexRootPath = new Path(rootPath, options.fwdIndexRootPath);

Examples of org.apache.hadoop.mapred.MiniMRCluster
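In this example the mini cluster builds a Lucene index from an HBase table: an identity map feeds IndexTableReduce, and the cluster is shut down in the finally block.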

    if (printResults) {
      LOG.info("Print table contents before map/reduce");
    }
    scanTable(printResults);

    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    // set configuration parameter for index build
    conf.set("hbase.index.conf", createIndexConfContent());

    try {
      jobConf = new JobConf(conf, TestTableIndex.class);
      jobConf.setJobName("index column contents");
      jobConf.setNumMapTasks(2);
      // number of indexes to partition into
      jobConf.setNumReduceTasks(1);

      // use identity map (a waste, but just as an example)
      IdentityTableMap.initJob(TABLE_NAME, INPUT_COLUMN,
          IdentityTableMap.class, jobConf);

      // use IndexTableReduce to build a Lucene index
      jobConf.setReducerClass(IndexTableReduce.class);
      FileOutputFormat.setOutputPath(jobConf, new Path(INDEX_DIR));
      jobConf.setOutputFormat(IndexOutputFormat.class);

      JobClient.runJob(jobConf);

    } finally {
      mrCluster.shutdown();
    }

    if (printResults) {
      LOG.info("Print table contents after map/reduce");
    }

Examples of org.apache.hadoop.mapred.MiniMRCluster
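A full table map/reduce round trip: ProcessContentsMapper processes column contents, the results are verified, and both the cluster and hadoop.tmp.dir are cleaned up in the finally block.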

  public void testMultiRegionTable() throws IOException {
    runTestOnTable(new HTable(conf, MULTI_REGION_TABLE_NAME));
  }

  private void runTestOnTable(HTable table) throws IOException {
    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    JobConf jobConf = null;
    try {
      LOG.info("Before map/reduce startup");
      jobConf = new JobConf(conf, TestTableMapReduce.class);
      jobConf.setJobName("process column contents");
      jobConf.setNumReduceTasks(1);
      TableMapReduceUtil.initTableMapJob(Bytes.toString(table.getTableName()),
        INPUT_COLUMN, ProcessContentsMapper.class,
        ImmutableBytesWritable.class, BatchUpdate.class, jobConf);
      TableMapReduceUtil.initTableReduceJob(Bytes.toString(table.getTableName()),
        IdentityTableReduce.class, jobConf);

      LOG.info("Started " + Bytes.toString(table.getTableName()));
      JobClient.runJob(jobConf);
      LOG.info("After map/reduce completion");

      // verify map-reduce results
      verify(Bytes.toString(table.getTableName()));
    } finally {
      mrCluster.shutdown();
      if (jobConf != null) {
        FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir")));
      }
    }
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster
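Another shim-based setup: speculative execution is switched off, and FS_URI, when set, overrides the file system URI the cluster uses.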

    conf.setBoolean("mapreduce.map.speculative", false);
    conf.setBoolean("mapreduce.reduce.speculative", false);
    ////

    // Allow the user to override the FS URI this map-reduce cluster uses.
    mrCluster = new MiniMRCluster(servers,
      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
      null, null, new JobConf(this.conf));
    JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
    if (jobConf == null) {
      jobConf = mrCluster.createJobConf();

Examples of org.apache.hadoop.mapred.MiniMRCluster
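This harness starts both mini clusters and then creates a hadoop-site.xml under the user's home directory; the excerpt is cut off before the file is written, and a sketch of that write follows it.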

            Configuration config = new Configuration();
           
            // Builds and starts the mini dfs and mapreduce clusters
            m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
            m_fileSys = m_dfs.getFileSystem();
            m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);
           
            // Create the configuration hadoop-site.xml file
            File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/");
            conf_dir.mkdirs();
            File conf_file = new File(conf_dir, "hadoop-site.xml");
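
A plausible continuation, assuming the test serializes the live cluster settings with Configuration.writeXml(OutputStream) (a java.io.FileOutputStream import is assumed; the stream handling here is illustrative, not from the excerpt):

            // Illustrative sketch: persist the mini cluster settings into
            // hadoop-site.xml so processes reading this conf directory see them.
            Configuration cluster_conf = m_mr.createJobConf();
            FileOutputStream out = new FileOutputStream(conf_file);
            try {
                cluster_conf.writeXml(out);
            } finally {
                out.close();
            }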

Examples of org.apache.hadoop.mapred.MiniMRCluster
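The last example benchmarks a Demux job: it writes generated input to the mini DFS, runs the job with mr.createJobConf(), and prints the output size and elapsed time.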

      System.setProperty("hadoop.log.dir", System.getProperty(
          "test.build.data", "/tmp"));
      MiniDFSCluster dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true,
          null);
      FileSystem fileSys = dfs.getFileSystem();
      MiniMRCluster mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri()
          .toString(), 1);
      writeASinkFile(conf, fileSys, DEMUX_INPUT_PATH, LINES);

      System.out.println("wrote "
          + fileSys.getFileStatus(DEMUX_INPUT_PATH).getLen()
          + " bytes of temp test data");
      long ts_start = System.currentTimeMillis();
      runDemux(mr.createJobConf(), DEMUX_INPUT_PATH, DEMUX_OUTPUT_PATH);

      long time = (System.currentTimeMillis() - ts_start);
      long bytes = fileSys.getContentSummary(DEMUX_OUTPUT_PATH).getLength();
      System.out.println("result was " + bytes + " bytes long");
      System.out.println("processing took " + time + " milliseconds");