Examples of MiniMRCluster


Examples of org.apache.hadoop.mapred.MiniMRCluster

    fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    // LocalJobRunner does not work with the mapreduce OutputCommitter, so we
    // must use MiniMRCluster instead (MAPREDUCE-2350).
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
        new JobConf(conf));
    mrConf = mrCluster.createJobConf();

    if (isServerRunning) {
      return;
    }
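Taken together, these excerpts share one lifecycle: point hadoop.log.dir at a scratch directory, start a MiniMRCluster against the test FileSystem, submit jobs with the JobConf returned by createJobConf(), and shut the cluster down when done. Below is a self-contained sketch of that lifecycle; the class name, workDir handling, and setUp/tearDown naming are illustrative assumptions, while the MiniMRCluster calls mirror the excerpt.

    import java.io.File;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MiniMRCluster;

    // Illustrative sketch of the setup/teardown pattern used above.
    public class MiniMRClusterExample {   // hypothetical class name
      private MiniMRCluster mrCluster;
      private JobConf mrConf;

      public void setUp() throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // MiniMRCluster writes task logs under hadoop.log.dir; point it at
        // a scratch directory so the test does not pollute the source tree.
        File workDir = new File(System.getProperty("test.build.data", "target"));
        System.setProperty("hadoop.log.dir",
            new File(workDir, "logs").getAbsolutePath());
        // 1 task tracker, the default FS URI, 1 local dir per tracker,
        // no rack/host placement, and the test Configuration as the JobConf.
        mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
            new JobConf(conf));
        // Jobs configured from this JobConf run on the mini cluster.
        mrConf = mrCluster.createJobConf();
      }

      public void tearDown() {
        if (mrCluster != null) {
          mrCluster.shutdown();
        }
      }
    }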

Examples of org.apache.hadoop.mapred.MiniMRCluster

    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
      new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();
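On Hadoop versions where MiniMRCluster delegates to a YARN-backed mini cluster, the capacity scheduler must have at least one queue defined before jobs can be submitted; the two properties set above give it a single default queue holding all of the capacity. A tiny sketch of factoring that out, where the helper name is an assumption:

    // Hypothetical helper: define one "default" queue owning 100% of the
    // cluster capacity, the minimum the capacity scheduler needs to run jobs.
    private static void configureDefaultQueue(Configuration conf) {
      conf.set("yarn.scheduler.capacity.root.queues", "default");
      conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
    }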

Examples of org.apache.hadoop.mapred.MiniMRCluster

    fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    // LocalJobRunner does not work with the mapreduce OutputCommitter, so we
    // must use MiniMRCluster instead (MAPREDUCE-2350).
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
      new JobConf(conf));
    mrConf = mrCluster.createJobConf();
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

      jobConf.set("yarn.scheduler.capacity.root.queues", "default");
      jobConf.set("yarn.scheduler.capacity.root.default.capacity", "100");
      //conf.set("hadoop.job.history.location",new File(workDir).getAbsolutePath()+"/history");
      System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());

      mrCluster = new MiniMRCluster(jobTrackerPort,
        taskTrackerPort,
        numTaskTrackers,
        getFileSystem().getUri().toString(),
        numTaskTrackers,
        null,
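The constructor call above is cut off mid-argument list by the excerpt. For reference, classic Hadoop also ships MiniMRCluster overloads that take explicit JobTracker and TaskTracker ports; the following is a hedged sketch of a complete call assuming the five-argument overload, with all values illustrative (port 0 asks for an ephemeral free port).

    // Sketch: explicit-port overload; 0 means "pick any free port".
    MiniMRCluster mr = new MiniMRCluster(
        0,                                    // jobTrackerPort
        0,                                    // taskTrackerPort
        numTaskTrackers,                      // task trackers to start
        getFileSystem().getUri().toString(),  // namenode URI, as in the excerpt
        1);                                   // numDir: local dirs per tracker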

Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (printResults) {
      LOG.info("Print table contents before map/reduce");
    }
    scanTable(printResults);

    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    // set configuration parameter for index build
    conf.set("hbase.index.conf", createIndexConfContent());

    try {
      JobConf jobConf = new JobConf(conf, TestTableIndex.class);
      jobConf.setJobName("index column contents");
      jobConf.setNumMapTasks(2);
      // number of indexes to partition into
      jobConf.setNumReduceTasks(1);

      // use identity map (a waste, but just as an example)
      IdentityTableMap.initJob(TABLE_NAME, INPUT_COLUMN,
          IdentityTableMap.class, jobConf);

      // use IndexTableReduce to build a Lucene index
      jobConf.setReducerClass(IndexTableReduce.class);
      jobConf.setOutputPath(new Path(INDEX_DIR));
      jobConf.setOutputFormat(IndexOutputFormat.class);

      JobClient.runJob(jobConf);

    } finally {
      mrCluster.shutdown();
    }

    if (printResults) {
      LOG.info("Print table contents after map/reduce");
    }

Examples of org.apache.hadoop.mapred.MiniMRCluster

  /**
   * Test hbase mapreduce jobs against a multi-region table.
   * @throws IOException
   */
  public void testTableMapReduce() throws IOException {
    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    try {
      JobConf jobConf = new JobConf(conf, TestTableMapReduce.class);
      jobConf.setJobName("process column contents");
      jobConf.setNumMapTasks(2);
      jobConf.setNumReduceTasks(1);

      TableMap.initJob(MULTI_REGION_TABLE_NAME, INPUT_COLUMN,
          ProcessContentsMapper.class, jobConf);

      TableReduce.initJob(MULTI_REGION_TABLE_NAME,
          IdentityTableReduce.class, jobConf);
      LOG.info("Started " + MULTI_REGION_TABLE_NAME);
      JobClient.runJob(jobConf);

      // verify map-reduce results
      verify(MULTI_REGION_TABLE_NAME);
    } finally {
      mrCluster.shutdown();
    }
  }
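JobClient.runJob(jobConf) blocks until the job completes and throws an IOException if it fails, which is why these tests can verify results immediately after it returns. When a test needs to observe a job in flight, the old mapred API also allows asynchronous submission; the following is a hedged sketch, with the helper name and poll interval as assumptions.

    // Hypothetical helper: submit without blocking, then poll to completion.
    static void runAndWait(JobConf jobConf)
        throws IOException, InterruptedException {
      JobClient client = new JobClient(jobConf);
      RunningJob running = client.submitJob(jobConf);
      while (!running.isComplete()) {
        Thread.sleep(500);                 // poll interval; tune per test
      }
      if (!running.isSuccessful()) {
        throw new IOException("Job failed: " + running.getID());
      }
    }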

Examples of org.apache.hadoop.mapred.MiniMRCluster

  public void testTableIndex() throws IOException {
    long firstK = 32;
    LOG.info("Print table contents before map/reduce");
    scanTable(conf, firstK);

    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    // set configuration parameter for index build
    conf.set("hbase.index.conf", createIndexConfContent());

    try {
      JobConf jobConf = new JobConf(conf, TestTableIndex.class);
      jobConf.setJobName("index column contents");
      jobConf.setNumMapTasks(2);
      // number of indexes to partition into
      jobConf.setNumReduceTasks(1);

      // use identity map (a waste, but just as an example)
      IdentityTableMap.initJob(TABLE_NAME, INPUT_COLUMN,
          IdentityTableMap.class, jobConf);

      // use IndexTableReduce to build a Lucene index
      jobConf.setReducerClass(IndexTableReduce.class);
      jobConf.setOutputPath(new Path(INDEX_DIR));
      jobConf.setOutputFormat(IndexOutputFormat.class);

      JobClient.runJob(jobConf);

    } finally {
      mrCluster.shutdown();
    }

    LOG.info("Print table contents after map/reduce");
    scanTable(conf, firstK);

Examples of org.apache.hadoop.mapred.MiniMRCluster

    LOG.info("Print table contents before map/reduce");
    scanTable(conf, SINGLE_REGION_TABLE_NAME);
   
    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    try {
      JobConf jobConf = new JobConf(conf, TestTableMapReduce.class);
      jobConf.setJobName("process column contents");
      jobConf.setNumMapTasks(1);
      jobConf.setNumReduceTasks(1);

      TableMap.initJob(SINGLE_REGION_TABLE_NAME, INPUT_COLUMN,
          ProcessContentsMapper.class, jobConf);

      TableReduce.initJob(SINGLE_REGION_TABLE_NAME,
          IdentityTableReduce.class, jobConf);

      JobClient.runJob(jobConf);
     
    } finally {
      mrCluster.shutdown();
    }
   
    LOG.info("Print table contents after map/reduce");
    scanTable(conf, SINGLE_REGION_TABLE_NAME);

Examples of org.apache.hadoop.mapred.MiniMRCluster

    // Verify table indeed has multiple regions
    HTable table = new HTable(conf, new Text(MULTI_REGION_TABLE_NAME));
    Text[] startKeys = table.getStartKeys();
    assertTrue(startKeys.length > 1);

    @SuppressWarnings("deprecation")
    MiniMRCluster mrCluster = new MiniMRCluster(2, fs.getUri().toString(), 1);

    try {
      JobConf jobConf = new JobConf(conf, TestTableMapReduce.class);
      jobConf.setJobName("process column contents");
      jobConf.setNumMapTasks(2);
      jobConf.setNumReduceTasks(1);

      TableMap.initJob(MULTI_REGION_TABLE_NAME, INPUT_COLUMN,
          ProcessContentsMapper.class, jobConf);

      TableReduce.initJob(MULTI_REGION_TABLE_NAME,
          IdentityTableReduce.class, jobConf);

      JobClient.runJob(jobConf);
     
    } finally {
      mrCluster.shutdown();
    }
   
    // verify map-reduce results
    verify(conf, MULTI_REGION_TABLE_NAME);
  }

Examples of org.apache.hadoop.mapred.MiniMRCluster

    if (System.getProperty("compile.c++") == null) {
      LOG.info("compile.c++ is not defined, so skipping TestPipes");
      return;
    }
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fs = null;
    Path cppExamples = new Path(System.getProperty("install.c++.examples"));
    Path inputPath = new Path("/testing/in");
    Path outputPath = new Path("/testing/out");
    try {
      final int numSlaves = 2;
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, numSlaves, true, null);
      fs = dfs.getFileSystem();
      mr = new MiniMRCluster(numSlaves, fs.getName(), 1);
      writeInputFile(fs, inputPath);
      runProgram(mr, fs, new Path(cppExamples, "bin/wordcount-simple"),
                 inputPath, outputPath, 3, 2, twoSplitOutput);
      FileUtil.fullyDelete(fs, outputPath);
      assertFalse("output not cleaned up", fs.exists(outputPath));
      runProgram(mr, fs, new Path(cppExamples, "bin/wordcount-simple"),
                 inputPath, outputPath, 3, 0, noSortOutput);
      FileUtil.fullyDelete(fs, outputPath);
      assertFalse("output not cleaned up", fs.exists(outputPath));
      runProgram(mr, fs, new Path(cppExamples, "bin/wordcount-part"),
                 inputPath, outputPath, 3, 2, fixedPartitionOutput);
      runNonPipedProgram(mr, fs, new Path(cppExamples, "bin/wordcount-nopipe"));
      mr.waitUntilIdle();
    } finally {
      // Guard against NPEs if cluster startup failed part-way through.
      if (mr != null) {
        mr.shutdown();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
    }
  }
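The writeInputFile helper is not shown in the excerpt; a plausible stand-in that seeds the input path with a little text for the wordcount binaries might look like the following, where the file name and contents are assumptions.

    // Hypothetical stand-in for writeInputFile: create the input directory
    // and write a small text file for the wordcount examples to read.
    private static void writeInputFile(FileSystem fs, Path dir)
        throws IOException {
      fs.mkdirs(dir);
      FSDataOutputStream out = fs.create(new Path(dir, "part1"));
      out.writeBytes("hello world\nhello hadoop\n");
      out.close();
    }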