Examples of MiniDFSCluster
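
MiniDFSCluster starts an in-process HDFS cluster (a NameNode plus a configurable number of DataNodes) for tests. The examples below all follow the same lifecycle: construct the cluster, wait for it to become active, exercise the file system returned by getFileSystem(), and shut the cluster down in a finally block. A minimal sketch of that pattern (the class name and file path here are hypothetical; the four-argument constructor and DFSTestUtil helpers are the same ones the examples use):

import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSketch extends TestCase {
  public void testClusterLifecycle() throws Exception {
    Configuration conf = new Configuration();
    // conf, number of datanodes, format the namespace, default racks
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // write a small file and wait until it reaches its replication factor
      Path path = new Path("/sketch");
      DFSTestUtil.createFile(fs, path, 1024L, (short)2, 0L);
      DFSTestUtil.waitReplication(fs, path, (short)2);
    } finally {
      cluster.shutdown();
    }
  }
}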


Examples of org.apache.hadoop.hdfs.MiniDFSCluster

public class TestNodeCount extends TestCase {
  static final Log LOG = LogFactory.getLog(TestNodeCount.class);

  public void testInvalidateMultipleReplicas() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
      new MiniDFSCluster(conf, 5, true, null);
    final int FILE_LEN = 123;
    final String pathStr = "/testInvalidateMultipleReplicas";
    try {
      FileSystem fs = cluster.getFileSystem();
      Path path = new Path(pathStr);
      cluster.waitActive();
      // create a small file on 3 nodes
      DFSTestUtil.createFile(fs, path, FILE_LEN, (short)3, 0);
      DFSTestUtil.waitReplication(fs, path, (short)3);
      NameNode nn = cluster.getNameNode();
      LocatedBlocks located = nn.getBlockLocations(pathStr, 0, FILE_LEN);
     
      // Get the original block locations
      List<LocatedBlock> blocks = located.getLocatedBlocks();
      LocatedBlock firstBlock = blocks.get(0);
     
      DatanodeInfo[] locations = firstBlock.getLocations();
      assertEquals("Should have 3 good blocks", 3, locations.length);
      nn.getNamesystem().stallReplicationWork();
     
      DatanodeInfo[] badLocations = new DatanodeInfo[2];
      badLocations[0] = locations[0];
      badLocations[1] = locations[1];
     
      // Report some blocks corrupt
      LocatedBlock badLBlock = new LocatedBlock(
          firstBlock.getBlock(), badLocations);
     
      nn.reportBadBlocks(new LocatedBlock[] {badLBlock});
     
      nn.getNamesystem().restartReplicationWork();
     
      DFSTestUtil.waitReplication(fs, path, (short)3);
      NumberReplicas num = nn.getNamesystem().countNodes(
          firstBlock.getBlock());
      assertEquals(0, num.corruptReplicas());

    } finally {
      cluster.shutdown();
    }
  }
}

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

 
  public void testNodeCount() throws Exception {
    // start a mini dfs cluster of 2 nodes
    final Configuration conf = new Configuration();
    final short REPLICATION_FACTOR = (short)2;
    final MiniDFSCluster cluster =
      new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
    try {
      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
      final FileSystem fs = cluster.getFileSystem();
     
      // populate the cluster with a one block file
      final Path FILE_PATH = new Path("/testfile");
      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
      Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);

      // keep a copy of all datanode descriptor
      DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
         namesystem.heartbeats.toArray(new DatanodeDescriptor[REPLICATION_FACTOR]);
     
      // start two new nodes
      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive(false);
     
      LOG.info("Bringing down first DN");
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }

      LOG.info("Waiting for block to be replicated");
      // the block will be replicated
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      LOG.info("Restarting first datanode");
      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive(false);

      LOG.info("Waiting for excess replicas to be detected");

      // check if excessive replica is detected
      waitForExcessReplicasToChangeTo(namesystem, block, 1);

      LOG.info("Finding a non-excess node");
      // find out a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blocksMap.nodeIterator(block);
      DatanodeDescriptor nonExcessDN = null;
      while (iter.hasNext()) {
        DatanodeDescriptor dn = iter.next();
        Collection<Block> blocks = namesystem.excessReplicateMap.get(dn.getStorageID());
        if (blocks == null || !blocks.contains(block) ) {
          nonExcessDN = dn;
          break;
        }
      }
      assertNotNull(nonExcessDN);

      LOG.info("Stopping non-excess node: " + nonExcessDN);
      // bring down non excessive datanode
      dnprop = cluster.stopDataNode(nonExcessDN.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        nonExcessDN.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }

      LOG.info("Waiting for live replicas to hit repl factor");
      // the block should be re-replicated; poll until the live replica
      // count returns to the replication factor
      NumberReplicas num;
      do {
        Thread.sleep(100); // poll rather than busy-wait on the namesystem
        num = namesystem.countNodes(block);
      } while (num.liveReplicas() != REPLICATION_FACTOR);

      LOG.info("Restarting first DN");
      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive(false);

      Thread.sleep(3000); // give the restarted datanode time to register and report its blocks

      LOG.info("Waiting for excess replicas to be detected");
      // check if excessive replica is detected
      waitForExcessReplicasToChangeTo(namesystem, block, 2);
    } finally {
      cluster.shutdown();
    }
  }
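
The helper waitForExcessReplicasToChangeTo called above is not part of this excerpt. A minimal sketch, assuming the NumberReplicas value returned by FSNamesystem.countNodes exposes an excessReplicas() count, and using an arbitrary 30-second timeout:

  void waitForExcessReplicasToChangeTo(FSNamesystem namesystem,
      Block block, int newReplicas) throws Exception {
    long deadline = System.currentTimeMillis() + 30000;
    NumberReplicas num;
    do {
      Thread.sleep(100); // poll rather than busy-wait
      num = namesystem.countNodes(block);
      assertTrue("Timed out waiting for " + newReplicas + " excess replicas",
          System.currentTimeMillis() < deadline);
    } while (num.excessReplicas() != newReplicas);
  }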

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  private DistributedFileSystem fs;
  private Random rand = new Random();

  @Override
  protected void setUp() throws Exception {
    cluster = new MiniDFSCluster(CONF, 1, true, null);
    cluster.waitActive();
    nnMetrics = NameNode.getNameNodeMetrics();
    fs = (DistributedFileSystem) cluster.getFileSystem();
  }
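
This excerpt omits the matching tearDown; a minimal sketch that releases what setUp acquired might look like:

  @Override
  protected void tearDown() throws Exception {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    super.tearDown();
  }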

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  // params
  static final Path input = new Path("/test/input/");
  static final Path output = new Path("/test/output");
  File buildDir = new File(System.getProperty("test.build.data", "/tmp"));
  public void testJobShell() throws Exception {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fs = null;
    Path testFile = new Path(input, "testfile");
    try {
      Configuration conf = new Configuration();
      //start the mini mr and dfs cluster.
      dfs = new MiniDFSCluster(conf, 2 , true, null);
      fs = dfs.getFileSystem();
      FSDataOutputStream stream = fs.create(testFile);
      stream.write("teststring".getBytes());
      stream.close();
      mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
      File thisbuildDir = new File(buildDir, "jobCommand");
      assertTrue("create build dir", thisbuildDir.mkdirs());
      File f = new File(thisbuildDir, "files_tmp");
      FileOutputStream fstream = new FileOutputStream(f);
      fstream.write("somestrings".getBytes());
      fstream.close();
      String[] args = new String[6];
      args[0] = "-files";
      args[1] = f.toString();
      args[2] = "-libjars";
      // reuse the prebuilt testjob.jar rather than building one here
      args[3] = "build/test/testjar/testjob.jar";
      args[4] = input.toString();
      args[5] = output.toString();
     
      JobConf jobConf = mr.createJobConf();
      //before running the job, verify that libjar is not in client classpath
      assertTrue("libjar not in client classpath", loadLibJar(jobConf)==null);
      int ret = ToolRunner.run(jobConf,
                               new testshell.ExternalMapReduce(), args);
      //after running the job, verify that libjar is in the client classpath
      assertTrue("libjar added to client classpath", loadLibJar(jobConf)!=null);
     
      assertTrue("not failed ", ret != -1);
      f.delete();
      thisbuildDir.delete();
     
      // test duplicate uris for options -files and -archives
      testDuplicateURI(mr, dfs);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  private Path filea, fileb, filec;
  private Path archivePath;
 
  protected void setUp() throws Exception {
    super.setUp();
    dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null);
    fs = dfscluster.getFileSystem();
    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
    inputPath = new Path(fs.getHomeDirectory(), "test");
    inputrelPath = new Path(fs.getHomeDirectory().toUri().
        getPath().substring(1), "test");
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

 
  void setUpCluster(JobConf conf) throws IOException {
    if(miniMRCluster == null) {
      miniDFSCluster = new MiniDFSCluster(conf, 1, true, null);
      FileSystem fileSys = miniDFSCluster.getFileSystem();
      TestMiniMRWithDFSWithDistinctUsers.mkdir(fileSys, "/user");
      TestMiniMRWithDFSWithDistinctUsers.mkdir
        (fileSys, conf.get("mapreduce.jobtracker.staging.root.dir",
                           "/tmp/hadoop/mapred/staging"));
    }
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  private static MiniDFSCluster dfsCluster = null;
  public static Test suite() {
    TestSetup setup = new TestSetup(new TestSuite(TestReduceFetch.class)) {
      protected void setUp() throws Exception {
        Configuration conf = new Configuration();
        dfsCluster = new MiniDFSCluster(conf, 2, true, null);
        mrCluster = new MiniMRCluster(2,
            dfsCluster.getFileSystem().getUri().toString(), 1);
      }
      protected void tearDown() throws Exception {
        if (dfsCluster != null) { dfsCluster.shutdown(); }
        if (mrCluster != null) { mrCluster.shutdown(); }
      }
    };
    return setup;
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster

  protected void startCluster()
      throws IOException, InterruptedException {
    JobConf conf = new JobConf();
    dfsCluster = new MiniDFSCluster(conf, NUMBER_OF_NODES, true, null);
    conf.set("mapred.task.tracker.task-controller",
        MyLinuxTaskController.class.getName());
    mrCluster =
        new MiniMRCluster(NUMBER_OF_NODES, dfsCluster.getFileSystem().getUri()
            .toString(), 4, null, null, conf);
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster


  public void testLostTracker() throws IOException {
    String namenode = null;
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;

    try {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.replication.considerLoad", false);
      dfs = new MiniDFSCluster(conf, 1, true, null, null);
      dfs.waitActive();
      fileSys = dfs.getFileSystem();
     
      // clean up
      fileSys.delete(testDir, true);
     
      if (!fileSys.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
      }

      // Write the input file
      UtilsForTests.writeFile(dfs.getNameNode(), conf,
                              new Path(inDir + "/file"), (short)1);

      dfs.startDataNodes(conf, 1, true, null, null, null, null);
      dfs.waitActive();

      namenode = (dfs.getFileSystem()).getUri().getHost() + ":"
                 + (dfs.getFileSystem()).getUri().getPort();

      JobConf jtConf = new JobConf();
      jtConf.setInt("mapred.tasktracker.map.tasks.maximum", 1);
      jtConf.setInt("mapred.tasktracker.reduce.tasks.maximum", 1);
      jtConf.setLong("mapred.tasktracker.expiry.interval", 10 * 1000);
      jtConf.setInt("mapred.reduce.copy.backoff", 4);
     
      mr = new MiniMRCluster(2, namenode, 1, null, null, jtConf);
     
      // Test Lost tracker case
      testLostTracker(dfs, mr);
    } finally {
      if (mr != null) {
        try {
          mr.shutdown();
        } catch (Exception e) {
          // ignore errors during shutdown
        }
      }
      if (dfs != null) {
        try {
          dfs.shutdown();
        } catch (Exception e) {
          // ignore errors during shutdown
        }
      }
    }
  }

Examples of org.apache.hadoop.hdfs.MiniDFSCluster


  public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1);
      // make cleanup inline so that the existence of these directories
      // can be validated
      mr.setInlineCleanupThreads();

      runPI(mr, mr.createJobConf());
      runWordCount(mr, mr.createJobConf());
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }