Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
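Before the individual examples, here is a minimal sketch of the common lifecycle (start the cluster, use its FileSystem, shut it down in a finally block). It uses only the constructor and methods that appear in the snippets below; the class and test names are illustrative.

import java.io.IOException;

import junit.framework.TestCase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MiniDFSClusterLifecycle extends TestCase {
  public void testLifecycle() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // 2 datanodes, format the namespace, default rack assignment
      cluster = new MiniDFSCluster(conf, 2, true, null);
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      assertTrue(fs.mkdirs(new Path("/smoke-test")));
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}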



  public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1);

      runPI(mr, mr.createJobConf());
      runWordCount(mr, mr.createJobConf());
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
View Full Code Here


 
  public void testWithDFSWithDefaultPort() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      // start a dfs with the default port number
      dfs = new MiniDFSCluster(
          NameNode.DEFAULT_PORT, conf, 4, true, true, null, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1);

      JobConf jobConf = mr.createJobConf();
      TestResult result;
      final Path inDir = new Path("./wc/input");
      final Path outDir = new Path("hdfs://" +
          dfs.getNameNode().getNameNodeAddress().getHostName() +
          ":" + NameNode.DEFAULT_PORT +"/./wc/output");
      String input = "The quick brown fox\nhas many silly\nred fox sox\n";
      result = launchWordCount(jobConf, inDir, outDir, input, 3, 1);
      assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" +
                   "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result.output);
    } catch (java.net.BindException be) {
      LOG.info("Skip the test this time because can not start namenode on port "
          + NameNode.DEFAULT_PORT, be);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
View Full Code Here

    try {
      final int numSlaves = 2;
      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, numSlaves, true, null);
      fs = dfs.getFileSystem();
     
      mr = new MiniMRCluster(numSlaves, fs.getUri().toString(), 1);
      writeInputFile(fs, inputPath);
      map = StreamUtil.makeJavaCommand(UlimitApp.class, new String[]{})
View Full Code Here

public class TestNodeCount extends TestCase {
  public void testNodeCount() throws Exception {
    // start a mini dfs cluster of 2 nodes
    final Configuration conf = new Configuration();
    final short REPLICATION_FACTOR = (short)2;
    final MiniDFSCluster cluster =
      new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
    try {
      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
      final FileSystem fs = cluster.getFileSystem();
     
      // populate the cluster with a one block file
      final Path FILE_PATH = new Path("/testfile");
      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
      Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);

      // keep a copy of all datanode descriptors
      DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
         namesystem.heartbeats.toArray(new DatanodeDescriptor[REPLICATION_FACTOR]);
     
      // start two new nodes
      cluster.startDataNodes(conf, 2, true, null, null);
      cluster.waitActive();
     
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        datanode.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
      // the block will be replicated
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // wait until the excess replica is detected
      NumberReplicas num = null;
      do {
        synchronized (namesystem) {
          num = namesystem.countNodes(block);
        }
      } while (num.excessReplicas() == 0);
     
      // find a non-excess node
      Iterator<DatanodeDescriptor> iter = namesystem.blocksMap.nodeIterator(block);
      DatanodeDescriptor nonExcessDN = null;
      while (iter.hasNext()) {
        DatanodeDescriptor dn = iter.next();
        Collection<Block> blocks = namesystem.excessReplicateMap.get(dn.getStorageID());
        if (blocks == null || !blocks.contains(block) ) {
          nonExcessDN = dn;
          break;
        }
      }
      assertNotNull(nonExcessDN);
     
      // bring down the non-excess datanode
      dnprop = cluster.stopDataNode(nonExcessDN.getName());
      // make sure that NN detects that the datanode is down
      synchronized (namesystem.heartbeats) {
        nonExcessDN.setLastUpdate(0); // mark it dead
        namesystem.heartbeatCheck();
      }
     
      // The block should be replicated
      do {
        num = namesystem.countNodes(block);
      } while (num.liveReplicas() != REPLICATION_FACTOR);
     
      // restart the non-excess datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // wait until both excess replicas are detected
      do {
        num = namesystem.countNodes(block);
      } while (num.excessReplicas() != 2);
    } finally {
      cluster.shutdown();
    }
  }
View Full Code Here
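The countNodes() polling loops in TestNodeCount spin with no sleep and no deadline, so a regression can hang the test indefinitely. Below is a hypothetical helper (not part of the original test; it uses only the countNodes() and excessReplicas() calls shown above) that bounds the wait:

  // Hypothetical helper: poll countNodes() with a back-off and a deadline
  // instead of busy-waiting.
  private static NumberReplicas waitForExcessReplicas(FSNamesystem namesystem,
      Block block, int expected, long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
      NumberReplicas num;
      synchronized (namesystem) {
        num = namesystem.countNodes(block);
      }
      if (num.excessReplicas() == expected) {
        return num;
      }
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException(
            "timed out waiting for " + expected + " excess replica(s)");
      }
      Thread.sleep(100); // back off between polls
    }
  }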

  private Path dir = null;
 
  @Override
  public void setUp() throws Exception {
    try {
      this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        this.cluster.getFileSystem().getHomeDirectory().toString());
      this.dir = new Path(DIR, getName());
    } catch (IOException e) {
View Full Code Here
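The setUp() above is truncated before any cleanup appears. A matching tearDown(), sketched here on the assumption that `cluster` is the field assigned in setUp(), would release the mini cluster:

  // Sketch of the matching cleanup; assumes the `cluster` field set in setUp().
  @Override
  public void tearDown() throws Exception {
    if (this.cluster != null) {
      this.cluster.shutdown();
    }
    super.tearDown();
  }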

      // pg. 9 of BigTable paper).
      R = (ONE_GB / 10) * N;
    }
   
    MiniHBaseCluster hbaseMiniCluster = null;
    MiniDFSCluster dfsCluster = null;
    if (this.miniCluster) {
      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // mangle the conf so that the fs parameter points to the minidfs we
      // just started up
      FileSystem fs = dfsCluster.getFileSystem();
      conf.set("fs.default.name", fs.getUri().toString());     
      Path parentdir = fs.getHomeDirectory();
      conf.set(HConstants.HBASE_DIR, parentdir.toString());
      fs.mkdirs(parentdir);
      FSUtils.setVersion(fs, parentdir);
View Full Code Here

  public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
                                int numDataNodes,
                                boolean format,
                                String[] racks) throws IOException {
    return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
  }
View Full Code Here
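A possible usage sketch of the shim factory above. Here `shims` stands for whichever HadoopShims implementation is loaded at runtime, and the getFileSystem()/shutdown() methods on MiniDFSShim are assumptions about the shim interface rather than calls shown on this page:

  Configuration conf = new Configuration();
  HadoopShims.MiniDFSShim dfs = shims.getMiniDfs(conf, 2, true, null);
  try {
    FileSystem fs = dfs.getFileSystem(); // assumed accessor on the shim
    // ... exercise fs ...
  } finally {
    dfs.shutdown(); // assumed teardown on the shim
  }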


public class TestMiniMRDFSCaching extends TestCase {

  public void testWithDFS() throws IOException {
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;
    try {
      JobConf conf = new JobConf();
      dfs = new MiniDFSCluster(65314, conf, true);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(60050, 50060, 2, fileSys.getName(), true, 4);
      // run the wordcount example with caching
      boolean ret = MRCaching.launchMRCache("/testing/wc/input",
                                            "/testing/wc/output",
                                            "/cachedir",
                                            mr.createJobConf(),
                                            "The quick brown fox\nhas many silly\n"
                                                + "red fox sox\n");
      assertTrue("Archives not matching", ret);
    } finally {
      if (fileSys != null) {
        fileSys.close();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
      if (mr != null) {
        mr.shutdown();
      }
    }
View Full Code Here
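Note that this caching test hard-codes the namenode port (65314) and fixed MapReduce ports (60050, 50060), which can collide on a shared build machine. A sketch of the same setup on ephemeral ports, using only the constructor and the MiniMRCluster overload that other snippets on this page already use:

  JobConf conf = new JobConf();
  MiniDFSCluster dfs = new MiniDFSCluster(conf, 2, true, null); // free ports
  FileSystem fileSys = dfs.getFileSystem();
  MiniMRCluster mr = new MiniMRCluster(2, fileSys.getUri().toString(), 1);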

   * no map task to execute) is successful.
   * @throws IOException
   */
  public void testEmptyJobWithDFS() throws IOException {
      String namenode = null;
      MiniDFSCluster dfs = null;
      MiniMRCluster mr = null;
      FileSystem fileSys = null;
      try {
          final int taskTrackers = 4;
          final int jobTrackerPort = 60050;
          Configuration conf = new Configuration();
          dfs = new MiniDFSCluster(65315, conf, true);
          fileSys = dfs.getFileSystem();
          namenode = fileSys.getName();
          mr = new MiniMRCluster(jobTrackerPort, 50060, taskTrackers,
                                 namenode, true, 2);
          final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
          JobConf jobConf = new JobConf();
          boolean result;
          result = launchEmptyJob(namenode, jobTrackerName, jobConf, 3, 1);
          assertTrue(result);
         
      } finally {
          if (fileSys != null) { fileSys.close(); }
          if (dfs != null) { dfs.shutdown(); }
          if (mr != null) { mr.shutdown(); }
      }
  }
View Full Code Here
