Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.NameNode


    // Set the NameNode's image directory, format it, and start the daemon.
    String nameFSDir = baseDirSpecified + "/name";
    conf.set("dfs.name.dir", nameFSDir);

    NameNode.format(conf);

    nameNodeDaemon = new NameNode("localhost", nameNodePort, conf);

    //
    //        start DataNodes
    //
    for (int i = 0; i < dataNodeNum; i++) {
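The excerpt stops at the DataNode loop. A minimal sketch of what such a loop typically does, assuming each DataNode gets its own storage directory under the same base path; the actual start-up call is elided in the excerpt, so startDataNode below is a placeholder, not a real API:

    for (int i = 0; i < dataNodeNum; i++) {
      // Each DataNode needs a distinct storage directory.
      Configuration dnConf = new Configuration(conf);
      dnConf.set("dfs.data.dir", baseDirSpecified + "/data" + i);
      // startDataNode(dnConf);  // placeholder for the elided DataNode start-up call
    }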


    // Same sequence, but the NameNode is constructed from the image directory
    // as a File instead of a host name.
    String nameFSDir = baseDirSpecified + "/name";
    conf.set("dfs.name.dir", nameFSDir);

    NameNode.format(conf);

    nameNodeDaemon = new NameNode(new File(nameFSDir), nameNodePort, conf);

    //
    //        start DataNodes
    //
    for (int i = 0; i < dataNodeNum; i++) {


      cluster = new MiniDFSCluster(conf, 0, false, null);
      cluster.waitActive();
     
      LOG.info("Restarted cluster with just the NameNode");
     
      NameNode namenode = cluster.getNameNode();
     
      assertTrue("No datanode is started. Should be in SafeMode",
                 namenode.isInSafeMode());
     
      // manually set safemode.
      namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
     
      // now bring up the datanode and wait for it to be active.
      cluster.startDataNodes(conf, 1, true, null, null);
      cluster.waitActive();
     
      LOG.info("Datanode is started.");
     
      // Give the DataNode time to report in; safe mode was entered manually,
      // so the NameNode must not leave it on its own.
      try {
        Thread.sleep(2000);
      } catch (InterruptedException ignored) {}
     
      assertTrue("should still be in SafeMode", namenode.isInSafeMode());
     
      namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      assertFalse("should not be in SafeMode", namenode.isInSafeMode());
    } finally {
      if (fs != null) fs.close();
      if (cluster != null) cluster.shutdown();
    }
  }
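The test above toggles safe mode with setSafeMode and checks state with isInSafeMode. As a usage note, setSafeMode can also act as a pure query. A minimal sketch, assuming the old ClientProtocol semantics where setSafeMode returns the safe-mode state after applying the action and SafeModeAction also defines SAFEMODE_GET:

    boolean inSafeMode;
    inSafeMode = namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // enter; returns true
    inSafeMode = namenode.setSafeMode(SafeModeAction.SAFEMODE_GET);   // query without changing state
    inSafeMode = namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); // leave; returns false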

    String nameFSDir = baseDirSpecified + "/name";
    conf.set("dfs.name.dir", nameFSDir);

    NameNode.format(conf);

    nameNodeDaemon = new NameNode(new File[] { new File(nameFSDir) },
        "localhost", nameNodePort, conf);

    //
    //        start DataNodes
    //


    String nameFSDir = baseDirSpecified + "/name";
    conf.set("dfs.name.dir", nameFSDir);

    NameNode.format(conf);

    nameNodeDaemon = new NameNode(new File(nameFSDir), "localhost", nameNodePort, conf);

    //
    //        start DataNodes
    //
    for (int i = 0; i < dataNodeNum; i++) {
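Taken together, the excerpts on this page exercise four NameNode constructor overloads. The call shapes below are collected from the snippets themselves; argument meanings are inferred from the variable names, and this is not necessarily the class's complete constructor list:

    new NameNode("localhost", nameNodePort, conf);                                     // host + port
    new NameNode(new File(nameFSDir), nameNodePort, conf);                             // image dir + port
    new NameNode(new File(nameFSDir), "localhost", nameNodePort, conf);                // image dir + host + port
    new NameNode(new File[] { new File(nameFSDir) }, "localhost", nameNodePort, conf); // several image dirs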


  /**
   * Verify JobTracker port usage.
   */
  public void testJobTrackerPorts() throws Exception {
    NameNode nn = null;
    try {
      nn = hdfs.startNameNode();

      // start job tracker on the same port as name-node
      JobConf conf2 = new JobConf(hdfs.getConfig());
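The excerpt ends where the port conflict would be provoked. A hypothetical continuation, assuming the JobTracker is pointed at the address the NameNode already holds and that start-up then fails with an IOException; the assertion is an assumption, not the original test body, and getNameNodeAddress() is an assumed accessor:

      // Point the JobTracker at the NameNode's address and expect a bind failure.
      conf2.set("mapred.job.tracker",
          "localhost:" + nn.getNameNodeAddress().getPort());
      boolean started = true;
      try {
        JobTracker.startTracker(conf2);
      } catch (IOException expected) {
        started = false; // port already bound by the NameNode
      }
      assertFalse("JobTracker should not start on the NameNode's port", started);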

  /**
   * Verify TaskTracker port usage.
   */
  public void testTaskTrackerPorts() throws Exception {
    NameNode nn = null;
    JobTracker jt = null;
    try {
      nn = hdfs.startNameNode();

      JobConf conf2 = new JobConf(hdfs.getConfig());
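The TaskTracker variant presumably provokes the same conflict on the task tracker's report address. A hypothetical sketch along the same lines; the key mapred.task.tracker.report.address and the TaskTracker(JobConf) constructor are assumptions based on the mapred API of this era:

      conf2.set("mapred.task.tracker.report.address",
          "localhost:" + nn.getNameNodeAddress().getPort()); // reuse the NameNode's port
      boolean started = true;
      try {
        new TaskTracker(conf2);
      } catch (IOException expected) {
        started = false; // port already bound by the NameNode
      }
      assertFalse("TaskTracker should not start on the NameNode's port", started);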


