Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
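
The snippets below share one pattern: build a MiniDFSCluster from a Configuration, work against the file system it exposes, and shut it down in a finally block. As a minimal sketch of that lifecycle (the method name and the "/example" path are illustrative only; the usual org.apache.hadoop.conf, org.apache.hadoop.fs and JUnit imports are assumed):

  public void testMiniClusterLifecycle() throws Exception {
    Configuration conf = new Configuration();
    // single-datanode cluster, formatted on startup, default rack layout
    // (same constructor arguments used throughout the examples below)
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    try {
      cluster.waitActive();                     // wait for the datanode to register
      FileSystem fs = cluster.getFileSystem();  // file system backed by the mini cluster
      fs.mkdirs(new Path("/example"));
      assertTrue(fs.exists(new Path("/example")));
    } finally {
      cluster.shutdown();                       // always stop the namenode and datanodes
    }
  }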



  /** copy files from dfs file system to local file system */
  public void testCopyFromDfsToLocal() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 1, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(URI.create("hdfs://"+namenode), "/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-log",
                                         "/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles("local", TEST_ROOT_DIR+"/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("/logs")));
        deldir("local", TEST_ROOT_DIR+"/destdat");
        deldir(namenode, "/logs");
        deldir(namenode, "/srcdat");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
View Full Code Here
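
The createFiles, checkFiles and deldir helpers used above are defined outside this snippet. Purely as an illustration of the (namenode, path) convention they follow, a deldir-style helper might look like the sketch below; the body is an assumption, not the test's actual implementation:

  private static void deldir(String namenode, String dir) throws IOException {
    Configuration conf = new Configuration();
    // "local" selects the local file system, anything else is treated as an HDFS authority
    FileSystem fs = "local".equals(namenode)
        ? FileSystem.getLocal(conf)
        : FileSystem.get(URI.create("hdfs://" + namenode), conf);
    fs.delete(new Path(dir), true);  // recursive delete of the test directory
  }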



  public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      namenode = conf.get("fs.default.name", "local");
      if (!"local".equals(namenode)) {
        MyFile[] files = createFiles(URI.create("hdfs://"+namenode), "/srcdat");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create("hdfs://"+namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                    fs.exists(new Path("hdfs://"+namenode+"/logs")));

        FileStatus[] dchkpoint = getFileStatus(namenode, "/destdat", files);
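        // update a quarter of the source files (NFILES >> 2 == NFILES / 4)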
        final int nupdate = NFILES>>2;
        updateFiles(namenode, "/srcdat", files, nupdate);
        deldir(namenode, "/logs");

        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-update",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        assertTrue("Update failed to replicate all changes in src",
                 checkUpdate(dchkpoint, namenode, "/destdat", files, nupdate));

        deldir(namenode, "/logs");
        ToolRunner.run(new CopyFiles(conf), new String[] {
                                         "-p",
                                         "-overwrite",
                                         "-log",
                                         "hdfs://"+namenode+"/logs",
                                         "hdfs://"+namenode+"/srcdat",
                                         "hdfs://"+namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(namenode, "/destdat", files));
        assertTrue("-overwrite didn't.",
                 checkUpdate(dchkpoint, namenode, "/destdat", files, NFILES));

        deldir(namenode, "/destdat");
        deldir(namenode, "/srcdat");
        deldir(namenode, "/logs");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
View Full Code Here

  /** socket factory */
  public void testSocketFactory() throws IOException {
    // Create a standard mini-cluster
    Configuration sconf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(sconf, 1, true, null);
    final int nameNodePort = cluster.getNameNodePort();

    // Get a reference to its DFS directly
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem directDfs = (DistributedFileSystem) fs;

    // Get another reference via network using a specific socket factory
    Configuration cconf = new Configuration();
    cconf.set("fs.default.name", String.format("hdfs://localhost:%s/",
        nameNodePort + 10));
    cconf.set("hadoop.rpc.socket.factory.class.default",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");

    fs = FileSystem.get(cconf);
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    JobClient client = null;

    try {
      // This will test RPC to the NameNode only.
      // could we test Client-DataNode connections?
      Path filePath = new Path("/dir");

      assertFalse(directDfs.exists(filePath));
      assertFalse(dfs.exists(filePath));

      directDfs.mkdirs(filePath);
      assertTrue(directDfs.exists(filePath));
      assertTrue(dfs.exists(filePath));

      // This will test RPC to a JobTracker
      MiniMRCluster mr = new MiniMRCluster(1, fs.getUri().toString(), 1);
      final int jobTrackerPort = mr.getJobTrackerPort();

      JobConf jconf = new JobConf(cconf);
      jconf.set("mapred.job.tracker", String.format("localhost:%d",
          jobTrackerPort + 10));
      client = new JobClient(jconf);

      JobStatus[] jobs = client.jobsToComplete();
      assertTrue(jobs.length == 0);

    } finally {
      try {
        if (client != null)
          client.close();
      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (dfs != null)
          dfs.close();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (directDfs != null)
          directDfs.close();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (cluster != null)
          cluster.shutdown();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
    }
  }
View Full Code Here
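
org.apache.hadoop.ipc.DummySocketFactory is only referenced by name above. One plausible reading of the "+ 10" port offsets is that this test-only factory remaps connections back to the real port, which is how the test can prove the configured factory is actually consulted for RPC. The sketch below assumes that behavior; it is not the class from the Hadoop test tree:

  public class DummySocketFactory extends javax.net.SocketFactory {
    // Assumed behavior: undo the "+ 10" offset applied to fs.default.name
    // and mapred.job.tracker in the test configuration above.
    private int realPort(int advertisedPort) { return advertisedPort - 10; }

    public java.net.Socket createSocket(String host, int port) throws IOException {
      return new java.net.Socket(host, realPort(port));
    }
    public java.net.Socket createSocket(String host, int port,
        java.net.InetAddress localAddr, int localPort) throws IOException {
      return new java.net.Socket(host, realPort(port), localAddr, localPort);
    }
    public java.net.Socket createSocket(java.net.InetAddress addr, int port) throws IOException {
      return new java.net.Socket(addr, realPort(port));
    }
    public java.net.Socket createSocket(java.net.InetAddress addr, int port,
        java.net.InetAddress localAddr, int localPort) throws IOException {
      return new java.net.Socket(addr, realPort(port), localAddr, localPort);
    }
  }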

  public void testCreate() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    conf.setInt(FsPermission.UMASK_LABEL, 0);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = FileSystem.get(conf);

    try {
      FsPermission rootPerm = checkPermission(fs, "/", null);
      FsPermission inheritPerm = FsPermission.createImmutable(
          (short)(rootPerm.toShort() | 0300));

      FsPermission dirPerm = new FsPermission((short)0777);
      fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
      checkPermission(fs, "/a1", inheritPerm);
      checkPermission(fs, "/a1/a2", inheritPerm);
      checkPermission(fs, "/a1/a2/a3", dirPerm);

      FsPermission filePerm = new FsPermission((short)0444);
      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
          true, conf.getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
      out.write(123);
      out.close();
      checkPermission(fs, "/b1", inheritPerm);
      checkPermission(fs, "/b1/b2", inheritPerm);
      checkPermission(fs, "/b1/b2/b3.txt", filePerm);
     
      conf.setInt(FsPermission.UMASK_LABEL, 0022);
      FsPermission permission =
        FsPermission.createImmutable((short)0666);
      FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
      FileSystem.create(fs, new Path("/c1/c2.txt"),
          new FsPermission(permission));
      checkPermission(fs, "/c1", permission);
      checkPermission(fs, "/c1/c2.txt", permission);
    }
    finally {
      try{fs.close();} catch(Exception e) {}
      try{cluster.shutdown();} catch(Exception e) {}
    }
  }
View Full Code Here
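
A quick worked example of the inheritPerm computation in testCreate above (the 0755 root permission is assumed for illustration). OR-ing with 0300 means the test expects implicitly created parent directories such as /a1 and /b1 to carry the root directory's permission with the owner's write and execute bits forced on:

  short rootBits    = 0755;                       // e.g. rwxr-xr-x on "/"
  short inheritBits = (short) (rootBits | 0300);  // 0755 | 0300 == 0755, owner bits already set
  // had the root been 0455, 0455 | 0300 would yield 0755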


  public void testFilePermision() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    FileSystem fs = FileSystem.get(conf);

    try {
      // following dir/file creations are legal
      fs.mkdirs(CHILD_DIR1);
      FSDataOutputStream out = fs.create(CHILD_FILE1);
      byte data[] = new byte[FILE_LEN];
      Random r = new Random();
      r.nextBytes(data);
      out.write(data);
      out.close();
      fs.setPermission(CHILD_FILE1, new FsPermission((short)0700));

      // following read is legal
      byte dataIn[] = new byte[FILE_LEN];
      FSDataInputStream fin = fs.open(CHILD_FILE1);
      fin.readFully(dataIn);   // readFully fills the whole buffer before the byte-by-byte check
      for(int i=0; i<FILE_LEN; i++) {
        assertEquals(data[i], dataIn[i]);
      }
      fs.close();

      // test illegal file/dir creation
      UnixUserGroupInformation userGroupInfo = new UnixUserGroupInformation(
          USER_NAME, GROUP_NAMES );
      conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME,
          userGroupInfo.toString());
      fs = FileSystem.get(conf);

      // illegal mkdir
      assertTrue(!canMkdirs(fs, CHILD_DIR2));

      // illegal file creation
      assertTrue(!canCreate(fs, CHILD_FILE2));

      // illegal file open
      assertTrue(!canOpen(fs, CHILD_FILE1));
    }
    finally {
      try{fs.close();} catch(Exception e) {}
      try{cluster.shutdown();} catch(Exception e) {}
    }
  }
View Full Code Here
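
canMkdirs, canCreate and canOpen are helpers defined outside this snippet. As an illustration only of the contract the assertions rely on (return false when the namenode denies the operation), hedged sketches might look like:

  private static boolean canMkdirs(FileSystem fs, Path p) {
    try { fs.mkdirs(p); return true; } catch (IOException e) { return false; }
  }
  private static boolean canCreate(FileSystem fs, Path p) {
    try { fs.create(p).close(); return true; } catch (IOException e) { return false; }
  }
  private static boolean canOpen(FileSystem fs, Path p) {
    try { fs.open(p).close(); return true; } catch (IOException e) { return false; }
  }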

  private Path[] path = new Path[NUM_OF_PATHS];
 
  protected void setUp() throws Exception {
    try {
      Configuration conf = new Configuration();
      dfsCluster = new MiniDFSCluster(conf, 1, true, null);
      fs = FileSystem.get(conf);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
View Full Code Here
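
Only setUp is shown in this snippet. A matching tearDown, assuming dfsCluster and fs are the fields initialized above, would release the cluster the same way the other examples do:

  protected void tearDown() throws Exception {
    if (fs != null) { fs.close(); }
    if (dfsCluster != null) { dfsCluster.shutdown(); }
  }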

  private static MiniDFSCluster cluster = null;
  public static Test suite() {
    TestSetup setup = new TestSetup(new TestSuite(TestDatamerge.class)) {
      protected void setUp() throws Exception {
        Configuration conf = new Configuration();
        cluster = new MiniDFSCluster(conf, 2, true, null);
      }
      protected void tearDown() throws Exception {
        if (cluster != null) {
          cluster.shutdown();
        }
      }
    };
    return setup;
  }
View Full Code Here

  /** This tests if permission setting in create, mkdir, and
   * setPermission works correctly
   */
  public void testPermissionSetting() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      cluster.waitActive();
      fs = FileSystem.get(conf);
      LOG.info("ROOT=" + fs.getFileStatus(new Path("/")));
      testPermissionSetting(OpType.CREATE); // test file creation
      testPermissionSetting(OpType.MKDIRS); // test directory creation
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
View Full Code Here


  /* check if the ownership of a file/directory is set correctly */
  public void testOwnership() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      cluster.waitActive();
      testOwnership(OpType.CREATE); // test file creation
      testOwnership(OpType.MKDIRS); // test directory creation
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
View Full Code Here


  /* Check if namenode performs permission checking correctly for
   * superuser, file owner, group owner, and other users */
  public void testPermissionChecking() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      cluster.waitActive();
      fs = FileSystem.get(conf);

      // set the permission of the root to be world-wide rwx
      fs.setPermission(new Path("/"), new FsPermission((short)0777));
     
      // create a directory hierarchy and set a random permission for each inode
      PermissionGenerator ancestorPermissionGenerator =
        new PermissionGenerator(r);
      PermissionGenerator dirPermissionGenerator = new PermissionGenerator(r);
      PermissionGenerator filePermissionGenerator = new PermissionGenerator(r);
      short[] ancestorPermissions = new short[NUM_TEST_PERMISSIONS];
      short[] parentPermissions = new short[NUM_TEST_PERMISSIONS];
      short[] permissions = new short[NUM_TEST_PERMISSIONS];
      Path[] ancestorPaths = new Path[NUM_TEST_PERMISSIONS];
      Path[] parentPaths = new Path[NUM_TEST_PERMISSIONS];
      Path[] filePaths = new Path[NUM_TEST_PERMISSIONS];
      Path[] dirPaths = new Path[NUM_TEST_PERMISSIONS];
      for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
        // create ancestor directory
        ancestorPaths[i] = new Path(ANCESTOR_NAME + i);
        create(OpType.MKDIRS, ancestorPaths[i]);
        fs.setOwner(ancestorPaths[i], USER1_NAME, GROUP2_NAME);
        // create parent directory
        parentPaths[i] = new Path(ancestorPaths[i], PARENT_NAME + i);
        create(OpType.MKDIRS, parentPaths[i]);
        // change parent directory's ownership to be user1
        fs.setOwner(parentPaths[i], USER1_NAME, GROUP2_NAME);

        filePaths[i] = new Path(parentPaths[i], FILE_NAME + i);
        dirPaths[i] = new Path(parentPaths[i], DIR_NAME + i);

        // makes sure that each inode at the same level
        // has a different permission
        ancestorPermissions[i] = ancestorPermissionGenerator.next();
        parentPermissions[i] = dirPermissionGenerator.next();
        permissions[i] = filePermissionGenerator.next();
        fs.setPermission(ancestorPaths[i], new FsPermission(
            ancestorPermissions[i]));
        fs.setPermission(parentPaths[i], new FsPermission(
                parentPermissions[i]));
      }

      /* file owner */
      testPermissionCheckingPerUser(USER1, ancestorPermissions,
          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
      /* group owner */
      testPermissionCheckingPerUser(USER2, ancestorPermissions,
          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
      /* other owner */
      testPermissionCheckingPerUser(USER3, ancestorPermissions,
          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
      /* super owner */
      testPermissionCheckingPerUser(SUPERUSER, ancestorPermissions,
          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
View Full Code Here
