Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.MiniDFSCluster
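All of the snippets below follow the same lifecycle: build a Configuration, start a MiniDFSCluster, obtain its FileSystem, do the work, and shut the cluster down in a finally block. A minimal self-contained sketch of that pattern (the class name and the path are illustrative, not taken from the examples below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // 2 datanodes, format the cluster, default racks/hosts
      cluster = new MiniDFSCluster(conf, 2, true, null);
      cluster.waitActive();                      // block until the datanodes register
      FileSystem fs = cluster.getFileSystem();   // HDFS client bound to the mini cluster
      fs.mkdirs(new Path("/tmp/example"));       // use it like a normal HDFS
    } finally {
      if (cluster != null) cluster.shutdown();   // always stop the mini cluster
    }
  }
}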


  static void runTestCache(int port) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster(port, conf, 2, true, true, null, null);
      URI uri = cluster.getFileSystem().getUri();
      LOG.info("uri=" + uri);

      {
        FileSystem fs = FileSystem.get(uri, new Configuration());
        checkPath(cluster, fs);
        for(int i = 0; i < 100; i++) {
          assertTrue(fs == FileSystem.get(uri, new Configuration()));
        }
      }
     
      if (port == NameNode.DEFAULT_PORT) {
        // test explicit default port
        URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
            uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
            uri.getQuery(), uri.getFragment());
        LOG.info("uri2=" + uri2);
        FileSystem fs = FileSystem.get(uri2, conf);
        checkPath(cluster, fs);
        for(int i = 0; i < 100; i++) {
          assertTrue(fs == FileSystem.get(uri2, new Configuration()));
        }
      }
    } finally {
      if (cluster != null) cluster.shutdown();
    }
  }
View Full Code Here
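The loop above passes because FileSystem.get(uri, conf) consults a client-side cache keyed by scheme and authority, so repeated calls with equivalent URIs return the same instance even when each call builds a fresh Configuration. A short sketch of both sides of that behavior, reusing the cluster uri from the snippet (FileSystem.newInstance is available in newer Hadoop versions; treat its use here as an assumption about the version at hand):

  // Cached: repeated lookups of the same URI return the same object
  FileSystem cached = FileSystem.get(uri, new Configuration());
  FileSystem again  = FileSystem.get(uri, new Configuration());
  // cached == again holds, which is exactly what the loop above asserts

  // Uncached (assumes a Hadoop version that has FileSystem.newInstance):
  // the caller owns this instance and must close it explicitly
  FileSystem fresh = FileSystem.newInstance(uri, new Configuration());
  fresh.close();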


    ts = jt.getTaskStatus(attemptId);
    validateAttempt(tip, attemptId, ts, true, jt);
  }
 
  public void testWithDFS() throws IOException {
    MiniDFSCluster dfs = null;
    MiniMRCluster mr = null;
    FileSystem fileSys = null;
    try {
      final int taskTrackers = 4;

      Configuration conf = new Configuration();
      dfs = new MiniDFSCluster(conf, 4, true, null);
      fileSys = dfs.getFileSystem();
      mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1);
      JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
      final Path inDir = new Path("./input");
      final Path outDir = new Path("./output");
      String input = "The quick brown fox\nhas many silly\nred fox sox\n";
      // launch job with fail tasks
      JobConf jobConf = mr.createJobConf();
      // turn down the completion poll interval from the 5 second default
      // for better test performance.
      jobConf.set(JobClient.NetworkedJob.COMPLETION_POLL_INTERVAL_KEY, "50");
      jobConf.setOutputCommitter(CommitterWithLogs.class);
      RunningJob rJob = launchJob(jobConf, inDir, outDir, input);
      rJob.waitForCompletion();
      validateJob(rJob, jt);
      // launch job with fail tasks and fail-cleanups
      fileSys.delete(outDir, true);
      jobConf.setOutputCommitter(CommitterWithFailTaskCleanup.class);
      rJob = launchJob(jobConf, inDir, outDir, input);
      rJob.waitForCompletion();
      validateJob(rJob, jt);
      fileSys.delete(outDir, true);
      jobConf.setOutputCommitter(CommitterWithFailTaskCleanup2.class);
      rJob = launchJob(jobConf, inDir, outDir, input);
      rJob.waitForCompletion();
      validateJob(rJob, jt);
    } finally {
      if (dfs != null) { dfs.shutdown(); }
      if (mr != null) { mr.shutdown(); }
    }
  }
View Full Code Here

    // should also return false.
    assertFalse(rr.nextKeyValue());
  }

  public void testSplitPlacement() throws IOException {
    MiniDFSCluster dfs = null;
    FileSystem fileSys = null;
    try {
      /* Start 3 datanodes, one each in rack r1, r2, r3. Create three files
       * 1) file1, just after starting the datanode on r1, with
       *    a repl factor of 1, and,
       * 2) file2, just after starting the datanode on r2, with
       *    a repl factor of 2, and,
       * 3) file3, after starting all three datanodes, with a repl
       *    factor of 3.
       * At the end, file1 will be present on only datanode1, file2 will be
       * present on datanode 1 and datanode2 and
       * file3 will be present on all datanodes.
       */
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.replication.considerLoad", false);
      dfs = new MiniDFSCluster(conf, 1, true, rack1, hosts1);
      dfs.waitActive();

      fileSys = dfs.getFileSystem();
      if (!fileSys.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir.toString());
      }
      Path file1 = new Path(dir1 + "/file1");
      writeFile(conf, file1, (short)1, 1);
      dfs.startDataNodes(conf, 1, true, null, rack2, hosts2, null);
      dfs.waitActive();

      // create file on two datanodes.
      Path file2 = new Path(dir2 + "/file2");
      writeFile(conf, file2, (short)2, 2);

      // split it using a CombinedFile input format
      DummyInputFormat inFormat = new DummyInputFormat();
      Job job = new Job(conf);
      FileInputFormat.setInputPaths(job, dir1 + "," + dir2);
      inFormat.setMinSplitSizeRack(BLOCKSIZE);
      List<InputSplit> splits = inFormat.getSplits(job);
      System.out.println("Made splits(Test1): " + splits.size());

      // make sure that each split has different locations
      for (InputSplit split : splits) {
        System.out.println("File split(Test1): " + split);
      }
      assertEquals(splits.size(), 2);
      CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts2[0]); // should be on r2
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file1.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts1[0]); // should be on r1

      // create another file on 3 datanodes and 3 racks.
      dfs.startDataNodes(conf, 1, true, null, rack3, hosts3, null);
      dfs.waitActive();
      Path file3 = new Path(dir3 + "/file3");
      writeFile(conf, file3, (short)3, 3);
      inFormat = new DummyInputFormat();
      FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3);
      inFormat.setMinSplitSizeRack(BLOCKSIZE);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test2): " + split);
      }
      assertEquals(splits.size(), 3);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 3);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(2), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts3[0]); // should be on r3
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts2[0]); // should be on r2
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file1.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts1[0]); // should be on r1

      // create file4 on all three racks
      Path file4 = new Path(dir4 + "/file4");
      writeFile(conf, file4, (short)3, 3);
      inFormat = new DummyInputFormat();
      FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3 + "," + dir4);
      inFormat.setMinSplitSizeRack(BLOCKSIZE);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test3): " + split);
      }
      assertEquals(splits.size(), 3);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 6);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(2), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts3[0]); // should be on r3
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts2[0]); // should be on r2
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file1.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts1[0]); // should be on r1

      // maximum split size is 2 blocks
      inFormat = new DummyInputFormat();
      inFormat.setMinSplitSizeNode(BLOCKSIZE);
      inFormat.setMaxSplitSize(2*BLOCKSIZE);
      FileInputFormat.setInputPaths(job,
        dir1 + "," + dir2 + "," + dir3 + "," + dir4);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test4): " + split);
      }
      assertEquals(splits.size(), 5);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(1), 0);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(0), BLOCKSIZE);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(1), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");

      // maximum split size is 3 blocks
      inFormat = new DummyInputFormat();
      inFormat.setMinSplitSizeNode(BLOCKSIZE);
      inFormat.setMaxSplitSize(3*BLOCKSIZE);
      FileInputFormat.setInputPaths(job,
        dir1 + "," + dir2 + "," + dir3 + "," + dir4);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test1): " + split);
      }
      assertEquals(splits.size(), 4);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 3);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(2), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getPath(0).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(2), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host2.rack2.com");
      fileSplit = (CombineFileSplit) splits.get(3);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file1.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host1.rack1.com");

      // maximum split size is 4 blocks
      inFormat = new DummyInputFormat();
      inFormat.setMaxSplitSize(4*BLOCKSIZE);
      FileInputFormat.setInputPaths(job, dir1 + "," + dir2 + "," + dir3 + "," + dir4);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test1): " + split);
      }
      assertEquals(splits.size(), 3);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 4);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file3.getName());
      assertEquals(fileSplit.getOffset(2), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 4);
      assertEquals(fileSplit.getPath(0).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getPath(1).getName(), file2.getName());
      assertEquals(fileSplit.getOffset(1), BLOCKSIZE);
      assertEquals(fileSplit.getLength(1), BLOCKSIZE);
      assertEquals(fileSplit.getPath(2).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(2), BLOCKSIZE);
      assertEquals(fileSplit.getLength(2), BLOCKSIZE);
      assertEquals(fileSplit.getPath(3).getName(), file4.getName());
      assertEquals(fileSplit.getOffset(3), 2 * BLOCKSIZE);
      assertEquals(fileSplit.getLength(3), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], "host2.rack2.com");
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getPath(0).getName(), file1.getName());
      assertEquals(fileSplit.getOffset(0), 0);
      assertEquals(fileSplit.getLength(0), BLOCKSIZE);
      assertEquals(fileSplit.getLocations()[0], hosts1[0]); // should be on r1

      // maximum split size is 7 blocks and min is 3 blocks
      inFormat = new DummyInputFormat();
      inFormat.setMaxSplitSize(7*BLOCKSIZE);
      inFormat.setMinSplitSizeNode(3*BLOCKSIZE);
      inFormat.setMinSplitSizeRack(3*BLOCKSIZE);
      FileInputFormat.setInputPaths(job,
        dir1 + "," + dir2 + "," + dir3 + "," + dir4);
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test1): " + split);
      }
      assertEquals(splits.size(), 2);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 6);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getLocations()[0], "host3.rack3.com");
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 3);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getLocations()[0], "host1.rack1.com");

      // Rack 1 has file1, file2 and file3 and file4
      // Rack 2 has file2 and file3 and file4
      // Rack 3 has file3 and file4
      // set up a filter so that only file1 and file2 can be combined
      inFormat = new DummyInputFormat();
      FileInputFormat.addInputPath(job, inDir);
      inFormat.setMinSplitSizeRack(1); // everything is at least rack local
      inFormat.createPool(new TestFilter(dir1),
                          new TestFilter(dir2));
      splits = inFormat.getSplits(job);
      for (InputSplit split : splits) {
        System.out.println("File split(Test1): " + split);
      }
      assertEquals(splits.size(), 3);
      fileSplit = (CombineFileSplit) splits.get(0);
      assertEquals(fileSplit.getNumPaths(), 2);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getLocations()[0], hosts2[0]); // should be on r2
      fileSplit = (CombineFileSplit) splits.get(1);
      assertEquals(fileSplit.getNumPaths(), 1);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getLocations()[0], hosts1[0]); // should be on r1
      fileSplit = (CombineFileSplit) splits.get(2);
      assertEquals(fileSplit.getNumPaths(), 6);
      assertEquals(fileSplit.getLocations().length, 1);
      assertEquals(fileSplit.getLocations()[0], hosts3[0]); // should be on r3

      // measure performance when there are multiple pools and
      // many files in each pool.
      int numPools = 100;
      int numFiles = 1000;
      DummyInputFormat1 inFormat1 = new DummyInputFormat1();
      for (int i = 0; i < numFiles; i++) {
        FileInputFormat.setInputPaths(job, file1);
      }
      inFormat1.setMinSplitSizeRack(1); // everything is at least rack local
      final Path dirNoMatch1 = new Path(inDir, "/dirxx");
      final Path dirNoMatch2 = new Path(inDir, "/diryy");
      for (int i = 0; i < numPools; i++) {
        inFormat1.createPool(new TestFilter(dirNoMatch1),
                            new TestFilter(dirNoMatch2));
      }
      long start = System.currentTimeMillis();
      splits = inFormat1.getSplits(job);
      long end = System.currentTimeMillis();
      System.out.println("Elapsed time for " + numPools + " pools " +
                         " and " + numFiles + " files is " +
                         ((end - start)/1000) + " seconds.");
    } finally {
      if (dfs != null) {
        dfs.shutdown();
      }
    }
  }
View Full Code Here
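The test above exercises the three sizing knobs of CombineFileInputFormat plus file pools. Condensed, the configuration pattern looks like this (DummyInputFormat and TestFilter are the test's own CombineFileInputFormat subclass and PathFilter, not library classes):

  DummyInputFormat inFormat = new DummyInputFormat(); // extends CombineFileInputFormat
  inFormat.setMaxSplitSize(4 * BLOCKSIZE);       // upper bound on a combined split
  inFormat.setMinSplitSizeNode(BLOCKSIZE);       // smaller node-local leftovers spill to rack level
  inFormat.setMinSplitSizeRack(BLOCKSIZE);       // smaller rack-local leftovers spill to global level
  inFormat.createPool(new TestFilter(dir1),
                      new TestFilter(dir2));     // only files matching a pool combine together
  List<InputSplit> splits = inFormat.getSplits(job);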

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    final int taskTrackers = 4;
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 4, true, null);

    jc = new JobConf();
    jc.setClass("mapred.jobtracker.taskScheduler", TestTaskScheduler.class,
        TaskScheduler.class);
    jc.setLong("mapred.jobtracker.taskScheduler.maxRunningTasksPerJob", 10L);
View Full Code Here

  private static int numSlaves = 1;

  private void startCluster() throws Exception {
    super.setUp();
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null);
    JobConf jConf = new JobConf(conf);
    jConf.setLong("mapred.job.submission.expiry.interval", 6 * 1000);
    mrCluster = new MiniMRCluster(0, 0, numSlaves,
        dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
        jConf);
View Full Code Here

  public void testSecureJobExecution() throws Exception {
    LOG.info("Testing secure job submission/execution");
    MiniMRCluster mr = null;
    Configuration conf = new Configuration();
    final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null);
    try {
      FileSystem fs =
        TestMiniMRWithDFSWithDistinctUsers.
        DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
          public FileSystem run() throws IOException {
            return dfs.getFileSystem();
          }
        });
      TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user");
      TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred");
      TestMiniMRWithDFSWithDistinctUsers.mkdir(fs,
          conf.get("mapreduce.jobtracker.staging.root.dir",
              "/tmp/hadoop/mapred/staging"));
      UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser();
      mr = new MiniMRCluster(0, 0, 1, dfs.getFileSystem().getUri().toString(),
          1, null, null, MR_UGI);
      JobTracker jt = mr.getJobTrackerRunner().getJobTracker();
      String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
      // cleanup
      dfs.getFileSystem().delete(TEST_DIR, true);

      final Path mapSignalFile = new Path(TEST_DIR, "map-signal");
      final Path reduceSignalFile = new Path(TEST_DIR, "reduce-signal");

      // create a ugi for user 1
      UserGroupInformation user1 =
        TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false);
      Path inDir = new Path("/user/input");
      Path outDir = new Path("/user/output");
      final JobConf job = mr.createJobConf();

      UtilsForTests.configureWaitingJobConf(job, inDir, outDir, 2, 0,
          "test-submit-job", mapSignalFile.toString(),
          reduceSignalFile.toString());
      job.set(UtilsForTests.getTaskSignalParameter(true),
          mapSignalFile.toString());
      job.set(UtilsForTests.getTaskSignalParameter(false),
          reduceSignalFile.toString());
      LOG.info("Submit job as the actual user (" + user1.getUserName() + ")");
      final JobClient jClient =
        user1.doAs(new PrivilegedExceptionAction<JobClient>() {
          public JobClient run() throws IOException {
            return new JobClient(job);
          }
        });
      RunningJob rJob = user1.doAs(new PrivilegedExceptionAction<RunningJob>() {
        public RunningJob run() throws IOException {
          return jClient.submitJob(job);
        }
      });
      JobID id = rJob.getID();
      LOG.info("Running job " + id);

      // create user2
      UserGroupInformation user2 =
        TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
      JobConf conf_other = mr.createJobConf();
      org.apache.hadoop.hdfs.protocol.ClientProtocol client =
        getDFSClient(conf_other, user2);

      // try accessing mapred.system.dir/jobid/*
      try {
        String path = new URI(jt.getSystemDir()).getPath();
        LOG.info("Try listing the mapred-system-dir as the user ("
            + user2.getUserName() + ")");
        client.getListing(path, HdfsFileStatus.EMPTY_NAME);
        fail("JobTracker system dir is accessible to others");
      } catch (IOException ioe) {
        assertTrue(ioe.toString(),
          ioe.toString().contains("Permission denied"));
      }
      // try accessing ~/.staging/jobid/*
      JobInProgress jip = jt.getJob(id);
      Path jobSubmitDirpath =
        new Path(jip.getJobConf().get("mapreduce.job.dir"));
      try {
        LOG.info("Try accessing the job folder for job " + id + " as the user ("
            + user2.getUserName() + ")");
        client.getListing(jobSubmitDirpath.toUri().getPath(), HdfsFileStatus.EMPTY_NAME);
        fail("User's staging folder is accessible to others");
      } catch (IOException ioe) {
        assertTrue(ioe.toString(),
          ioe.toString().contains("Permission denied"));
      }
      UtilsForTests.signalTasks(dfs, fs, true, mapSignalFile.toString(),
          reduceSignalFile.toString());
      // wait for job to be done
      UtilsForTests.waitTillDone(jClient);

      // check if the staging area is cleaned up
      LOG.info("Check if job submit dir is cleanup or not");
      assertFalse(fs.exists(jobSubmitDirpath));
    } finally {
      if (mr != null) {
        mr.shutdown();
      }
      if (dfs != null) {
        dfs.shutdown();
      }
    }
  }
View Full Code Here
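Every cross-user check above follows the same UserGroupInformation pattern: create a test UGI, then run the filesystem or job-client call inside doAs so it executes with that user's identity. A minimal sketch (the user and group names are made up for illustration):

  final Configuration conf = new Configuration();
  UserGroupInformation ugi =
      UserGroupInformation.createUserForTesting("alice", new String[] {"users"});
  FileSystem asAlice = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    public FileSystem run() throws IOException {
      return FileSystem.get(conf); // runs with alice's identity, not the login user's
    }
  });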

 
  public void testCreate() throws Exception {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    conf.set(FsPermission.UMASK_LABEL, "000");
    MiniDFSCluster cluster = null;
    FileSystem fs = null;

    try {
      cluster = new MiniDFSCluster(conf, 3, true, null);
      cluster.waitActive();
      fs = FileSystem.get(conf);
      FsPermission rootPerm = checkPermission(fs, "/", null);
      FsPermission inheritPerm = FsPermission.createImmutable(
          (short)(rootPerm.toShort() | 0300));

      FsPermission dirPerm = new FsPermission((short)0777);
      fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
      checkPermission(fs, "/a1", inheritPerm);
      checkPermission(fs, "/a1/a2", inheritPerm);
      checkPermission(fs, "/a1/a2/a3", dirPerm);

      FsPermission filePerm = new FsPermission((short)0444);
      FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
          true, conf.getInt("io.file.buffer.size", 4096),
          fs.getDefaultReplication(), fs.getDefaultBlockSize(), null);
      out.write(123);
      out.close();
      checkPermission(fs, "/b1", inheritPerm);
      checkPermission(fs, "/b1/b2", inheritPerm);
      checkPermission(fs, "/b1/b2/b3.txt", filePerm);
     
      conf.set(FsPermission.UMASK_LABEL, "022");
      FsPermission permission =
        FsPermission.createImmutable((short)0666);
      FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
      FileSystem.create(fs, new Path("/c1/c2.txt"),
          new FsPermission(permission));
      checkPermission(fs, "/c1", permission);
      checkPermission(fs, "/c1/c2.txt", permission);
    } finally {
      try {
        if(fs != null) fs.close();
      } catch(Exception e) {
        LOG.error(StringUtils.stringifyException(e));
      }
      try {
        if(cluster != null) cluster.shutdown();
      } catch(Exception e) {
        LOG.error(StringUtils.stringifyException(e));
      }
    }
  }
View Full Code Here
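Worth noting in the final block: with umask 022 a plain fs.create or fs.mkdirs applies requested & ~umask, so a requested 0666 would land as 0644; the static helpers FileSystem.mkdirs(fs, path, perm) and FileSystem.create(fs, path, perm) used above set the permission explicitly after creation, which is why the asserted mode is still 0666. The umask arithmetic itself, as a small sketch:

  // applied = requested & ~umask, the rule behind the inherited permissions above
  FsPermission requested = new FsPermission((short) 0666);
  FsPermission umask = new FsPermission((short) 022);
  FsPermission applied = requested.applyUMask(umask); // yields 0644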


  public void testFilePermission() throws Exception {
    final Configuration conf = new Configuration();
    conf.setBoolean("dfs.permissions", true);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();

    try {
      FileSystem nnfs = FileSystem.get(conf);
      // test permissions on files that do not exist
      assertFalse(nnfs.exists(CHILD_FILE1));
      try {
        nnfs.setOwner(CHILD_FILE1, "foo", "bar");
        assertTrue(false);
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      try {
        nnfs.setPermission(CHILD_FILE1, new FsPermission((short)0777));
        assertTrue(false);
      }
      catch(java.io.FileNotFoundException e) {
        LOG.info("GOOD: got " + e);
      }
      // following dir/file creations are legal
      nnfs.mkdirs(CHILD_DIR1);
      FSDataOutputStream out = nnfs.create(CHILD_FILE1);
      byte data[] = new byte[FILE_LEN];
      RAN.nextBytes(data);
      out.write(data);
      out.close();
      nnfs.setPermission(CHILD_FILE1, new FsPermission("700"));

      // following read is legal
      byte dataIn[] = new byte[FILE_LEN];
      FSDataInputStream fin = nnfs.open(CHILD_FILE1);
      int bytesRead = fin.read(dataIn);
      assertTrue(bytesRead == FILE_LEN);
      for(int i=0; i<FILE_LEN; i++) {
        assertEquals(data[i], dataIn[i]);
      }

      ////////////////////////////////////////////////////////////////
      // test illegal file/dir creation
      UserGroupInformation userGroupInfo =
        UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES );
     
      FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

      // make sure mkdirs of an existing directory that is not owned by
      // this user does not throw an exception.
      userfs.mkdirs(CHILD_DIR1);
     
      // illegal mkdir
      assertTrue(!canMkdirs(userfs, CHILD_DIR2));

      // illegal file creation
      assertTrue(!canCreate(userfs, CHILD_FILE2));

      // illegal file open
      assertTrue(!canOpen(userfs, CHILD_FILE1));

      nnfs.setPermission(ROOT_PATH, new FsPermission((short)0755));
      nnfs.setPermission(CHILD_DIR1, new FsPermission("777"));
      nnfs.setPermission(new Path("/"), new FsPermission((short)0777));
      final Path RENAME_PATH = new Path("/foo/bar");
      userfs.mkdirs(RENAME_PATH);
      assertTrue(canRename(userfs, RENAME_PATH, CHILD_DIR1));
    } finally {
      if(cluster != null) cluster.shutdown();
    }
  }
View Full Code Here

 
  /** copy files from dfs file system to dfs file system */
  public void testCopyFromDfsToDfs() throws Exception {
    String namenode = null;
    MiniDFSCluster cluster = null;
    try {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
      final FileSystem hdfs = cluster.getFileSystem();
      namenode = FileSystem.getDefaultUri(conf).toString();
      if (namenode.startsWith("hdfs://")) {
        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
        ToolRunner.run(new DistCp(conf), new String[] {
                                         "-log",
                                         namenode+"/logs",
                                         namenode+"/srcdat",
                                         namenode+"/destdat"});
        assertTrue("Source and destination directories do not match.",
                   checkFiles(hdfs, "/destdat", files));
        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
        assertTrue("Log directory does not exist.",
                   fs.exists(new Path(namenode+"/logs")));
        deldir(hdfs, "/destdat");
        deldir(hdfs, "/srcdat");
        deldir(hdfs, "/logs");
      }
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
View Full Code Here
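For reference, the ToolRunner invocation above mirrors the command-line form of DistCp, roughly:

  hadoop distcp -log hdfs://<namenode>/logs hdfs://<namenode>/srcdat hdfs://<namenode>/destdat

where the namenode URI is whatever FileSystem.getDefaultUri(conf) reports for the mini cluster.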

  /**
   * @throws IOException
   * @throws FileNotFoundException
   */
  public void start() throws IOException, FileNotFoundException {
    if (!noDFS) {
      dfs = new MiniDFSCluster(nameNodePort, conf, numDataNodes,
          true, true, dfsOpts, null, null);
      LOG.info("Started MiniDFSCluster -- namenode on port "
          + dfs.getNameNodePort());
    }
    if (!noMR) {
View Full Code Here
