Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSTestUtil.createFiles()
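
The fragments below are excerpts from HDFS test classes and are truncated. For orientation, here is a minimal, self-contained sketch of the usual pattern: start a MiniDFSCluster, populate a directory with DFSTestUtil.createFiles(), wait for replication, verify the data, and clean up. The class name and the file counts/sizes are made up for illustration; the four-argument DFSTestUtil constructor and the MiniDFSCluster.Builder API are the ones the fragments themselves use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Start a 3-datanode mini cluster and wait until it is serving requests.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Populate /srcdat with 20 pseudo-random files, nested at most 2
      // directory levels deep, each at most 8 KB (so a single block).
      DFSTestUtil util = new DFSTestUtil("createFilesSketch", 20, 2, 8 * 1024);
      util.createFiles(fs, "/srcdat");

      // Wait until every file reaches replication factor 3, then verify the
      // contents read back correctly and remove the test data.
      util.waitReplication(fs, "/srcdat", (short) 3);
      System.out.println("files intact: " + util.checkFiles(fs, "/srcdat"));
      util.cleanup(fs, "/srcdat");
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}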


      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
      cluster = new MiniDFSCluster(conf, 4, true, null);
      String topDir = "/srcdat";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     
      // Corrupt a block by deleting it
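The runFsck() helper called above is defined in the surrounding test class rather than in DFSTestUtil. A hypothetical re-implementation along the same lines runs the DFSck tool against the given paths and returns whatever it printed, so the caller can assert on markers such as NamenodeFsck.HEALTHY_STATUS:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

class FsckRunner {
  // Hypothetical stand-in for the test's runFsck() helper: run fsck over the
  // given paths, optionally check the exit code, and return the tool's output.
  static String runFsck(Configuration conf, int expectedErrCode,
                        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes, true);
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode && errCode != expectedErrCode) {
      throw new AssertionError("fsck exited with " + errCode
          + ", expected " + expectedErrCode);
    }
    return bytes.toString();
  }
}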


      cluster = new MiniDFSCluster(conf, 4, true, null);
      String topDir = "/srcdat";
      String randomString = "HADOOP  ";
      fs = cluster.getFileSystem();
      cluster.waitActive();
      util.createFiles(fs, topDir);
      util.waitReplication(fs, topDir, (short)3);
      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      // Open a file for writing and do not close it for now
      Path openFile = new Path(topDir + "/openFile");

      cluster = new MiniDFSCluster.Builder(conf).build();
      FileSystem fs = cluster.getFileSystem();

      // create two files with one block each
      DFSTestUtil util = new DFSTestUtil("testCorruptFilesCorruptedBlock", 2, 1, 512);
      util.createFiles(fs, "/srcdat10");

      // fetch bad file list from namenode. There should be none.
      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
        getNamesystem().listCorruptFileBlocks("/", null);

      FileSystem fs = cluster.getFileSystem();

      // create two files with one block each
      DFSTestUtil util = new DFSTestUtil("testListCorruptFileBlocksInSafeMode",
                                         2, 1, 512);
      util.createFiles(fs, "/srcdat10");

      // fetch bad file list from namenode. There should be none.
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles =
        cluster.getNameNode().getNamesystem().listCorruptFileBlocks("/", null);
      assertTrue("Namenode has " + badFiles.size()

    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
      util.createFiles(fs, "/corruptData");

      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
      int numCorrupt = corruptFileBlocks.size();

          ncfb[1].block.getBlockName());
      numCorrupt = corruptFileBlocks.size();
      assertTrue(numCorrupt == 0);
      // Do a listing on a directory that has no corrupt blocks and validate
      util.createFiles(fs, "/goodData");
      corruptFileBlocks =
        namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
      numCorrupt = corruptFileBlocks.size();
      assertTrue(numCorrupt == 0);
      util.cleanup(fs, "/corruptData");

      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
      fs = cluster.getFileSystem();
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      DFSTestUtil util = new DFSTestUtil("testGetCorruptFiles", 3, 1, 1024);
      util.createFiles(fs, "/corruptData");

      RemoteIterator<Path> corruptFileBlocks =
        dfs.listCorruptFileBlocks(new Path("/corruptData"));
      int numCorrupt = countPaths(corruptFileBlocks);
      assertTrue(numCorrupt == 0);
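countPaths() in the fragment above is another helper from the surrounding test, not shown here. A hypothetical equivalent simply drains the RemoteIterator returned by DistributedFileSystem.listCorruptFileBlocks() and counts the corrupt-file paths it yields:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

class CorruptPathCounter {
  // Hypothetical stand-in for the test's countPaths() helper.
  static int countPaths(RemoteIterator<Path> iter) throws IOException {
    int count = 0;
    while (iter.hasNext()) {
      iter.next();
      count++;
    }
    return count;
  }
}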

        FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

      // create 3x maxCorruptFileBlocks files, each with a single block
      DFSTestUtil util = new DFSTestUtil("testMaxCorruptFiles",
          maxCorruptFileBlocks * 3, 1, 512);
      util.createFiles(fs, "/srcdat2", (short) 1);
      util.waitReplication(fs, "/srcdat2", (short) 1);

      // verify that there are no bad blocks.
      final NameNode namenode = cluster.getNameNode();
      Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.

          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[] {});
      numNamenodeDirs = nameNodeDirs.length;
      assertTrue("failed to get number of Namenode StorageDirs",
          numNamenodeDirs != 0);
      FileSystem fs = cluster.getFileSystem();
      files.createFiles(fs, dir);

      rootmtime = fs.getFileStatus(rootpath).getModificationTime();
      rootstatus = fs.getFileStatus(rootpath);
      dirstatus = fs.getFileStatus(dirpath);

      final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
     
      // Modify the namespace and then perform saveNamespace
      files.cleanup(fs, dir);
      files.createFiles(fs, dir);
      fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.getNameNodeRpc().saveNamespace();
      final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
      assertFalse("Modified namespace should change fsimage contents. " +
          "was: " + checkAfterRestart + " now: " + checkAfterModify,
