Examples of checkFiles()
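DFSTestUtil is a test-only helper from the Hadoop HDFS test sources. The pattern shared by the examples below is a round trip: createFiles() writes a pseudo-random tree of files under a top directory, and checkFiles() re-reads every file, returning true only if the contents match what was written. The following is a minimal sketch of that round trip against a MiniDFSCluster, assuming the four-argument DFSTestUtil constructor used in the snippets (test name, file count, max directory depth, max file size); the class name and the "/sketch" path are illustrative, not taken from any of the examples.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class CheckFilesRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();

          // testName, nFiles, maxLevels, maxSize -- same shape as the snippets below
          DFSTestUtil util = new DFSTestUtil("CheckFilesRoundTrip", 2, 3, 8 * 1024);
          util.createFiles(fs, "/sketch", (short) 3);      // write files at replication 3
          util.waitReplication(fs, "/sketch", (short) 3);  // block until replication is reached

          // checkFiles returns true iff every file reads back with the expected bytes
          if (!util.checkFiles(fs, "/sketch")) {
            throw new AssertionError("data mismatch under /sketch");
          }
        } finally {
          cluster.shutdown();
        }
      }
    }

Note that checkFiles() accepts any FileSystem, not just HDFS: the FTP examples further down run the same verification against a local file system after copying data back from an FTPFileSystem.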


Examples of org.apache.hadoop.hdfs.DFSTestUtil.checkFiles()

      // restart on the existing storage (format(false)) and verify the
      // previously written files survived intact
      cluster = new MiniDFSCluster.Builder(conf).format(false)
          .numDataNodes(NUM_DATANODES).build();
      fsn = cluster.getNamesystem();
      FileSystem fs = cluster.getFileSystem();
      assertTrue("Filesystem corrupted after restart.",
                 files.checkFiles(fs, dir));

      // root metadata recorded before the restart should also persist
      final FileStatus newrootstatus = fs.getFileStatus(rootpath);
      assertEquals(rootmtime, newrootstatus.getModificationTime());
      assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
      assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());

Examples of org.apache.hadoop.hdfs.DFSTestUtil.checkFiles()

      // test finalized replicas
      final String TopDir = "/test";
      DFSTestUtil util = new DFSTestUtil("TestCrcCorruption", 2, 3, 8*1024);
      util.createFiles(fs, TopDir, (short)3);
      util.waitReplication(fs, TopDir, (short)3);
      util.checkFiles(fs, TopDir);
      // restarting the datanodes must not invalidate the stored CRCs
      cluster.restartDataNodes();
      cluster.waitActive();
      util.checkFiles(fs, TopDir);
    } finally {
      cluster.shutdown();

Examples of org.apache.hadoop.hdfs.DFSTestUtil.checkFiles()

          LOG.info("Deliberately corrupting file " + blocks[idx].getName() +
              " at offset " + position + " length " + length);

          // read all files to trigger detection of corrupted replica
          try {
            util.checkFiles(fs, "/srcdat10");
          } catch (BlockMissingException e) {
            System.out.println("Received BlockMissingException as expected.");
          } catch (IOException e) {
            assertTrue("Corrupted replicas not handled properly. Expecting BlockMissingException " +
                " but received IOException " + e, false);

Examples of org.apache.hadoop.hdfs.DFSTestUtil.checkFiles()

    Path localData = new Path(workDir, "srcData");
    Path remoteData = new Path("srcData");

    // generate the local source tree, then verify it before copying
    util.createFiles(localFs, localData.toUri().getPath());

    boolean dataConsistency = util.checkFiles(localFs, localData.getName());
    assertTrue("Test data corrupted", dataConsistency);

    // Copy files and directories recursively to FTP file system.
    boolean filesCopied = FileUtil.copy(localFs, localData, ftpFs, remoteData,
        false, defaultConf);

Examples of org.apache.hadoop.hdfs.DFSTestUtil.checkFiles()

    filesCopied = FileUtil.copy(ftpFs, renamedData, localFs, workDir, true,
        defaultConf);
    assertTrue("Copying from FTPFileSystem fails", filesCopied);

    // Check if the data was received completely without any corruption.
    dataConsistency = util.checkFiles(localFs, renamedData.getName());
    assertTrue("Invalid or corrupted data recieved from FTP Server!",
        dataConsistency);

    // Delete local copies
    boolean deleteSuccess = localFs.delete(renamedData, true)
