Examples of HFileCorruptionChecker


Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

  /** Set the directory that corrupt hfiles are moved (sidelined) to when quarantining. */
  public void setSidelineDir(String sidelineDir) {
    this.sidelineDir = new Path(sidelineDir);
  }

  /** Factory for the checker; tests override this to inject faults (see the excerpts below). */
  protected HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
    return new HFileCorruptionChecker(getConf(), executor, sidelineCorruptHFiles);
  }
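
The two methods above come from HBaseFsck: the factory that builds the checker and the setter for the sideline (quarantine) directory. The same pieces can be driven directly. A minimal standalone sketch, assuming an HBase classpath and site configuration, and using only the constructor, checkTables, the result getters, and the FSUtils helpers that appear in the excerpts on this page:

import java.util.Collection;
import java.util.concurrent.ScheduledThreadPoolExecutor;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;

public class CorruptionScan {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ScheduledThreadPoolExecutor exec = new ScheduledThreadPoolExecutor(10);
    try {
      // false = only report corrupt hfiles, do not sideline (quarantine) them
      HFileCorruptionChecker hfcc = new HFileCorruptionChecker(conf, exec, false);

      // scan every table directory under the HBase root dir
      Path rootdir = FSUtils.getRootDir(conf);
      Collection<Path> tableDirs =
          FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(conf), rootdir);
      hfcc.checkTables(tableDirs);

      System.out.println("hfiles checked: " + hfcc.getHFilesChecked());
      System.out.println("corrupted:      " + hfcc.getCorrupted().size());
      System.out.println("failures:       " + hfcc.getFailures().size());
      System.out.println("quarantined:    " + hfcc.getQuarantined().size());
      System.out.println("missing:        " + hfcc.getMissing().size());
    } finally {
      exec.shutdown();
    }
  }
}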

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

    connect();

    // if corrupt-hfile mode is on, fix the corrupt hfiles first since they may be opened later
    if (checkCorruptHFiles || sidelineCorruptHFiles) {
      LOG.info("Checking all hfiles for corruption");
      HFileCorruptionChecker hfcc = createHFileCorruptionChecker(sidelineCorruptHFiles);
      setHFileCorruptionChecker(hfcc); // so we can get result
      Collection<String> tables = getIncludedTables();
      Collection<Path> tableDirs = new ArrayList<Path>();
      Path rootdir = FSUtils.getRootDir(getConf());
      if (tables.size() > 0) {
        for (String t : tables) {
          tableDirs.add(FSUtils.getTablePath(rootdir, t));
        }
      } else {
        tableDirs = FSUtils.getTableDirs(FSUtils.getCurrentFileSystem(getConf()), rootdir);
      }
      hfcc.checkTables(tableDirs);
      hfcc.report(errors);
    }

    // check and fix table integrity, region consistency.
    int code = onlineHbck();
    setRetCode(code);

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

      HBaseFsck.debugLsr(conf, FSUtils.getRootDir(conf));

      // we cannot enable here because enable never finished due to the corrupt region.
      HBaseFsck res = HbckTestingUtil.doHFileQuarantine(conf, table);
      assertEquals(res.getRetCode(), 0);
      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      assertEquals(hfcc.getHFilesChecked(), 5);
      assertEquals(hfcc.getCorrupted().size(), 1);
      assertEquals(hfcc.getFailures().size(), 0);
      assertEquals(hfcc.getQuarantined().size(), 1);
      assertEquals(hfcc.getMissing().size(), 0);

      // It's been fixed; verify that we can enable.
      TEST_UTIL.getHBaseAdmin().enableTable(table);
    } finally {
      deleteTable(table);
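
The test checks the five counters the checker exposes after a quarantine run: hfiles checked, corrupted, check failures, quarantined (sidelined), and missing. Here one corrupt hfile out of five is found and sidelined, so the return code is 0 and the table can be enabled again. A small sketch of a reporting helper; summarizeQuarantine is hypothetical (not part of HBase) and relies only on the getters exercised above:

  // Hypothetical helper (not part of HBase): one-line summary of a quarantine run,
  // using only the getters exercised in the test above.
  static String summarizeQuarantine(HBaseFsck res) {
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    return String.format(
        "retCode=%d checked=%d corrupted=%d failures=%d quarantined=%d missing=%d",
        res.getRetCode(),
        hfcc.getHFilesChecked(),
        hfcc.getCorrupted().size(),
        hfcc.getFailures().size(),
        hfcc.getQuarantined().size(),
        hfcc.getMissing().size());
  }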

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

     
      String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission", table};
      ExecutorService exec = new ScheduledThreadPoolExecutor(10);
      HBaseFsck res = hbck.exec(exec, args);

      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      assertEquals(hfcc.getHFilesChecked(), check);
      assertEquals(hfcc.getCorrupted().size(), corrupt);
      assertEquals(hfcc.getFailures().size(), fail);
      assertEquals(hfcc.getQuarantined().size(), quar);
      assertEquals(hfcc.getMissing().size(), missing);

      // It's been fixed; verify that we can enable.
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
      admin.enableTableAsync(table);
      while (!admin.isTableEnabled(table)) {
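
The excerpt is cut off inside the wait loop; presumably it just polls until the asynchronous enable completes. A minimal sketch of such a wait, assuming nothing beyond HBaseAdmin.isTableEnabled and a short sleep (waitUntilEnabled is a hypothetical helper, not the actual test code):

  // Hypothetical helper, not the actual test code: poll until an asynchronous
  // enable completes, using only HBaseAdmin.isTableEnabled and a short sleep.
  static void waitUntilEnabled(HBaseAdmin admin, String table)
      throws IOException, InterruptedException {
    while (!admin.isTableEnabled(table)) {
      Thread.sleep(250);
    }
  }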

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    // inject a fault in the hfcc created.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
        return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
          boolean attemptedFirstHFile = false;
          protected void checkHFile(Path p) throws IOException {
            if (!attemptedFirstHFile) {
              attemptedFirstHFile = true;
              assertTrue(fs.delete(p, true)); // make sure delete happened.

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    // inject a fault in the hfcc created.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
        return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
          boolean attemptedFirstFamDir = false;
          protected void checkColFamDir(Path p) throws IOException {
            if (!attemptedFirstFamDir) {
              attemptedFirstFamDir = true;
              assertTrue(fs.delete(p, true)); // make sure delete happened.

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    // inject a fault in the hfcc created.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
        return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
          boolean attemptedFirstRegionDir = false;
          protected void checkRegionDir(Path p) throws IOException {
            if (!attemptedFirstRegionDir) {
              attemptedFirstRegionDir = true;
              assertTrue(fs.delete(p, true)); // make sure delete happened.
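
The three fault-injection excerpts above share one pattern: subclass HBaseFsck, override createHFileCorruptionChecker, and return a checker whose first visit to an hfile, a column-family directory, or a region directory deletes that path before the check runs, so the missing/failure counters can be exercised. A sketch of the completed pattern for the hfile case; the super.checkHFile(p) call after the injected delete is an assumption about what the truncated excerpts go on to do:

    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      @Override
      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles)
          throws IOException {
        return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
          boolean attemptedFirstHFile = false;

          @Override
          protected void checkHFile(Path p) throws IOException {
            if (!attemptedFirstHFile) {
              attemptedFirstHFile = true;
              // inject the fault: delete the first hfile we are asked to check
              assertTrue(fs.delete(p, true)); // make sure delete happened
            }
            // assumed continuation: fall through to the normal per-hfile check
            super.checkHFile(p);
          }
        };
      }
    };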

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

      String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
          table.getNameAsString()};
      ExecutorService exec = new ScheduledThreadPoolExecutor(10);
      HBaseFsck res = hbck.exec(exec, args);

      HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
      assertEquals(hfcc.getHFilesChecked(), check);
      assertEquals(hfcc.getCorrupted().size(), corrupt);
      assertEquals(hfcc.getFailures().size(), fail);
      assertEquals(hfcc.getQuarantined().size(), quar);
      assertEquals(hfcc.getMissing().size(), missing);

      // It's been fixed; verify that we can enable.
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
      admin.enableTableAsync(table);
      while (!admin.isTableEnabled(table)) {

Examples of org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker

    // inject a fault in the hfcc created.
    final FileSystem fs = FileSystem.get(conf);
    HBaseFsck hbck = new HBaseFsck(conf, exec) {
      @Override
      public HFileCorruptionChecker createHFileCorruptionChecker(boolean sidelineCorruptHFiles) throws IOException {
        return new HFileCorruptionChecker(conf, executor, sidelineCorruptHFiles) {
          boolean attemptedFirstHFile = false;
          @Override
          protected void checkHFile(Path p) throws IOException {
            if (!attemptedFirstHFile) {
              attemptedFirstHFile = true;