Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegion$MutationBatch


    // get the region currently serving the test table
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // and load the table
    UTIL.loadRegion(region, TEST_FAM);

    // shut down the table so we can manipulate the files
    admin.disableTable(TABLE_NAME);

    FileSystem fs = UTIL.getTestFileSystem();

    // now attempt to archive the region
    Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());

    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // check for the existence of the archive directory and some files in it
    Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
    assertTrue(fs.exists(archiveDir));
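A possible follow-up check, sketched here on the assumption that the fs, archiveDir and LOG variables from the snippet above are still in scope: list the archive directory's contents with the plain Hadoop FileSystem API to see which store files were actually moved.

    // Hedged sketch: the archive directory should now contain the region's files.
    FileStatus[] archivedFiles = fs.listStatus(archiveDir);
    assertTrue("Nothing was archived under " + archiveDir,
      archivedFiles != null && archivedFiles.length > 0);
    for (FileStatus archived : archivedFiles) {
      LOG.debug("Archived entry: " + archived.getPath());
    }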


    // get the region currently serving the test table
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    FileSystem fs = region.getFilesystem();

    // make sure there are some files in the region dir
    Path rootDir = FSUtils.getRootDir(fs.getConf());
    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
    FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
    Assert.assertNotNull("No files in the region directory", regionFiles);
    if (LOG.isDebugEnabled()) {
      List<Path> files = new ArrayList<Path>();
      for (FileStatus file : regionFiles) {
        files.add(file.getPath());
      }
      LOG.debug("Current files:" + files);
    }
    // delete the visible folders so we just have hidden files/folders
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        return dirFilter.accept(file) && !file.getName().startsWith(".");
      }
    };
    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
    for (FileStatus store : storeDirs) {
      LOG.debug("Deleting store for test");
      fs.delete(store.getPath(), true);
    }

    // then archive the region
    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // and check to make sure the region directory got deleted
    assertFalse("Region directory (" + regionDir + ") still exists.", fs.exists(regionDir));

    UTIL.deleteTable(TABLE_NAME);
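The anonymous PathFilter above can be pulled out into a named test helper if several tests need it. A minimal sketch; getVisibleStoreDirs is a made-up name, not an HBase API:

  // Hypothetical helper: list the visible (non-hidden) store directories under a region directory.
  private static FileStatus[] getVisibleStoreDirs(final FileSystem fs, final Path regionDir) throws IOException {
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        return dirFilter.accept(file) && !file.getName().startsWith(".");
      }
    };
    return FSUtils.listStatus(fs, regionDir, nonHidden);
  }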

    UTIL.createTable(TABLE_NAME, TEST_FAM);

    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // get the parent RS and monitor
    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    FileSystem fs = hrs.getFileSystem();

    // put some data on the region
    LOG.debug("-------Loading table");
    UTIL.loadRegion(region, TEST_FAM);

    // get the hfiles in the region
    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
    assertEquals("More that 1 region for test table.", 1, regions.size());

    region = regions.get(0);
    // wait for all the compactions to complete
    region.waitForFlushesAndCompactions();

    // disable table to prevent new updates
    UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
    LOG.debug("Disabled table");

    // remove all the files from the archive to get a fair comparison
    clearArchiveDirectory();

    // then get the current store files
    Path regionDir = region.getRegionDir();
    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);

    // then delete the table so the hfiles get archived
    UTIL.deleteTable(TABLE_NAME);
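The excerpt ends right after the delete. A hedged sketch of the assertion that would typically follow, reusing HFileArchiveTestingUtil.getRegionArchiveDir from the first example and the getRegionStoreFiles helper these tests rely on (sketched after the next example); the real test may also need to wait for the delete and archival to complete, and its exact checks may differ:

    // Hedged sketch: the store files recorded before the delete should reappear under the
    // region's archive directory, whose layout mirrors the original region directory.
    Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
    List<String> archivedFiles = getRegionStoreFiles(fs, archiveDir);
    assertTrue("Archived files " + archivedFiles + " do not cover " + storeFiles,
      archivedFiles.containsAll(storeFiles));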

    UTIL.createTable(TABLE_NAME, TEST_FAM);

    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // get the parent RS and monitor
    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    FileSystem fs = hrs.getFileSystem();

    // put some data on the region
    LOG.debug("-------Loading table");
    UTIL.loadRegion(region, TEST_FAM);

    // get the hfiles in the region
    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
    assertEquals("More that 1 region for test table.", 1, regions.size());

    region = regions.get(0);
    // wait for all the compactions to complete
    region.waitForFlushesAndCompactions();

    // disable table to prevent new updates
    UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
    LOG.debug("Disabled table");

    // remove all the files from the archive to get a fair comparison
    clearArchiveDirectory();

    // then get the current store files
    Path regionDir = region.getRegionDir();
    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);

    // then delete the column family so the hfiles get archived
    UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
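Both archiving excerpts call getRegionStoreFiles(fs, regionDir), which is not shown on this page. A hypothetical sketch of such a helper, built only from the FSUtils calls that already appear in these examples; the real helper may additionally filter out non-store entries:

  // Hypothetical helper: collect the file names found under each column family directory of a region.
  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir) throws IOException {
    List<String> storeFiles = new ArrayList<String>();
    for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
      FileStatus[] files = FSUtils.listStatus(fs, familyDir, null);
      if (files == null) {
        continue;
      }
      for (FileStatus file : files) {
        storeFiles.add(file.getPath().getName());
      }
    }
    return storeFiles;
  }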

    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    htd.setOwner(USER_OWNER);
    admin.createTable(htd);
    TEST_UTIL.waitTableEnabled(TEST_TABLE, 5000);

    HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE).get(0);
    RegionCoprocessorHost rcpHost = region.getCoprocessorHost();
    RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER,
      Coprocessor.PRIORITY_HIGHEST, 1, conf);

    // initialize access control
    HTable acl = new HTable(conf, AccessControlLists.ACL_TABLE_NAME);
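Two small, hedged additions around the ACL table handle, assuming AccessControlLists.ACL_TABLE_NAME has the same byte[] form as TEST_TABLE above: wait for the ACL system table the same way the test table is waited on, and close the handle when the test class tears down.

    // Hedged sketch: make sure the ACL system table is up before using the handle opened above,
    // and release that handle during teardown.
    TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME, 5000);
    // ... run the access-control checks ...
    acl.close();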

      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }
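The method's signature is cut off in this excerpt; from the body it takes a table name and an HColumnDescriptor and returns the freshly created HRegion. A hedged usage sketch, with createTestRegion standing in for the truncated method name and made-up table/family names:

    // Hypothetical usage: create a standalone region, exercise it, then close it.
    HColumnDescriptor hcd = new HColumnDescriptor("f");
    HRegion region = createTestRegion("testMyFeature", hcd);
    try {
      // ... put/get against the standalone region ...
    } finally {
      region.close();
    }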

    public void compact(final Path path, final boolean compactOnce, final boolean major) throws IOException {
      if (isFamilyDir(fs, path)) {
        Path regionDir = path.getParent();
        Path tableDir = regionDir.getParent();
        HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
        HRegion region = loadRegion(fs, conf, htd, regionDir);
        compactStoreFiles(region, path, compactOnce, major);
      } else if (isRegionDir(fs, path)) {
        Path tableDir = path.getParent();
        HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
        compactRegion(htd, path, compactOnce, major);

      }
    }

    private void compactRegion(final HTableDescriptor htd, final Path regionDir,
        final boolean compactOnce, final boolean major) throws IOException {
      HRegion region = loadRegion(fs, conf, htd, regionDir);
      LOG.info("Compact table=" + htd.getNameAsString() +
        " region=" + region.getRegionNameAsString());
      for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
        compactStoreFiles(region, familyDir, compactOnce, major);
      }
    }
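isFamilyDir, isRegionDir, loadRegion and compactStoreFiles are private helpers of the surrounding tool and are not shown here. A hypothetical sketch of how the two directory checks are commonly implemented, keying off the .regioninfo marker file that HBase writes into every region directory:

    // Hypothetical sketch: a region directory contains a .regioninfo file; a family directory
    // is simply a direct child of a region directory.
    private static boolean isRegionDir(final FileSystem fs, final Path path) throws IOException {
      return fs.exists(new Path(path, ".regioninfo"));
    }

    private static boolean isFamilyDir(final FileSystem fs, final Path path) throws IOException {
      return isRegionDir(fs, path.getParent());
    }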

    return HRegion.createHRegion(hri, testDir, conf, desc);
  }

  protected HRegion openClosedRegion(final HRegion closedRegion)
  throws IOException {
    HRegion r = new HRegion(closedRegion);
    r.initialize();
    return r;
  }
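A hedged usage sketch for this close/reopen pair, assuming a region created earlier in the test (for example via the createHRegion call above) that already has data in it:

    // Hedged sketch: close the region mid-test, reopen it, and verify the data survived.
    region.close();
    HRegion reopened = openClosedRegion(region);
    // ... read back the previously written rows and assert on them ...
    reopened.close();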

      //we allow this to pass through in "simple" security mode
      //for mini cluster testing
      throw new DoNotRetryIOException("User token cannot be null");
    }

    HRegion region = env.getRegion();
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
      bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
    }
    boolean loaded = false;
    final IOException[] es = new IOException[1];
    if (!bypass) {
      loaded = ugi.doAs(new PrivilegedAction<Boolean>() {
        @Override
        public Boolean run() {
          FileSystem fs = null;
          try {
            Configuration conf = env.getConfiguration();
            fs = FileSystem.get(conf);
            for(Pair<byte[], String> el: familyPaths) {
              Path p = new Path(el.getSecond());
              LOG.debug("Setting permission for: " + p);
              fs.setPermission(p, PERM_ALL_ACCESS);
              Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
              if(!fs.exists(stageFamily)) {
                fs.mkdirs(stageFamily);
                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
              }
            }
            //We call bulkLoadHFiles as requesting user
            //To enable access prior to staging
            return env.getRegion().bulkLoadHFiles(familyPaths,
                new SecureBulkLoadListener(fs, bulkToken));
          }
          catch(DoNotRetryIOException e){
            es[0] = e;
          }
          catch (Exception e) {
            LOG.error("Failed to complete bulk load", e);
          }
          return false;
        }
      });
    }

    if (es[0] != null) {
      throw es[0];
    }

    if (region.getCoprocessorHost() != null) {
      loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded);
    }
    return loaded;
  }
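The key technique in this endpoint is running the filesystem work as the requesting user via UserGroupInformation.doAs. A stand-alone, hedged illustration of that pattern, independent of the bulk-load specifics; the user name and path below are made up:

    // Hedged sketch: perform a filesystem operation as a specific user rather than as the server principal.
    UserGroupInformation requester = UserGroupInformation.createRemoteUser("request-user");
    boolean created = requester.doAs(new PrivilegedAction<Boolean>() {
      @Override
      public Boolean run() {
        try {
          FileSystem fs = FileSystem.get(new Configuration());
          return fs.mkdirs(new Path("/tmp/bulkload-staging-example"));
        } catch (IOException e) {
          LOG.error("Failed to create the example staging directory", e);
          return false;
        }
      }
    });
    LOG.debug("Staging directory created: " + created);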