Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
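The excerpts below show how HBase code creates, opens, and inspects a region's on-disk layout through HRegionFileSystem: creating the layout under a table directory, committing store files into a column family, reopening an existing region read-only, and checking for split reference files. As a minimal sketch of that lifecycle, using only the calls that appear in the snippets (the table name, family name "f1", and file names are hypothetical; the HRegionInfo-based signatures assume the 0.96-era API shown here):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.FSUtils;

public class HRegionFileSystemSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical table; the root dir comes from hbase.rootdir in the configuration.
    TableName tableName = TableName.valueOf("demo_table");
    Path rootDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(rootDir, tableName);

    // Create the on-disk layout for a brand-new region of that table.
    HRegionInfo hri = new HRegionInfo(tableName);
    HRegionFileSystem regionFs =
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);

    // Move a previously written file into the "f1" column family directory.
    Path storeFile = new Path(rootDir, "demo-hfile");   // hypothetical file name
    fs.createNewFile(storeFile);
    regionFs.commitStoreFile("f1", storeFile);

    // Reopen the same region read-only and inspect it.
    HRegionFileSystem reopened = HRegionFileSystem.openRegionFromFileSystem(
        conf, fs, tableDir, regionFs.getRegionInfo(), true);
    System.out.println("region dir: " + reopened.getRegionDir());
    System.out.println("f1 has references: " + reopened.hasReferences("f1"));
  }
}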


   * Take a snapshot of the specified disabled region
   */
  protected void snapshotDisabledRegion(final HRegionInfo regionInfo)
      throws IOException {
    // 2 copy the regionInfo files to the snapshot
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
      workingDir, regionInfo);

    // check for error for each region
    monitor.rethrowException();

    // 2 for each region, copy over its recovered.edits directory
    Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
    Path snapshotRegionDir = regionFs.getRegionDir();
    new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
    monitor.rethrowException();
    status.setStatus("Completed copying recovered edits for offline snapshot of table: "
        + snapshotTable);

View Full Code Here
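In this snapshot path, createRegionOnFileSystem() is pointed at the snapshot working directory rather than the live table directory, so the call recreates the region's directory structure inside the snapshot; getRegionDir() then yields the destination for the copied recovered.edits. A hedged fragment isolating that idea (conf, fs, workingDir, regionInfo, and rootDir are the snippet's own variables):

    // Recreate the region layout under the snapshot working directory ...
    HRegionFileSystem snapshotRegionFs =
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, workingDir, regionInfo);

    // ... so the recovered.edits copy has a matching target inside the snapshot.
    Path source = HRegion.getRegionDir(rootDir, regionInfo);   // live region dir
    Path target = snapshotRegionFs.getRegionDir();             // snapshot region dir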


    Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);

    // First region, simple with one plain hfile.
    HRegionInfo hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
    Path storeFile = new Path(rootDir, TEST_HFILE);
    FSDataOutputStream out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r0fs.commitStoreFile(TEST_FAMILY, storeFile);

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, new Path(archiveDir, hri.getTable().getNameAsString()), hri);
    storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
    out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r1fs.commitStoreFile(TEST_FAMILY, storeFile);

    Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName);
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
    SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs);
View Full Code Here
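The second region in the test above commits a file whose name is the first region's hfile name plus '.' plus the first region's encoded name. That dotted suffix is the reference-file naming convention a split leaves behind, and it is what the hasReferences() checks further down look for. A hedged fragment isolating the naming (hfileName is hypothetical; r0fs, r1fs, rootDir, fs, and TEST_FAMILY come from the snippet):

    // Hypothetical hfile name; real store file names are hex strings.
    String hfileName = "0123456789abcdef0123456789abcdef";

    // A split reference keeps the original file name and appends the encoded name
    // of the source region (here the first region): "<hfile>.<encoded region name>".
    String parentEncodedName = r0fs.getRegionInfo().getEncodedName();
    Path referenceFile = new Path(rootDir, hfileName + '.' + parentEncodedName);

    fs.createNewFile(referenceFile);
    r1fs.commitStoreFile(TEST_FAMILY, referenceFile);   // family dir now holds a reference-named file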


     * the store dir to compact as source.
     */
    private static HStore getStore(final Configuration conf, final FileSystem fs,
        final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
        final String familyName, final Path tempDir) throws IOException {
      HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
        @Override
        public Path getTempDir() {
          return tempDir;
        }
      };
View Full Code Here
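The compaction tool above subclasses HRegionFileSystem purely to redirect getTempDir(), so in-progress output lands in a caller-chosen staging directory. The surrounding pattern, sketched below with hypothetical names (regionFs, fs, and the family name are as in the snippet), is to write new files under getTempDir() and publish them with commitStoreFile():

      // Write the in-progress output somewhere under the region's temp dir ...
      Path tempDir = regionFs.getTempDir();
      Path inProgress = new Path(tempDir, "hypothetical-compaction-output");
      fs.createNewFile(inProgress);   // stand-in for the real store file writer

      // ... then move it into the column family directory once it is complete.
      regionFs.commitStoreFile(familyName, inProgress);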


          byte[] sk = hri.getStartKey();
          if (sk.length == 0)
            sk = splitAlgo.firstRow();
          String startKey = splitAlgo.rowToStr(sk);

          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
              table.getConfiguration(), fs, tableDir, hri, true);

          // check every Column Family for that region
          boolean refFound = false;
          for (HColumnDescriptor c : htd.getFamilies()) {
            if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
              break;
            }
          }

          // compaction is completed when all reference files are gone
View Full Code Here
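The loop above (with each family's own name passed to hasReferences(), as corrected here) is how the splitter decides whether a daughter region still points at its parent's hfiles: as long as any column family directory contains a reference file, the post-split compaction has not finished rewriting the data locally. A hedged helper capturing that check, built only from calls that appear in these snippets (the method name and signature are hypothetical):

  /**
   * Returns true if any column family of the given region still contains
   * reference files. Sketch only; name and signature are illustrative.
   */
  private static boolean regionHasReferences(Configuration conf, FileSystem fs,
      Path tableDir, HRegionInfo hri, HTableDescriptor htd) throws IOException {
    HRegionFileSystem regionFs =
        HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true); // read-only
    for (HColumnDescriptor family : htd.getFamilies()) {
      if (regionFs.hasReferences(family.getNameAsString())) {
        return true;   // at least one family still references another region's files
      }
    }
    return false;      // no references left: the post-split compaction is complete
  }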

          + ClientSnapshotDescriptionUtils.toString(snapshot);
      LOG.info(msg);
      status.setStatus(msg);
      for (HRegionInfo regionInfo : regions) {
        // 2.1 copy the regionInfo files to the snapshot
        HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs,
          snapshotDir, regionInfo);

        // check for error for each region
        monitor.rethrowException();

        // 2.2 for each region, copy over its recovered.edits directory
        Path regionDir = HRegion.getRegionDir(rootDir, regionInfo);
        Path snapshotRegionDir = regionFs.getRegionDir();
        new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir, snapshotRegionDir).call();
        monitor.rethrowException();
        status.setStatus("Completed copying recovered edits for offline snapshot of table: "
            + snapshotTable);
View Full Code Here

  private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd)
      throws IOException {
    // First region, simple with one plain hfile.
    HRegionInfo hri = new HRegionInfo(htd.getTableName());
    HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri);
    Path storeFile = new Path(rootDir, TEST_HFILE);
    fs.createNewFile(storeFile);
    r0fs.commitStoreFile(TEST_FAMILY, storeFile);

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    hri = new HRegionInfo(htd.getTableName());
    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTableName()), hri);
    storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
    fs.createNewFile(storeFile);
    r1fs.commitStoreFile(TEST_FAMILY, storeFile);

    Path tableDir = FSUtils.getTableDir(archiveDir, htd.getTableName());
    FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
  }
View Full Code Here

    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir,
        mergedRegion.getTableName());
    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTableName());
    HRegionFileSystem regionFs = null;
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
          + regionB.getRegionNameAsString()
          + " from fs because merged region no longer holds references");
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
View Full Code Here
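Note the two hasReferences() overloads in play: the merge-cleanup path above passes the whole HTableDescriptor and lets HRegionFileSystem check every family, while the daughter-region check below loops over the parent's families and passes each name individually. A hedged one-liner showing the descriptor-based form (regionFs and htd as in the snippet above):

    // True if ANY column family directory of this region still contains reference files.
    boolean stillReferenced = regionFs.hasReferences(htd);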

    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTableName());

    HRegionFileSystem regionFs = null;
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, daughter, true);
    } catch (IOException e) {
      LOG.warn("Daughter region does not exist: " + daughter.getEncodedName());
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }

    boolean references = false;
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
        break;
      }
    }
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
  }
View Full Code Here
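The Pair returned here encodes two separate facts: the first Boolean is whether the daughter's region directory could be opened at all, the second whether any of the parent's column families still have reference files in the daughter. A hedged sketch of how a caller might act on it (checkDaughterInFs is an assumed name for the method excerpted above; parent and daughterA are HRegionInfo instances):

    Pair<Boolean, Boolean> daughterState = checkDaughterInFs(parent, daughterA);
    boolean daughterExists = daughterState.getFirst();
    boolean daughterHasReferences = daughterState.getSecond();

    if (daughterExists && !daughterHasReferences) {
      // The daughter exists and no longer references the parent's hfiles,
      // so the parent's files can be archived (see the merge-cleanup snippet above).
    }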
