Examples of HRegionFileSystem
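HRegionFileSystem is the HBase abstraction for a single region's directory layout on the underlying FileSystem: it creates and opens region directories, stages hfiles under temporary names, commits them into a column family, and enumerates store files and split references. As a minimal sketch tying together the calls that recur in the examples below (here conf, fs, tableDir, and hri stand for a Configuration, FileSystem, table directory Path, and HRegionInfo prepared by the caller, and the family name "f" is purely illustrative):

    // Create the on-disk layout for a new region.
    HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);

    // Stage an hfile under the region's temporary directory, then commit
    // (rename) it into the family directory.
    Path tmpPath = rfs.createTempName();
    // ... write the hfile contents at tmpPath ...
    Path committed = rfs.commitStoreFile("f", tmpPath);

    // Reopen the same region read-only and enumerate its contents.
    HRegionFileSystem roFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
    Collection<String> families = roFs.getFamilies();
    if (families != null) {
      for (String family : families) {
        Collection<StoreFileInfo> storeFiles = roFs.getStoreFiles(family);
      }
    }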


Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

        byte[] startKey = Bytes.toBytes(0 + i * 2);
        byte[] endKey = Bytes.toBytes(1 + i * 2);

        // First region, simple with one plain hfile.
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
        HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
        regions[i] = new RegionData(tableDir, hri, 3);
        for (int j = 0; j < regions[i].files.length; ++j) {
          Path storeFile = createStoreFile(rfs.createTempName());
          regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
        }

        // Second region, used to test the split case.
        // This region contains a reference to the hfile in the first region.
        startKey = Bytes.toBytes(2 + i * 2);
        endKey = Bytes.toBytes(3 + i * 2);
        hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
        rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
        regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
        for (int j = 0; j < regions[i].files.length; ++j) {
          String refName = regions[i].files[j].getName() + '.' + regions[i].hri.getEncodedName();
          Path refFile = createStoreFile(new Path(rootDir, refName));
          regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
        }
      }
      return regions;
    }
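Two conventions are worth noting in this snippet. Store files are first written under a name obtained from createTempName() and only become visible once commitStoreFile() moves them into the family directory, mirroring how a flush publishes an hfile. And the files committed to the second region are named <hfileName>.<encodedNameOfFirstRegion>, the naming scheme HBase uses to recognize a post-split reference file.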

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

    fs.delete(new Path(snapshotDir, regionName), true);
  }

  static SnapshotRegionManifest buildManifestFromDisk(final Configuration conf,
      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
          tableDir, regionInfo, true);
    SnapshotRegionManifest.Builder manifest = SnapshotRegionManifest.newBuilder();

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing region-info for snapshot.");
    manifest.setRegionInfo(HRegionInfo.convert(regionInfo));

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");

    // This ensures that we have an atomic view of the directory as long as we have < ls limit
    // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
    // batches and may miss files being added/deleted. This could be more robust (iteratively
    // checking to see if we have all the files until we are sure), but the limit is currently 1000
    // files/batch, far more than the number of store files under a single column family.
    Collection<String> familyNames = regionFs.getFamilies();
    if (familyNames != null) {
      for (String familyName: familyNames) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName, false);
        if (storeFiles == null) {
          LOG.debug("No files under family: " + familyName);
          continue;
        }
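The loop body elided above turns each StoreFileInfo into a manifest entry. A plausible sketch using the SnapshotRegionManifest protobuf builders (the exact builder methods here are an assumption, not taken from the excerpt):

        SnapshotRegionManifest.FamilyFiles.Builder family =
            SnapshotRegionManifest.FamilyFiles.newBuilder();
        // Assumed protobuf fields: familyName on FamilyFiles, name on StoreFile.
        // ByteString is com.google.protobuf.ByteString.
        family.setFamilyName(ByteString.copyFrom(Bytes.toBytes(familyName)));
        for (StoreFileInfo storeFile : storeFiles) {
          family.addStoreFiles(SnapshotRegionManifest.StoreFile.newBuilder()
              .setName(storeFile.getPath().getName()));
        }
        manifest.addFamilyFiles(family.build());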

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

      this.conf = conf;
      this.fs = fs;
    }

    public HRegionFileSystem regionOpen(final HRegionInfo regionInfo) throws IOException {
      HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
        fs, snapshotDir, regionInfo);
      return snapshotRegionFs;
    }
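Here regionOpen simply creates the region's directory layout under snapshotDir; subsequent per-family and per-store-file callbacks can then place reference files inside that mirrored structure.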

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

  public void addRegion(final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    // 0. Get the ManifestBuilder/RegionVisitor
    RegionVisitor visitor = createRegionVisitor(desc);

    // Open the RegionFS
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
          tableDir, regionInfo, true);
    monitor.rethrowException();

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing region-info for snapshot.");
    Object regionData = visitor.regionOpen(regionInfo);
    monitor.rethrowException();

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");

    // This ensures that we have an atomic view of the directory as long as we have < ls limit
    // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
    // batches and may miss files being added/deleted. This could be more robust (iteratively
    // checking to see if we have all the files until we are sure), but the limit is currently 1000
    // files/batch, far more than the number of store files under a single column family.
    Collection<String> familyNames = regionFs.getFamilies();
    if (familyNames != null) {
      for (String familyName: familyNames) {
        Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
        monitor.rethrowException();

        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
        if (storeFiles == null) {
          LOG.debug("No files under family: " + familyName);
          continue;
        }
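Unlike buildManifestFromDisk above, addRegion drives a RegionVisitor rather than writing manifest records directly, so the same traversal can feed different manifest formats. The contract implied by the calls above, sketched with inferred generics (the method set is reconstructed from usage, not quoted from the source):

    interface RegionVisitor<TRegion, TFamily> {
      TRegion regionOpen(HRegionInfo regionInfo) throws IOException;
      TFamily familyOpen(TRegion regionData, byte[] familyName) throws IOException;
      // ... matching storeFile/familyClose/regionClose callbacks ...
    }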

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

          byte[] sk = hri.getStartKey();
          if (sk.length == 0) {
            sk = splitAlgo.firstRow();
          }
          String startKey = splitAlgo.rowToStr(sk);

          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
              table.getConfiguration(), fs, tableDir, hri, true);

          // check every Column Family for that region
          boolean refFound = false;
          for (HColumnDescriptor c : htd.getFamilies()) {
            if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
              break;
            }
          }

          // compaction is completed when all reference files are gone
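This check works because the daughters of a split initially contain only reference files pointing at the parent's hfiles; compaction rewrites that data locally and removes the references, so hasReferences() returning false for every family means the daughters are self-contained and safe to split again.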

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem

    Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);

    // First region, simple with one plain hfile.
    HRegionInfo hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
    Path storeFile = new Path(rootDir, TEST_HFILE);
    FSDataOutputStream out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r0fs.commitStoreFile(TEST_FAMILY, storeFile);

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
    storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
    out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r1fs.commitStoreFile(TEST_FAMILY, storeFile);

    Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName);
    HTableDescriptor htd = new HTableDescriptor(tableWithRefsName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
    new FSTableDescriptors(fs, rootDir)
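Both regions here are created directly under the hfile archive directory, simulating a table whose files have already been archived: the first region holds a plain hfile, while the second commits a store file named with the <hfileName>.<encodedRegionName> reference convention, so code reading the archive must resolve it through the first region.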