Examples of HRegionFileSystem
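The snippets below show org.apache.hadoop.hbase.regionserver.HRegionFileSystem in use. The class is HBase's view of a single region's directory tree on the underlying FileSystem: callers open an existing region with openRegionFromFileSystem(...) or lay out a new one with createRegionOnFileSystem(...), and then commit store files, enumerate column families and their store files, or check for reference files through it.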


Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
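This snippet opens a split daughter region's directory read-only and checks each of the parent table's column families for leftover reference files; it returns a Pair of (daughter directory exists, daughter still references the parent).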

    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());

    HRegionFileSystem regionFs = null;
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, daughter, true);
    } catch (IOException e) {
      LOG.warn("Daughter region does not exist: " + daughter.getEncodedName()
        + ", parent is: " + parent.getEncodedName());
      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
    }

    boolean references = false;
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
        break;
      }
    }
    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
  }
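Note the two answers packed into the Pair: the first Boolean reports whether the daughter region's directory exists at all, the second whether any column family still holds reference files pointing back at the parent, in which case the parent's files cannot be cleaned up yet.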

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
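A test fixture that mocks a snapshot layout: it creates two region directories, commits a plain store file into the first and a file named like a reference to the first region's hfile (<hfile>.<encoded region name>) into the second, then copies the table directory into the completed-snapshot directory and writes the snapshot info.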

    Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);

    // First region, simple with one plain hfile.
    HRegionInfo hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, FSUtils.getTableDir(archiveDir, hri.getTable()), hri);
    Path storeFile = new Path(rootDir, TEST_HFILE);
    FSDataOutputStream out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r0fs.commitStoreFile(TEST_FAMILY, storeFile);

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    hri = new HRegionInfo(tableWithRefsName);
    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
      fs, new Path(archiveDir, hri.getTable().getNameAsString()), hri);
    storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
    out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r1fs.commitStoreFile(TEST_FAMILY, storeFile);

    Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName);
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
    SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs);

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
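Another test helper, this one building regions in pairs: the first region of each pair gets plain store files committed from temp names, while the second gets files that follow the reference-file naming convention (<hfile>.<encoded name of the first region>), mimicking the layout left behind by a split.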

        byte[] startKey = Bytes.toBytes(0 + i * 2);
        byte[] endKey = Bytes.toBytes(1 + i * 2);

        // First region, simple with one plain hfile.
        HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
        HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
        regions[i] = new RegionData(tableDir, hri, 3);
        for (int j = 0; j < regions[i].files.length; ++j) {
          Path storeFile = createStoreFile(rfs.createTempName());
          regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
        }

        // Second region, used to test the split case.
        // This region contains a reference to the hfile in the first region.
        startKey = Bytes.toBytes(2 + i * 2);
        endKey = Bytes.toBytes(3 + i * 2);
        hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
        rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
        regions[i+1] = new RegionData(tableDir, hri, regions[i].files.length);
        for (int j = 0; j < regions[i].files.length; ++j) {
          String refName = regions[i].files[j].getName() + '.' + regions[i].hri.getEncodedName();
          Path refFile = createStoreFile(new Path(rootDir, refName));
          regions[i+1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
        }
      }
      return regions;
    }
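Distilled from the two snippets above, here is a minimal sketch of the create-and-commit flow. The helper name and the column family "f" are invented for illustration, and the bytes written are a placeholder rather than a real HFile, just as in the tests:

    // Hypothetical helper (sketch only): lay out a region directory and commit
    // one placeholder store file into column family "f".
    static Path createRegionWithOneFile(Configuration conf, FileSystem fs,
        Path tableDir, HRegionInfo hri) throws IOException {
      HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, hri);
      Path tmp = rfs.createTempName();                     // file under the region's .tmp directory
      FSDataOutputStream out = fs.create(tmp);
      out.write(Bytes.toBytes("placeholder store data"));  // stand-in for real HFile contents
      out.close();
      return rfs.commitStoreFile("f", tmp);                // renamed into the region's family directory
    }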

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
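The merge counterpart of the first example: it opens the merged region read-only and, if the region no longer holds references to either pre-merge region (regionA and regionB), archives both of them.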

      final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    Path rootdir = this.services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
    HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
    HRegionFileSystem regionFs = null;
    try {
      regionFs = HRegionFileSystem.openRegionFromFileSystem(
          this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
    } catch (IOException e) {
      LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
    }
    if (regionFs == null || !regionFs.hasReferences(htd)) {
      LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
          + regionB.getRegionNameAsString()
          + " from fs because merged region no longer holds references");
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
      HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
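HFileArchiver.archiveRegion moves the obsolete regions' files under HBase's archive directory rather than deleting them outright, so files that are still referenced elsewhere (for example by snapshots) remain recoverable.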

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
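Snapshot-manifest code: the region is opened read-only, its region info is handed to a RegionVisitor, and then every column family and its StoreFileInfos are walked so the snapshot can record references to the existing hfiles.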

  public void addRegion(final Path tableDir, final HRegionInfo regionInfo) throws IOException {
    // 0. Get the ManifestBuilder/RegionVisitor
    RegionVisitor visitor = createRegionVisitor(desc);

    // Open the RegionFS
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
          tableDir, regionInfo, true);
    monitor.rethrowException();

    // 1. dump region meta info into the snapshot directory
    LOG.debug("Storing region-info for snapshot.");
    Object regionData = visitor.regionOpen(regionInfo);
    monitor.rethrowException();

    // 2. iterate through all the stores in the region
    LOG.debug("Creating references for hfiles");

    // This ensures that we have an atomic view of the directory as long as we have < ls limit
    // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
    // batches and may miss files being added/deleted. This could be more robust (iteratively
    // checking to see if we have all the files until we are sure), but the limit is currently 1000
    // files/batch, far more than the number of store files under a single column family.
    Collection<String> familyNames = regionFs.getFamilies();
    if (familyNames != null) {
      for (String familyName: familyNames) {
        Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
        monitor.rethrowException();

        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
        if (storeFiles == null) {
          LOG.debug("No files under family: " + familyName);
          continue;
        }
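For reference, the same family and store-file walk on its own, as a sketch that assumes a regionFs obtained as above and an available LOG; the null checks mirror the ones in the snippet:

    Collection<String> families = regionFs.getFamilies();
    if (families != null) {
      for (String family : families) {
        Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
        if (storeFiles == null) continue;
        for (StoreFileInfo storeFile : storeFiles) {
          LOG.debug("Store file " + family + "/" + storeFile.getPath().getName());
        }
      }
    }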

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
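A unit-test setup (evidently from a transaction coprocessor test, judging by TransactionStateCache): a fresh region directory is created with createRegionOnFileSystem and an HRegion is instantiated directly on top of it with a WAL and mock region-server services.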

    Configuration hConf = conf;
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hLog = HLogFactory.createHLog(fs, hlogPath, "testRegionScanner", hConf);
    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    HRegion region = new HRegion(regionFS, hLog, hConf, htd,
                                 new MockRegionServerServices(hConf, null));
    try {
      region.initialize();
      TransactionStateCache cache = new TransactionStateCacheSupplier(hConf).get();

Examples of org.apache.hadoop.hbase.regionserver.HRegionFileSystem
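The same setup as the previous snippet, reused for a delete-filtering test; only the WAL name and an extra log line differ.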

    Configuration hConf = conf;
    FileSystem fs = FileSystem.get(hConf);
    assertTrue(fs.mkdirs(tablePath));
    HLog hLog = HLogFactory.createHLog(fs, hlogPath, "testDeleteFiltering", hConf);
    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf(tableName));
    HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
    HRegion region = new HRegion(regionFS, hLog, hConf, htd, new MockRegionServerServices(hConf, null));
    try {
      region.initialize();
      TransactionStateCache cache = new TransactionStateCacheSupplier(hConf).get();
      LOG.info("Coprocessor is using transaction state: " + cache.getLatestState());