Package org.apache.hadoop.hbase.master

Examples of org.apache.hadoop.hbase.master.MasterFileSystem
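
All of the snippets below share one access pattern: obtain the MasterFileSystem from the running master (or from the MasterServices handed to a handler) and use it to reach the underlying FileSystem plus the cluster root and temp directories. A minimal sketch of that pattern, using only calls that appear in the examples on this page (the class and method names here are illustrative, not HBase API):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;

public class MasterFsLayoutSketch {
  // masterServices is assumed to be supplied by the surrounding handler or test.
  static void logLayout(MasterServices masterServices) {
    MasterFileSystem mfs = masterServices.getMasterFileSystem();
    FileSystem fs = mfs.getFileSystem();  // the underlying HDFS (or local) filesystem
    Path rootDir = mfs.getRootDir();      // cluster root, e.g. hdfs://.../hbase
    Path tempDir = mfs.getTempDir();      // staging area used by the table handlers
    System.out.println("fs=" + fs.getUri() + " root=" + rootDir + " temp=" + tempDir);
  }
}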


  /**
   * Removes from HDFS the families that are no longer present in the new table descriptor.
   */
  private void deleteFamilyFromFS(final List<HRegionInfo> hris, final Set<byte[]> oldFamilies) {
    try {
      Set<byte[]> newFamilies = this.htd.getFamiliesKeys();
      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
      for (byte[] familyName: oldFamilies) {
        if (!newFamilies.contains(familyName)) {
          LOG.debug("Removing family=" + Bytes.toString(familyName) +
                    " from table=" + this.tableName);
          for (HRegionInfo hri: hris) {
            // Delete the family directory in FS for all the regions one by one
            mfs.deleteFamilyFromFS(hri, familyName);
          }
        }
      }
    } catch (IOException e) {
      LOG.warn("Unable to remove on-disk directories for the removed families", e);
    }
  }
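
Note that newFamilies.contains(familyName) only works on raw byte[] keys because HTableDescriptor.getFamiliesKeys() is backed by a map ordered with a byte-wise comparator (true of the HTableDescriptor implementations these snippets come from); a plain HashSet<byte[]> compares arrays by identity and would always miss. A small self-contained illustration:

import java.util.HashSet;
import java.util.Set;
import java.util.TreeSet;
import org.apache.hadoop.hbase.util.Bytes;

public class ByteArraySetDemo {
  public static void main(String[] args) {
    Set<byte[]> hashed = new HashSet<byte[]>();
    hashed.add(Bytes.toBytes("cf"));
    // false: byte[] uses identity-based equals/hashCode
    System.out.println(hashed.contains(Bytes.toBytes("cf")));

    Set<byte[]> ordered = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    ordered.add(Bytes.toBytes("cf"));
    // true: TreeSet lookups use the byte-wise comparator
    System.out.println(ordered.contains(Bytes.toBytes("cf")));
  }
}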


      cpHost.postTruncateTableHandler(this.tableName);
    }
  }

  private void recreateTable(final List<HRegionInfo> regions) throws IOException {
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempdir = mfs.getTempDir();
    FileSystem fs = mfs.getFileSystem();

    AssignmentManager assignmentManager = this.masterServices.getAssignmentManager();

    // 1. Set table znode
    CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName);
    try {
      // 1. Create Table Descriptor
      new FSTableDescriptors(server.getConfiguration())
        .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false);
      Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName);
      Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName);

      HRegionInfo[] newRegions;
      if (this.preserveSplits) {
        newRegions = regions.toArray(new HRegionInfo[regions.size()]);
        LOG.info("Truncate will preserve " + newRegions.length + " regions");
        // ...

      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
            // ...
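
Whether the shutdown handler takes the prepareLogReplay() branch or the splitLog() branch above is driven by the distributed log replay setting. A hedged sketch of reading that flag; the key name matches the 0.98-era code these snippets appear to come from, and the default shown is an assumption that varies by release:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LogReplayFlagSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // When true the master marks regions as recovering and replays WAL edits
    // while they are online (prepareLogReplay); when false it keeps regions
    // offline until splitLog() finishes splitting the dead server's WALs.
    boolean distributedLogReplay =
        conf.getBoolean("hbase.master.distributed.log.replay", false);
    System.out.println("distributedLogReplay=" + distributedLogReplay);
  }
}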

    // NOTE: At this point we still have data on disk, but nothing in hbase:meta
    //       if the rename below fails, hbck will report an inconsistency.
    // -----------------------------------------------------------------------

    // 2. Move the table in /hbase/.tmp
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    // 3. Archive regions from FS (temp directory)
    FileSystem fs = mfs.getFileSystem();
    for (HRegionInfo hri: regions) {
      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
      HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
    }

    // 4. Delete table directory from FS (temp directory)
    if (!fs.delete(tempTableDir, true)) {
      // ...
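
HFileArchiver.archiveRegion moves each region's store files into the cluster archive directory rather than deleting them outright; the deleteArchiveDirectory test helper further down this page derives that directory the same way. A minimal sketch of locating it (the helper method name is illustrative):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.MasterFileSystem;

public class ArchiveDirSketch {
  // Where archived region files land: <rootdir>/archive
  static Path archiveDir(MasterFileSystem mfs) {
    return new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
  }
}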

    }

    String snapshotName = snapshot.getName();
    LOG.debug("Deleting snapshot: " + snapshotName);
    // resolve the completed snapshot's directory so it can be deleted
    MasterFileSystem fs = master.getMasterFileSystem();
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);

    // delete the existing snapshot
    if (!fs.getFileSystem().delete(snapshotDir, true)) {
      throw new HBaseSnapshotException("Failed to delete snapshot directory: " + snapshotDir);
    }

    // call coproc post hook
    if (cpHost != null) {
      // ...
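
getCompletedSnapshotDir resolves a directory named after the snapshot under the snapshot root inside the cluster root dir (.hbase-snapshot in the versions these snippets come from). A sketch built from the same calls, checking for the directory before acting on it (the class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class SnapshotDirSketch {
  // Resolve a completed snapshot's directory and test for its existence.
  static boolean completedSnapshotExists(MasterFileSystem mfs, String snapshotName)
      throws IOException {
    Path snapshotDir =
        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, mfs.getRootDir());
    return mfs.getFileSystem().exists(snapshotDir);
  }
}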

   *  -  [if something fails here: you need to run hbck to fix hbase:meta entries]
   * The passed-in list is modified by this method.
   */
  @Override
  protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
    MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
    HConnection conn = masterServices.getShortCircuitConnection();
    FileSystem fs = fileSystemManager.getFileSystem();
    Path rootDir = fileSystemManager.getRootDir();
    TableName tableName = hTableDescriptor.getTableName();

    try {
      // 1. Update descriptor
      this.masterServices.getTableDescriptors().add(hTableDescriptor);
      // ...
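
Step 1 above persists the modified descriptor through the master's TableDescriptors cache; the same descriptor can then be read straight back from HDFS, which is what the verification test at the bottom of this page does with FSTableDescriptors.getTableDescriptorFromFs. A compact sketch of that round trip (the DescriptorRoundTrip class and reload method are illustrative names):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

public class DescriptorRoundTrip {
  static HTableDescriptor reload(MasterServices masterServices, TableName tableName)
      throws IOException {
    MasterFileSystem mfs = masterServices.getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    // Reads back the descriptor that getTableDescriptors().add(...) wrote.
    return FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  }
}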

  // ==========================================================================
  //  Helpers
  // ==========================================================================
  private void logFSTree() throws IOException {
    MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
  }

   * @return list of the corrupted HFiles
   * @throws IOException on unexpected error reading the FS
   */
  public static ArrayList<String> corruptSnapshot(final HBaseTestingUtility util,
      final String snapshotName) throws IOException {
    final MasterFileSystem mfs = util.getHBaseCluster().getMaster().getMasterFileSystem();
    final FileSystem fs = mfs.getFileSystem();

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
                                                                        mfs.getRootDir());
    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    final TableName table = TableName.valueOf(snapshotDesc.getTable());

    final ArrayList<String> corruptedFiles = new ArrayList<String>();
    final Configuration conf = util.getConfiguration();
    // ...

  }

  public static void deleteArchiveDirectory(final HBaseTestingUtility util)
      throws IOException {
    // Ensure the archive directory is empty
    MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
    mfs.getFileSystem().delete(archiveDir, true);
  }

    // Verify descriptor from master
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    verifyTableDescriptor(htd, tableName, families);

    // Verify descriptor from HDFS
    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
    verifyTableDescriptor(htd, tableName, families);
  }
