Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem
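
The excerpts below come from Apache HBase and show org.apache.hadoop.fs.FileSystem in real use. As a primer, here is a minimal, self-contained sketch of the core calls the excerpts rely on; the paths and file contents are illustrative, not taken from HBase:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves against fs.defaultFS (the local filesystem unless configured otherwise).
    FileSystem fs = FileSystem.get(conf);

    Path dir = new Path("/tmp/fs-example");  // illustrative path
    if (!fs.exists(dir)) {
      fs.mkdirs(dir);
    }

    // Create a file (overwriting any previous one) and write a few bytes.
    Path file = new Path(dir, "hello.txt");
    try (FSDataOutputStream out = fs.create(file, true)) {
      out.writeUTF("hello, hadoop");
    }

    // List the directory, then remove it recursively.
    for (FileStatus status : fs.listStatus(dir)) {
      System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
    }
    fs.delete(dir, true);
  }
}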



  private void splitStoreFile(final StoreFile sf, final Path splitdir)
  throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    byte [] family = sf.getFamily();
    // Write a reference to the bottom half of the parent's store file for
    // daughter a, then one to the top half for daughter b.
    String encoded = this.hri_a.getEncodedName();
    Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
    encoded = this.hri_b.getEncodedName();
    storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.top);
  }


   */
  HRegion createDaughterRegion(final HRegionInfo hri,
      final RegionServerServices rsServices)
  throws IOException {
    // Package private so unit tests have access.
    FileSystem fs = this.parent.getFilesystem();
    Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(),
      this.splitdir, hri);
    HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
      this.parent.getLog(), fs, this.parent.getConf(),
      hri, this.parent.getTableDesc(), rsServices);

   * @return True if we successfully rolled back, false if we got to the point
   * of no return and so now need to abort the server to minimize damage.
   */
  public boolean rollback(final Server server, final RegionServerServices services)
  throws IOException {
    boolean result = true;
    FileSystem fs = this.parent.getFilesystem();
    ListIterator<JournalEntry> iterator =
      this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
      JournalEntry je = iterator.previous();

   * @param r
   * @throws IOException
   */
  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
    Path splitdir = getSplitDir(r);
    FileSystem fs = r.getFilesystem();
    if (!fs.exists(splitdir)) return;
    // Look at the splitdir.  It could have the encoded names of the daughter
    // regions we tried to make.  See if the daughter regions actually got made
    // out under the tabledir.  If here under splitdir still, then the split did
    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
    // where we successfully created daughter a but regionserver crashed during
    // the creation of region b.  In this case, there'll be an orphan daughter
    // dir in the filesystem.  TODO: Fix.
    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
    for (int i = 0; i < daughters.length; i++) {
      cleanupDaughterRegion(fs, r.getTableDir(),
        daughters[i].getPath().getName());
    }
    cleanupSplitDir(r.getFilesystem(), splitdir);
  }
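
FSUtils.DirFilter used above is an HBase helper built on Hadoop's PathFilter interface. Here is a sketch of an equivalent directory-only filter, assuming Hadoop 2+ where FileStatus.isDirectory() replaces the older isDir():

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;

/** Accepts only paths that resolve to directories, mirroring FSUtils.DirFilter. */
class DirectoryFilter implements PathFilter {
  private final FileSystem fs;

  DirectoryFilter(FileSystem fs) {
    this.fs = fs;
  }

  @Override
  public boolean accept(Path path) {
    try {
      return fs.getFileStatus(path).isDirectory();
    } catch (IOException e) {
      // Treat unreadable paths as non-matching rather than failing the whole listing.
      return false;
    }
  }
}

// Usage: FileStatus[] daughters = fs.listStatus(splitdir, new DirectoryFilter(fs));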

  private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
      String archiveStartTime) throws IOException {
    // build path as it should be in the archive
    String filename = currentFile.getName();
    Path archiveFile = new Path(archiveDir, filename);
    FileSystem fs = currentFile.getFileSystem();

    // if the file already exists in the archive, move that one to a timestamped backup. This is a
    // really, really unlikely situation, where we get the same name for the existing file, but
    // it is included just for that one-in-a-trillion chance.
    if (fs.exists(archiveFile)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
            + "timestamped backup and overwriting current.");
      }

      // move the archive file to the stamped backup
      Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
      if (!fs.rename(archiveFile, backedupArchiveFile)) {
        LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
            + ", deleting existing file in favor of newer.");
        // if we can't rename it, try to delete the existing file instead
        if (!fs.delete(archiveFile, false)) {
          throw new IOException("Couldn't delete existing archive file (" + archiveFile
              + ") or rename it to the backup file (" + backedupArchiveFile
              + ") to make room for similarly named file.");
        }
      }
      LOG.debug("Backed up archive file from: " + archiveFile);
    }

    LOG.debug("No existing file in archive for:" + archiveFile +
        ", free to archive original file.");

    // at this point, we should have a free spot for the archive file
    boolean success = false;
    for (int i = 0; !success && i < DEFAULT_RETRIES_NUMBER; ++i) {
      if (i > 0) {
        // Ensure that the archive directory exists.
        // The previous "move to archive" operation has failed probably because
        // the cleaner has removed our archive directory (HBASE-7643).
        // (we're in a retry loop, so don't worry too much about the exception)
        try {
          if (!fs.exists(archiveDir)) {
            if (fs.mkdirs(archiveDir)) {
              LOG.debug("Created archive directory:" + archiveDir);
            }
          }
        } catch (IOException e) {
          LOG.warn("Failed to create the archive directory: " + archiveDir, e);
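
The excerpt above sidelines an existing archive file to a timestamped backup before moving a new file into place, recreating the archive directory inside the retry loop if a cleaner removed it. A condensed sketch of that rename-with-backup pattern; the class and method names here are illustrative, not HBase API:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Moves src to dst; if dst already exists, sidelines it to a timestamped backup first. */
final class SafeMove {
  static boolean moveWithBackup(FileSystem fs, Path src, Path dst) throws IOException {
    if (fs.exists(dst)) {
      Path backup = new Path(dst.getParent(), dst.getName() + "." + System.currentTimeMillis());
      // Prefer preserving the old file; fall back to deleting it if the rename fails.
      if (!fs.rename(dst, backup) && !fs.delete(dst, false)) {
        throw new IOException("Could not back up or delete existing " + dst);
      }
    }
    // The target directory may have been removed since we last checked; recreate it.
    Path parent = dst.getParent();
    if (!fs.exists(parent) && !fs.mkdirs(parent)) {
      throw new IOException("Could not (re)create " + parent);
    }
    return fs.rename(src, dst);
  }
}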

        if (overlapsToSideline > maxOverlapsToSideline) {
          overlapsToSideline = maxOverlapsToSideline;
        }
        List<HbckInfo> regionsToSideline =
          RegionSplitCalculator.findBigRanges(bigOverlap, overlapsToSideline);
        FileSystem fs = FileSystem.get(conf);
        for (HbckInfo regionToSideline: regionsToSideline) {
          try {
            LOG.info("Closing region: " + regionToSideline);
            closeRegion(regionToSideline);
          } catch (IOException ioe) {

  @Deprecated
  public HTableDescriptor getTableDesc() {
    Configuration c = HBaseConfiguration.create();
    c.set("fs.defaultFS", c.get(HConstants.HBASE_DIR));
    c.set("fs.default.name", c.get(HConstants.HBASE_DIR));
    FileSystem fs;
    try {
      fs = FileSystem.get(c);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
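
getTableDesc() above (and setTableDesc() below) obtain a FileSystem from configuration alone. FileSystem.get(conf) resolves against fs.defaultFS, while FileSystem.get(uri, conf) addresses a specific filesystem regardless of the default. A short sketch; the endpoints are illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetFileSystems {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://namenode:8020");  // illustrative endpoint

    // Bound to whatever filesystem fs.defaultFS names.
    FileSystem defaultFs = FileSystem.get(conf);

    // An explicit URI wins over the configured default.
    FileSystem otherFs = FileSystem.get(new URI("hdfs://other-cluster:8020"), conf);

    System.out.println(defaultFs.getUri() + " vs " + otherFs.getUri());
  }
}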

   * @deprecated Do not use; expensive call
   */
  @Deprecated
  public void setTableDesc(HTableDescriptor newDesc) {
    Configuration c = HBaseConfiguration.create();
    FileSystem fs;
    try {
      fs = FileSystem.get(c);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

   * and the number of the files to copy.
   */
  private static Path[] createInputFiles(final Configuration conf,
      final List<Pair<Path, Long>> snapshotFiles, int mappers)
      throws IOException, InterruptedException {
    FileSystem fs = FileSystem.get(conf);
    Path inputFolderPath = getInputFolderPath(fs, conf);
    LOG.debug("Input folder location: " + inputFolderPath);

    List<List<Path>> splits = getBalancedSplits(snapshotFiles, mappers);
    Path[] inputFiles = new Path[splits.size()];
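
createInputFiles() above splits the snapshot file list into balanced groups and materializes one input file per mapper (HBase's ExportSnapshot writes these as SequenceFiles). A simplified sketch of the same idea using plain text files; the folder and file names are assumptions:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class InputFileWriter {
  /** Writes one newline-delimited file list per split and returns the created paths. */
  static Path[] writeSplits(Configuration conf, List<List<String>> splits) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    Path inputFolder = new Path("/tmp/export-inputs");  // assumed working folder
    fs.mkdirs(inputFolder);

    Path[] inputFiles = new Path[splits.size()];
    for (int i = 0; i < splits.size(); i++) {
      inputFiles[i] = new Path(inputFolder, "mapper-" + i + ".txt");
      try (FSDataOutputStream out = fs.create(inputFiles[i], true)) {
        for (String file : splits.get(i)) {
          out.writeBytes(file + "\n");
        }
      }
    }
    return inputFiles;
  }
}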

      printUsageAndExit();
    }

    Configuration conf = getConf();
    Path inputRoot = FSUtils.getRootDir(conf);
    FileSystem inputFs = FileSystem.get(conf);
    FileSystem outputFs = FileSystem.get(outputRoot.toUri(), conf);

    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
    Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshotName, outputRoot);
    Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, outputRoot);

    // Check if the snapshot already exists
    if (outputFs.exists(outputSnapshotDir)) {
      System.err.println("The snapshot '" + snapshotName +
        "' already exists in the destination: " + outputSnapshotDir);
      return 1;
    }

    // Check if the snapshot is already in progress
    if (outputFs.exists(snapshotTmpDir)) {
      System.err.println("A snapshot with the same name '" + snapshotName + "' is in-progress");
      return 1;
    }

    // Step 0 - Extract snapshot files to copy
    final List<Pair<Path, Long>> files = getSnapshotFiles(inputFs, snapshotDir);

    // Step 1 - Copy fs1:/.snapshot/<snapshot> to fs2:/.snapshot/.tmp/<snapshot>
    // The snapshot references must be copied before the hfiles, otherwise the cleaner
    // will remove them because they are unreferenced.
    try {
      FileUtil.copy(inputFs, snapshotDir, outputFs, snapshotTmpDir, false, false, conf);
    } catch (IOException e) {
      System.err.println("Failed to copy the snapshot directory: from=" + snapshotDir +
        " to=" + snapshotTmpDir);
      e.printStackTrace(System.err);
      return 1;
    }

    // Step 2 - Start MR Job to copy files
    // The snapshot references must be copied before the files, otherwise the files get removed
    // by the HFileArchiver, since they have no references.
    try {
      if (!runCopyJob(inputRoot, outputRoot, files, verifyChecksum,
          filesUser, filesGroup, filesMode, mappers)) {
        throw new ExportSnapshotException("Snapshot export failed!");
      }

      // Step 3 - Rename fs2:/.snapshot/.tmp/<snapshot> to fs2:/.snapshot/<snapshot>
      if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) {
        System.err.println("Snapshot export failed!");
        System.err.println("Unable to rename snapshot directory from=" +
                           snapshotTmpDir + " to=" + outputSnapshotDir);
        return 1;
      }

      return 0;
    } catch (Exception e) {
      System.err.println("Snapshot export failed!");
      e.printStackTrace(System.err);
      outputFs.delete(outputSnapshotDir, true);
      return 1;
    }
  }
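
The export above follows a copy-to-temp-then-publish-by-rename sequence, a common commit protocol on filesystems with atomic rename. A reduced sketch of the cross-filesystem copy and publish steps; the endpoints and paths are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CrossFsCopy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path src = new Path("hdfs://cluster-a:8020/data/snapshot");        // illustrative
    Path tmp = new Path("hdfs://cluster-b:8020/data/.tmp/snapshot");   // staging location
    Path dst = new Path("hdfs://cluster-b:8020/data/snapshot");        // final location

    FileSystem srcFs = FileSystem.get(src.toUri(), conf);
    FileSystem dstFs = FileSystem.get(dst.toUri(), conf);

    // Copy into the staging location first (deleteSource=false, overwrite=false),
    // then publish with a single rename so readers never see a partial copy.
    if (!FileUtil.copy(srcFs, src, dstFs, tmp, false, false, conf)
        || !dstFs.rename(tmp, dst)) {
      dstFs.delete(tmp, true);
      throw new Exception("copy-and-publish failed");
    }
  }
}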
