Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.Path
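
Before the excerpts, here is a minimal, self-contained sketch of the core Path idioms they all rely on: child-path construction, name and parent extraction, and directory listing. The class name, directory layout, and file names below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathBasics {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // new Path(parent, child) joins path components
    Path tableDir = new Path("/hbase/mytable");
    Path regionDir = new Path(tableDir, "region-0");
    Path regioninfoFile = new Path(regionDir, ".regioninfo");

    System.out.println(regioninfoFile.getName());    // ".regioninfo"
    System.out.println(regioninfoFile.getParent());  // "/hbase/mytable/region-0"

    // existence checks and directory listings, as in the excerpts below
    if (fs.exists(regionDir)) {
      for (FileStatus status : fs.listStatus(regionDir)) {
        System.out.println(status.getPath().getName());
      }
    }
  }
}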


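Apparently from HBase's hbck consistency checker: scanning a region's directory, building the .regioninfo child Path, and flagging regions that contain nothing but recovered edits.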
                           hbi.getHdfsRegionDir());
            }

            he.hdfsRegionDir = regionDir.getPath();
            he.hdfsRegionDirModTime = regionDir.getModificationTime();
            Path regioninfoFile = new Path(he.hdfsRegionDir, HRegion.REGIONINFO_FILE);
            he.hdfsRegioninfoFilePresent = fs.exists(regioninfoFile);
            // we add the region to the orphan list when we attempt to read .regioninfo

            // Set a flag if this region contains only edits
            // This is a special case: such a region can be left behind after a split
            he.hdfsOnlyEdits = true;
            FileStatus[] subDirs = fs.listStatus(regionDir.getPath());
            Path ePath = HLog.getRegionDirRecoveredEditsDir(regionDir.getPath());
            for (FileStatus subDir : subDirs) {
              String sdName = subDir.getPath().getName();
              if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) {
                he.hdfsOnlyEdits = false;
                break;
              }
            }
            hbi.hdfsEntry = he;


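HBase master startup: the old-log directory and the HFile archive directory are resolved and handed to daemon cleaner threads.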
        this, conf, getMasterFileSystem().getFileSystem(),
        getMasterFileSystem().getOldLogDir());
    Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

    // start the HFile archive cleaner thread
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterFileSystem()
        .getFileSystem(), archiveDir);
    Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

    // Put up info server.

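Archiving an entire region: the root, table, and region directories are derived from the configuration and composed as nested Paths.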
  /**
   * Remove an entire region from the filesystem by moving its files into the archive.
   * @param info HRegionInfo for region to be deleted
   * @throws IOException
   */
  public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
      throws IOException {
    Path rootDir = FSUtils.getRootDir(conf);
    archiveRegion(fs, rootDir, HTableDescriptor.getTableDir(rootDir, info.getTableName()),
      HRegion.getRegionDir(rootDir, info));
  }

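Inside the archive step: a sanity check that the region directory lives under the table directory before the region's archive Path is computed.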
      return false;
    }

    // make sure the regiondir lives under the tabledir
    Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);

    LOG.debug("Have an archive directory, preparing to move files");
    FileStatusConverter getAsFile = new FileStatusConverter(fs);
    // otherwise, we attempt to archive the store files

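Archiving a column family: the family directory is a Path built from the table directory, the region's encoded name, and the family name.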
  /**
   * Remove from the filesystem all the store files for the given family, moving them
   * into the archive.
   * @param family the family hosting the store files
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveFamily(FileSystem fs, Configuration conf,
      HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
    FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir, null);
    if (storeFiles == null) {
      LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
          ", family=" + Bytes.toString(family));
      return;
    }

    FileStatusConverter getAsFile = new FileStatusConverter(fs);
    Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);

    // do the actual archive
    if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
      throw new IOException("Failed to archive/delete all the files for region:"
          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)

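Computing the store archive Path and creating it with mkdirs before archiving compacted files.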
    // build the archive path
    if (parent == null || family == null) throw new IOException(
        "Need to have a parent region and a family to archive from.");

    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);

    // make sure the archive directory exists; if we cannot create it, we cannot archive
    if (!fs.mkdirs(storeArchiveDir)) {
      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
          + Bytes.toString(family) + ", deleting compacted files instead.");

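Archiving a single store file into its store's archive directory.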
  /**
   * Archive the given store file into the store's archive directory.
   * @param storeFile file to be archived
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
      Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
    // make sure the archive directory exists; if we cannot create it, we cannot archive
    if (!fs.mkdirs(storeArchiveDir)) {
      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
          + Bytes.toString(family) + ", deleting compacted files instead.");
    }

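Recursing into a subdirectory: the directory's name is appended to the base archive Path, and the children are archived under it.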
          }
        } else {
          // otherwise it's a directory, and we need to archive all of its files
          LOG.debug(file + " is a directory, archiving children files");
          // so we append the directory name to the base archive path
          Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
          // and then get all the files from that directory and attempt to
          // archive those too
          Collection<File> children = file.getChildren();
          failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
        }

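Handling a name collision in the archive: an existing file with the same name is first moved to a timestamped backup.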
  /** Resolve any name collision with an existing archive file, then archive the given file. */
  private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile,
      String archiveStartTime) throws IOException {
    // build path as it should be in the archive
    String filename = currentFile.getName();
    Path archiveFile = new Path(archiveDir, filename);
    FileSystem fs = currentFile.getFileSystem();

    // if the file already exists in the archive, move that one to a timestamped backup. This is a
    // really, really unlikely situation, where we get the same name for the existing file, but
    // it is included just for that one-in-a-trillion chance.
    if (fs.exists(archiveFile)) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
            + "timestamped backup and overwriting current.");
      }

      // move the archive file to the stamped backup
      Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
      if (!fs.rename(archiveFile, backedupArchiveFile)) {
        LOG.error("Could not rename archive file to backup: " + backedupArchiveFile
            + ", deleting existing file in favor of newer.");
        // try to delete the existing file, if we can't rename it
        if (!fs.delete(archiveFile, false)) {
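
The same collision-handling idea in a compact, self-contained form, using only the stock FileSystem API. This is a sketch, not HBase's exact logic; the class and helper names are made up.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoves {
  // hypothetical helper: free up the archive name, then move the new file in
  static void backupThenReplace(FileSystem fs, Path archiveFile, Path newFile)
      throws IOException {
    if (fs.exists(archiveFile)) {
      // rename the existing file to a timestamped backup name
      Path backup = new Path(archiveFile.getParent(),
          archiveFile.getName() + "." + System.currentTimeMillis());
      if (!fs.rename(archiveFile, backup) && !fs.delete(archiveFile, false)) {
        throw new IOException("Could not back up or remove " + archiveFile);
      }
    }
    // the target name is now free; move the new file into place
    if (!fs.rename(newFile, archiveFile)) {
      throw new IOException("Could not archive " + newFile);
    }
  }
}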

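Moving a file after closing it: the rename is delegated to the owning FileSystem.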
    /**
     * Close the underlying file and rename it to <code>dest</code>.
     * @return <tt>true</tt> on success
     * @throws IOException
     */
    public boolean moveAndClose(Path dest) throws IOException {
      this.close();
      Path p = this.getPath();
      return fs.rename(p, dest);
    }
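
Note that FileSystem.rename typically reports failure by returning false rather than throwing, so callers of moveAndClose should check the returned boolean.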
