Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem.listStatus()
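A minimal, self-contained sketch of the basic call, assuming a default Configuration and a hypothetical /tmp directory (not taken from any of the excerpts below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStatusExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/tmp");              // hypothetical directory
        FileSystem fs = dir.getFileSystem(conf);  // filesystem that owns the path
        // listStatus returns one FileStatus per entry directly under dir
        for (FileStatus status : fs.listStatus(dir)) {
          System.out.println(status.getPath() + "\t" + status.getLen());
        }
      }
    }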


    // out under the tabledir.  If here under splitdir still, then the split did
    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
    // where we successfully created daughter a but regionserver crashed during
    // the creation of region b.  In this case, there'll be an orphan daughter
    // dir in the filesystem.  TODO: Fix.
    FileStatus[] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
    for (int i = 0; i < daughters.length; i++) {
      cleanupDaughterRegion(fs, r.getTableDir(),
        daughters[i].getPath().getName());
    }
    cleanupSplitDir(r.getFilesystem(), splitdir);
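FSUtils.DirFilter above is an HBase helper; a rough equivalent using only the public Hadoop API could be sketched as follows (silently skipping entries whose status cannot be read is an assumption, not taken from the excerpt):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    // Accepts only paths that resolve to directories.
    class DirOnlyFilter implements PathFilter {
      private final FileSystem fs;

      DirOnlyFilter(FileSystem fs) {
        this.fs = fs;
      }

      @Override
      public boolean accept(Path path) {
        try {
          return fs.getFileStatus(path).isDir();  // isDirectory() on newer Hadoop releases
        } catch (IOException e) {
          return false;                           // unreadable or vanished entries are skipped
        }
      }
    }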


      LOG.info("Moving files from " + src + " into containing region " + dst);
      // FileSystem.rename is inconsistent with directories -- if the
      // dst (foo/a) exists and is a dir, and the src (foo/b) is a dir,
      // it moves the src into the dst dir, resulting in (foo/a/b).  If
      // the dst does not exist and the src is a dir, the src is renamed to the dst (foo/a).
      for (FileStatus hfile : fs.listStatus(src)) {
        boolean success = fs.rename(hfile.getPath(), dst);
        if (success) {
          fileMoves++;
        }
      }
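A small sketch demonstrating the two rename behaviours the comment above describes, using hypothetical paths under /tmp:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameSemanticsDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // Case 1: dst exists and is a dir -> src is moved *into* it.
        Path src = new Path("/tmp/foo/b");
        Path dst = new Path("/tmp/foo/a");
        fs.mkdirs(src);
        fs.mkdirs(dst);
        fs.rename(src, dst);
        System.out.println(fs.exists(new Path(dst, "b")));  // true: /tmp/foo/a/b

        // Case 2: dst does not exist -> src is renamed *to* dst.
        Path src2 = new Path("/tmp/foo/c");
        Path dst2 = new Path("/tmp/foo/d");
        fs.mkdirs(src2);
        fs.rename(src2, dst2);
        System.out.println(fs.exists(dst2));  // true: contents now live at /tmp/foo/d
      }
    }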

    if (fs.isFile(p)) {
      return;
    }

    if (fs.getFileStatus(p).isDir()) {
      FileStatus[] fss = fs.listStatus(p);
      for (FileStatus status : fss) {
        debugLsr(conf, status.getPath(), errors);
      }
    }
  }
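The recursive listing in debugLsr above, stripped of the HBase error-reporting plumbing, could be sketched as:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class RecursiveLister {
      // Prints every path under root, depth-first, by recursing on directory entries.
      static void lsr(FileSystem fs, Path root) throws IOException {
        FileStatus status = fs.getFileStatus(root);
        System.out.println(status.getPath());
        if (status.isDir()) {                     // isDirectory() on newer Hadoop releases
          for (FileStatus child : fs.listStatus(root)) {
            lsr(fs, child.getPath());
          }
        }
      }
    }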

   * overlapping regions.
   */
  private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
    Path p = hi.getHdfsRegionDir();
    FileSystem fs = p.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(p);
    if (dirs == null) {
      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
          p + ". This dir could probably be deleted.");
      return ;
    }
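The null check above reflects the older listStatus contract, where a missing path yields null; newer Hadoop releases typically throw FileNotFoundException instead. A defensive helper covering both behaviours might look like this (the empty-array fallback is an assumption, not part of the excerpt):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class SafeList {
      // Returns an empty array rather than null or FileNotFoundException when dir is missing.
      static FileStatus[] listStatusSafe(FileSystem fs, Path dir) throws IOException {
        try {
          FileStatus[] entries = fs.listStatus(dir);  // may be null on older releases
          return entries == null ? new FileStatus[0] : entries;
        } catch (FileNotFoundException fnfe) {
          return new FileStatus[0];                   // newer releases throw instead
        }
      }
    }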

    for (FileStatus cf : dirs) {
      String cfName = cf.getPath().getName();
      // TODO Figure out what the special dirs are
      if (cfName.startsWith(".") || cfName.equals("splitlog")) continue;

      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());

   * @throws IOException
   */
  private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi) throws IOException {
    Path regionDir = hbi.getHdfsRegionDir();
    FileSystem fs = regionDir.getFileSystem(getConf());
    FileStatus[] subDirs = fs.listStatus(regionDir, new FSUtils.FamilyDirFilter(fs));
    for (FileStatus subdir : subDirs) {
      String columnfamily = subdir.getPath().getName();
      columns.add(columnfamily);
    }
    return columns;

    // list all tables from HDFS
    List<FileStatus> tableDirs = Lists.newArrayList();

    boolean foundVersionFile = false;
    FileStatus[] files = fs.listStatus(rootDir);
    for (FileStatus file : files) {
      String dirName = file.getPath().getName();
      if (dirName.equals(HConstants.VERSION_FILE_NAME)) {
        foundVersionFile = true;
      } else {

    }

    Path hbaseDir = new Path(getConf().get(HConstants.HBASE_DIR));
    FileSystem fs = hbaseDir.getFileSystem(getConf());
    User user = User.getCurrent();
    FileStatus[] files = fs.listStatus(hbaseDir);
    for (FileStatus file : files) {
      try {
        FSUtils.checkAccess(user, file, FsAction.WRITE);
      } catch (AccessControlException ace) {
        LOG.warn("Got AccessControlException when preCheckPermission ", ace);

    LOG.debug("Contained region dir after close and pause");
    debugLsr(contained.getHdfsRegionDir());

    // rename the contained into the container.
    FileSystem fs = targetRegionDir.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(contained.getHdfsRegionDir());

    if (dirs == null) {
      if (!fs.exists(contained.getHdfsRegionDir())) {
        LOG.warn("HDFS region dir " + contained.getHdfsRegionDir() + " already sidelined.");
      } else {

    if (!fs.exists(snapshotDir)) {
      return snapshotDescs;
    }

    // ignore all the snapshots in progress
    FileStatus[] snapshots = fs.listStatus(snapshotDir,
      new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
    // loop through all the completed snapshots
    for (FileStatus snapshot : snapshots) {
      Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
      // if the snapshot is bad
