Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing

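A DirectoryListing is one page of a paged directory listing from the NameNode: getPartialListing() returns the entries in the current batch, getLastName() is the cursor to pass as startAfter on the next call, hasMore() reports whether further batches exist, and getRemainingEntries() estimates how many entries are still unfetched. Every example below follows the same cursor loop; as a minimal sketch, assuming an already-initialized DFSClient named dfs and a directory path string src (both stand-ins for this illustration):

  // Page through a directory with the startAfter cursor.
  byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
  DirectoryListing page;
  do {
    page = dfs.listPaths(src, startAfter);        // fetch the next batch
    if (page == null) {
      break;                                      // the directory no longer exists
    }
    for (HdfsFileStatus status : page.getPartialListing()) {
      System.out.println(status.getLocalName());  // process one entry
    }
    startAfter = page.getLastName();              // advance the cursor
  } while (page.hasMore());

Passing the last returned name back to the server keeps each RPC bounded regardless of directory size.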

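This first example looks like test code: it writes a single HBase Put so that at least one write-ahead log file exists, then lists the log directory through the client's listPaths and asserts the listing is non-empty.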
      // insert one put to ensure a minimal size
      Put p = new Put(sb);
      p.add(sb, sb, sb);
      h.put(p);

      DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
      HdfsFileStatus[] hfs = dl.getPartialListing();

      // As we wrote a put, we should have at least one log file.
      Assert.assertTrue(hfs.length >= 1);
      for (HdfsFileStatus hf : hfs) {
        LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
        // ... remainder of the test elided ...


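Next, the server-side entry point, apparently FSNamesystem.getListing: it acquires the read lock and, when permissions are enabled, checks access before producing one batch of results.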
  /**
   * Get a partial listing of the indicated directory.
   *
   * @throws IOException if other I/O error occurred
   */
  DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation)
    throws AccessControlException, UnresolvedLinkException, IOException {
    DirectoryListing dl;
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
      if (isPermissionEnabled) {
        if (dir.isDir(src)) {
          // ... remainder of the permission check and listing logic elided ...

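At the RPC layer, a ClientProtocol implementation (likely NameNodeRpcServer) simply delegates to the namesystem, then bumps the getListing operation counter and the count of files returned in this batch.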
  @Override // ClientProtocol
  public DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation)
  throws IOException {
    DirectoryListing files = namesystem.getListing(
        src, startAfter, needLocation);
    if (files != null) {
      metrics.incrGetListingOps();
      metrics.incrFilesInGetListingOps(files.getPartialListing().length);
    }
    return files;
  }

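An fsck-style checker walking a directory tree: each iteration fetches one batch, checks every entry, and advances the cursor via getLastName() until hasMore() returns false.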
    String path = file.getFullName(parent);
    boolean isOpen = false;

    if (file.isDir()) {
      byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
      DirectoryListing thisListing;
      if (showFiles) {
        out.println(path + " <dir>");
      }
      res.totalDirs++;
      do {
        assert lastReturnedName != null;
        thisListing = namenode.getListing(path, lastReturnedName);
        if (thisListing == null) {
          return;
        }
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
          check(path, files[i], res);
        }
        lastReturnedName = thisListing.getLastName();
      } while (thisListing.hasMore());
      return;
    }
    long fileLen = file.getLen();
    // Get block locations without updating the file access time
    // and without block access tokens


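An older NameNode variant of the same RPC method; note that here the operation counter is incremented even when the listing comes back null.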
  @Override
  public DirectoryListing getListing(String src, byte[] startAfter)
  throws IOException {
    DirectoryListing files = namesystem.getListing(src, startAfter);
    myMetrics.incrNumGetListingOps();
    if (files != null) {
      myMetrics.incrNumFilesInGetListingOps(files.getPartialListing().length);
    }
    return files;
  }

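DistributedFileSystem.listStatus assembled from the paged protocol: a single-batch directory returns immediately, while a larger one presizes an ArrayList with getRemainingEntries() and keeps fetching, counting each round trip as a large read op. In this version a directory that disappears mid-listing yields null.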
  @Override
  public FileStatus[] listStatus(Path p) throws IOException {
    String src = getPathName(p);
   
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(
        src, HdfsFileStatus.EMPTY_NAME);
   
    if (thisListing == null) { // the directory does not exist
      return null;
    }
   
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
      FileStatus[] stats = new FileStatus[partialListing.length];
      for (int i = 0; i < partialListing.length; i++) {
        stats[i] = makeQualified(partialListing[i], p);
      }
      statistics.incrementReadOps(1);
      return stats;
    }
   
    // The directory has more entries than fit in one batch;
    // estimate the total so the list can be presized, then fetch the rest.
    int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing =
      new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(makeQualified(fileStatus, p));
    }
    statistics.incrementLargeReadOps(1);

    // now fetch more entries
    do {
      thisListing = dfs.listPaths(src, thisListing.getLastName());
     
      if (thisListing == null) {
        return null; // the directory is deleted
      }
     
      partialListing = thisListing.getPartialListing();
      for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(makeQualified(fileStatus, p));
      }
      statistics.incrementLargeReadOps(1);
    } while (thisListing.hasMore());

    return listing.toArray(new FileStatus[listing.size()]);
  }

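A later revision of the same listStatus: a directory that is missing, or deleted while being iterated, now raises FileNotFoundException instead of returning null.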
  @Override
  public FileStatus[] listStatus(Path p) throws IOException {
    String src = getPathName(p);

    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(
        src, HdfsFileStatus.EMPTY_NAME);

    if (thisListing == null) { // the directory does not exist
      throw new FileNotFoundException("File " + p + " does not exist.");
    }
   
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
      FileStatus[] stats = new FileStatus[partialListing.length];
      for (int i = 0; i < partialListing.length; i++) {
        stats[i] = makeQualified(partialListing[i], p);
      }
      return stats;
    }

    // The directory has more entries than fit in one batch;
    // estimate the total so the list can be presized, then fetch the rest.
    int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing =
      new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(makeQualified(fileStatus, p));
    }
    // now fetch more entries
    do {
      thisListing = dfs.listPaths(src, thisListing.getLastName());
      if (thisListing == null) { // the directory is deleted
        throw new FileNotFoundException("File " + p + " does not exist.");
      }
      partialListing = thisListing.getPartialListing();
      for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(makeQualified(fileStatus, p));
      }
    } while (thisListing.hasMore());
    return listing.toArray(new FileStatus[listing.size()]);
  }

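An earlier cut of the fsck checker shown above; its tail differs in fetching block locations with getBlockLocationsNoATime so the scan does not update file access times.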
    String path = file.getFullName(parent);
    boolean isOpen = false;

    if (file.isDir()) {
      byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
      DirectoryListing thisListing;
      if (showFiles) {
        out.println(path + " <dir>");
      }
      res.totalDirs++;
      do {
        assert lastReturnedName != null;
        thisListing = namenode.getListing(path, lastReturnedName);
        if (thisListing == null) {
          return;
        }
        HdfsFileStatus[] files = thisListing.getPartialListing();
        for (int i = 0; i < files.length; i++) {
          check(path, files[i], res);
        }
        lastReturnedName = thisListing.getLastName();
      } while (thisListing.hasMore());
      return;
    }
    long fileLen = file.getLen();
    LocatedBlocks blocks = namenode.getBlockLocationsNoATime(path, 0, fileLen);
    if (blocks == null) { // the file is deleted
      // ... deleted-file handling elided ...

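Likely the datanode's directory-browsing JSP: it pages through listPaths and renders each batch as rows of an HTML table, linking every entry back to the browsing servlet.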
        out.print("<a href=\"" + req.getRequestURL() + "?dir=" + parent
            + "&namenodeInfoPort=" + namenodeInfoPort
            + JspHelper.SET_DELEGATION + tokenString
            + "\">Go to parent directory</a><br>");

      DirectoryListing thisListing =
        dfs.listPaths(target, HdfsFileStatus.EMPTY_NAME);
      if (thisListing == null || thisListing.getPartialListing().length == 0) {
        out.print("Empty directory");
      } else {
        JspHelper.addTableHeader(out);
        int row = 0;
        JspHelper.addTableRow(out, headings, row++);
        String[] cols = new String[headings.length];
        do {
          HdfsFileStatus[] files = thisListing.getPartialListing();
          for (int i = 0; i < files.length; i++) {
            String localFileName = files[i].getLocalName();
            // Get the location of the first block of the file
            if (!files[i].isDir()) {
              cols[1] = "file";
              cols[2] = StringUtils.byteDesc(files[i].getLen());
              cols[3] = Short.toString(files[i].getReplication());
              cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
            } else {
              cols[1] = "dir";
              cols[2] = "";
              cols[3] = "";
              cols[4] = "";
            }
            String datanodeUrl = req.getRequestURL() + "?dir="
              + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
              + "&namenodeInfoPort=" + namenodeInfoPort
              + JspHelper.SET_DELEGATION + tokenString;
            cols[0] = "<a href=\"" + datanodeUrl + "\">"
              + localFileName + "</a>";
            cols[5] = FsShell.dateForm.format(new Date((files[i]
              .getModificationTime())));
            cols[6] = files[i].getPermission().toString();
            cols[7] = files[i].getOwner();
            cols[8] = files[i].getGroup();
            JspHelper.addTableRow(out, cols, row++);
          }
          if (!thisListing.hasMore()) {
            break;
          }
          thisListing = dfs.listPaths(target, thisListing.getLastName());
        } while (thisListing != null);
        JspHelper.addTableFooter(out);
      }
    }
    String namenodeHost = datanode.getNameNodeAddr().getHostName();

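Finally, a pre-metrics2 NameNode, where the counters are public metrics fields updated with inc().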
  /** {@inheritDoc} */
  @Override
  public DirectoryListing getListing(String src, byte[] startAfter)
  throws IOException {
    DirectoryListing files = namesystem.getListing(src, startAfter);
    if (files != null) {
      myMetrics.numGetListingOps.inc();
      myMetrics.numFilesInGetListingOps.inc(files.getPartialListing().length);
    }
    return files;
  }
