Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.DirectoryListing
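DirectoryListing is the batched listing result that the NameNode hands back to clients: getPartialListing() returns one page of HdfsFileStatus entries, hasMore()/getRemainingEntries() report whether further pages exist, and getLastName() supplies the startAfter key for the next request. The snippets below all follow the same pagination pattern; here is a minimal, illustrative sketch of that loop (the class name ListAllExample and its helper method are hypothetical, not part of Hadoop):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

// Illustrative helper: collect every entry of a directory by paging through
// DirectoryListing batches, the same way the snippets below do.
public class ListAllExample {
  public static List<HdfsFileStatus> listAll(DFSClient client, String src)
      throws IOException {
    List<HdfsFileStatus> all = new ArrayList<HdfsFileStatus>();
    // First batch: start from the beginning of the directory.
    // A null listing means the directory does not exist.
    DirectoryListing batch = client.listPaths(src, HdfsFileStatus.EMPTY_NAME);
    while (batch != null) {
      for (HdfsFileStatus status : batch.getPartialListing()) {
        all.add(status);
      }
      if (!batch.hasMore()) {
        break; // last batch: no entries remaining on the NameNode
      }
      // Resume after the last name returned in the previous batch.
      batch = client.listPaths(src, batch.getLastName());
    }
    return all;
  }
}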


      LOG.debug("NFS READDIR fileId: " + handle.getFileId() + " cookie: "
          + cookie + " count: " + count);
    }

    HdfsFileStatus dirStatus = null;
    DirectoryListing dlisting = null;
    Nfs3FileAttributes postOpAttr = null;
    long dotdotFileId = 0;
    try {
      String dirFileIdPath = Nfs3Utils.getFileIdPath(handle);
      dirStatus = dfsClient.getFileInfo(dirFileIdPath);
      if (dirStatus == null) {
        LOG.info("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
      if (!dirStatus.isDir()) {
        LOG.error("Can't readdir for regular file, fileId:"
            + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
      }
      long cookieVerf = request.getCookieVerf();
      if ((cookieVerf != 0) && (cookieVerf != dirStatus.getModificationTime())) {
        LOG.error("CookierVerf mismatch. request cookierVerf:" + cookieVerf
            + " dir cookieVerf:" + dirStatus.getModificationTime());
        return new READDIR3Response(Nfs3Status.NFS3ERR_BAD_COOKIE);
      }

      if (cookie == 0) {
        // Get dotdot fileId
        String dotdotFileIdPath = dirFileIdPath + "/..";
        HdfsFileStatus dotdotStatus = dfsClient.getFileInfo(dotdotFileIdPath);

        if (dotdotStatus == null) {
          // This should not happen
          throw new IOException("Can't get path for handle path:"
              + dotdotFileIdPath);
        }
        dotdotFileId = dotdotStatus.getFileId();
      }

      // Get the list from the resume point
      byte[] startAfter;
      if (cookie == 0) {
        startAfter = HdfsFileStatus.EMPTY_NAME;
      } else {
        String inodeIdPath = Nfs3Utils.getFileIdPath(cookie);
        startAfter = inodeIdPath.getBytes();
      }
     
      dlisting = listPaths(dfsClient, dirFileIdPath, startAfter);
      postOpAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      if (postOpAttr == null) {
        LOG.error("Can't get path for fileId:" + handle.getFileId());
        return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
      }
    } catch (IOException e) {
      LOG.warn("Exception ", e);
      return new READDIR3Response(Nfs3Status.NFS3ERR_IO);
    }

    /**
     * Set up the dirents in the response. The fileId is used as the cookie,
     * with one exception. Without it, the Linux client either hangs on "ls"
     * (RHEL) or reports "Too many levels of symbolic links" (Ubuntu).
     *
     * The problem is that when the namespace is empty, only two entries are
     * returned, "." and "..". Both resolve to "/" and therefore carry the same
     * cookie (the root fileId). The Linux client then refuses to treat the
     * directory as a real directory: even though the NFS protocol defines the
     * cookie as opaque data, it does not accept an empty directory returning
     * the same cookie for both "." and "..".
     *
     * The workaround is to use 0 as the cookie for "." and to always return
     * "." as the first entry in the readdir/readdirplus response.
     */
    HdfsFileStatus[] fstatus = dlisting.getPartialListing();
    int n = (int) Math.min(fstatus.length, count - 2);
    boolean eof = (n >= fstatus.length) && (dlisting.getRemainingEntries() == 0);
   
    Entry3[] entries;
    if (cookie == 0) {
      entries = new Entry3[n + 2];
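      // Hypothetical continuation (this page truncates the method here): per
      // the comment above, "." gets cookie 0 and is returned first, ".." uses
      // its own fileId, and every child entry uses its fileId as the cookie.
      // The Entry3 constructor order (fileId, name, cookie) is an assumption
      // for this sketch, not taken verbatim from the original source.
      entries[0] = new Entry3(postOpAttr.getFileId(), ".", 0);
      entries[1] = new Entry3(dotdotFileId, "..", dotdotFileId);
      for (int i = 2; i < n + 2; i++) {
        HdfsFileStatus child = fstatus[i - 2];
        entries[i] = new Entry3(child.getFileId(), child.getLocalName(),
            child.getFileId());
      }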


  @Override
  public FileStatus[] listStatus(Path p) throws IOException {
    String src = getPathName(p);

    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = dfs.listPaths(
        src, HdfsFileStatus.EMPTY_NAME);

    if (thisListing == null) { // the directory does not exist
      throw new FileNotFoundException("File " + p + " does not exist.");
    }
   
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
      FileStatus[] stats = new FileStatus[partialListing.length];
      for (int i = 0; i < partialListing.length; i++) {
        stats[i] = makeQualified(partialListing[i], p);
      }
      statistics.incrementReadOps(1);
      return stats;
    }

    // The directory is too big to list in one batch; fetch the rest iteratively.
    // First, estimate the total number of entries in the directory.
    int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing =
      new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(makeQualified(fileStatus, p));
    }
    statistics.incrementLargeReadOps(1);
    // now fetch more entries
    do {
      thisListing = dfs.listPaths(src, thisListing.getLastName());
      if (thisListing == null) { // the directory is deleted
        throw new FileNotFoundException("File " + p + " does not exist.");
      }
      partialListing = thisListing.getPartialListing();
      for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(makeQualified(fileStatus, p));
      }
      statistics.incrementLargeReadOps(1);
    } while (thisListing.hasMore());
    return listing.toArray(new FileStatus[listing.size()]);
  }

 
  public static DirectoryListing convert(DirectoryListingProto dl) {
    if (dl == null)
      return null;
    List<HdfsFileStatusProto> partList =  dl.getPartialListingList();
    return new DirectoryListing(
        partList.isEmpty() ? new HdfsLocatedFileStatus[0]
          : PBHelper.convert(
              partList.toArray(new HdfsFileStatusProto[partList.size()])),
        dl.getRemainingEntries());
  }
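The conversion in the other direction follows the same shape. Below is a rough sketch, assuming the standard protobuf-generated builder methods for the partialListing and remainingEntries fields and a PBHelper.convert overload from HdfsFileStatus[] to HdfsFileStatusProto[]; it is not copied verbatim from PBHelper:

  // Sketch: DirectoryListing -> DirectoryListingProto (requires java.util.Arrays).
  // Builder method names are assumed from the proto field names used above.
  public static DirectoryListingProto convert(DirectoryListing d) {
    if (d == null)
      return null;
    return DirectoryListingProto.newBuilder()
        .addAllPartialListing(
            Arrays.asList(PBHelper.convert(d.getPartialListing())))
        .setRemainingEntries(d.getRemainingEntries())
        .build();
  }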

      GetListingResponseProto.newBuilder().build();
  @Override
  public GetListingResponseProto getListing(RpcController controller,
      GetListingRequestProto req) throws ServiceException {
    try {
      DirectoryListing result = server.getListing(
          req.getSrc(), req.getStartAfter().toByteArray(),
          req.getNeedLocation());
      if (result != null) {
        return GetListingResponseProto.newBuilder().setDirList(
          PBHelper.convert(result)).build();

  }

  @Override // ClientProtocol
  public DirectoryListing getListing(String src, byte[] startAfter,
      boolean needLocation) throws IOException {
    DirectoryListing files = namesystem.getListing(
        src, startAfter, needLocation);
    if (files != null) {
      metrics.incrGetListingOps();
      metrics.incrFilesInGetListingOps(files.getPartialListing().length);
    }
    return files;
  }

                HdfsFileStatus.EMPTY_NAME, targetNode)};
        if (needLocation) {
          return new LocatedDirectoryListing(partialListing,
              new LocatedBlocks[] {createLocatedBlocks(targetNode)}, 0);
        } else {
          return new DirectoryListing(partialListing, 0);
        }
      }
      INodeDirectory dirInode = (INodeDirectory)targetNode;
      List<INode> contents = dirInode.getChildren();
      // find the first child whose name is greater than startAfter
      int startChild = dirInode.nextChild(startAfter);
      int totalNumChildren = contents.size();
      int numOfListing = Math.min(totalNumChildren - startChild, this.lsLimit);
      HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
      LocatedBlocks[] blockLocations = new LocatedBlocks[numOfListing];
      for (int i = 0; i < numOfListing; i++) {
        INode cur = contents.get(startChild + i);
        listing[i] = createHdfsFileStatus(cur.name, cur);
        if (needLocation) {
          blockLocations[i] = createLocatedBlocks(cur);
        }
      }
      if (needLocation) {
        return new LocatedDirectoryListing(
            listing, blockLocations, totalNumChildren - startChild - numOfListing);
      } else {
        return new DirectoryListing(
            listing, totalNumChildren - startChild - numOfListing);
      }
    } finally {
      readUnlock();
    }

      pathstack.push(path);
      while (!pathstack.empty()) {
        String p = pathstack.pop();
        try {
          byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;
          DirectoryListing thisListing;
          do {
            assert lastReturnedName != null;
            thisListing = nnproxy.getPartialListing(p, lastReturnedName);
            if (thisListing == null) {
              if (lastReturnedName.length == 0) {
                LOG.warn("ListPathsServlet - Path " + p + " does not exist");
              }
              break;
            }
            HdfsFileStatus[] listing = thisListing.getPartialListing();
            for (HdfsFileStatus i : listing) {
              String localName = i.getLocalName();
              if (exclude.matcher(localName).matches()
                  || !filter.matcher(localName).matches()) {
                continue;
              }
              if (recur && i.isDir()) {
                pathstack.push(new Path(p, localName).toUri().getPath());
              }
              writeInfo(p, i, doc);
            }
            lastReturnedName = thisListing.getLastName();
          } while (thisListing.hasMore());
        } catch (RemoteException re) {
          re.writeXml(p, doc);
        }
      }
      if (doc != null) {
        doc.endDocument();
      }

   * @return a listing of the path
   * @throws IOException if any IO error occurs
   */
  private FileStatus[] iterativeListing(String src) throws IOException {
    // fetch the first batch of entries in the directory
    DirectoryListing thisListing = namenode.getPartialListing(
        src, HdfsFileStatus.EMPTY_NAME);

    if (thisListing == null) { // the directory does not exist
      return null;
    }
    HdfsFileStatus[] partialListing = thisListing.getPartialListing();
    if (!thisListing.hasMore()) { // got all entries of the directory
      FileStatus[] stats = new FileStatus[partialListing.length];
      for (int i = 0; i < partialListing.length; i++) {
        stats[i] = toFileStatus(partialListing[i], src);
      }
      return stats;
    }

    // The directory is too big to list in one batch; fetch the rest iteratively.
    // First, estimate the total number of entries in the directory.
    int totalNumEntries =
      partialListing.length + thisListing.getRemainingEntries();
    ArrayList<FileStatus> listing =
      new ArrayList<FileStatus>(totalNumEntries);
    // add the first batch of entries to the array list
    for (HdfsFileStatus fileStatus : partialListing) {
      listing.add(toFileStatus(fileStatus, src));
    }

    // now fetch more entries
    do {
      thisListing = namenode.getPartialListing(src, thisListing.getLastName());

      if (thisListing == null) {
        return null; // the directory is deleted
      }

      partialListing = thisListing.getPartialListing();
      for (HdfsFileStatus fileStatus : partialListing) {
        listing.add(toFileStatus(fileStatus, src));
      }
    } while (thisListing.hasMore());

    return listing.toArray(new FileStatus[listing.size()]);
  }

          3*1024-100, 1024, (short) 3, 0);
      }

      byte[] start = HdfsFileStatus.EMPTY_NAME;
      for (int j = 0; j < numEntries; j++) {
        DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp1",
            start, true);
        assertTrue(dl.getPartialListing().length == 1);
        for (int i = 0; i < dl.getPartialListing().length; i++) {
          source.remove(dl.getPartialListing()[i].getLocalName());
        }
        start = dl.getLastName();
      }
      // Verify we have listed all entries in the directory.
      assertTrue(source.size() == 0);

      // Now create 6 files, each with 3 locations. Should take 2 iterations of 3
      source.add("f1");
      source.add("f2");
      source.add("f3");
      source.add("f4");
      source.add("f5");
      source.add("f6");
      numEntries = source.size();
      for (int j = 0; j < numEntries; j++) {
        DFSTestUtil.createFile(hdfs, new Path("/tmp2/" + source.get(j)), 4096,
            3 * 1024 - 100, 1024, (short) 1, 0);
      }

      start = HdfsFileStatus.EMPTY_NAME;
      for (int j = 0; j < numEntries / 3; j++) {
        DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp2",
            start, true);
        assertTrue(dl.getPartialListing().length == 3);
        for (int i = 0; i < dl.getPartialListing().length; i++) {
          source.remove(dl.getPartialListing()[i].getLocalName());
        }
        start = dl.getLastName();
      }
      // Verify we have listed all entries in tmp2.
      assertTrue(source.size() == 0);
    } finally {
      if (cluster != null) {

      hdfs.mkdirs(new Path("/tmp"));
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
      DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);

      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
          HdfsFileStatus.EMPTY_NAME, false);
      assertTrue(dl.getPartialListing().length == 3);

      String f2 = "f2";
      dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
      assertTrue(dl.getPartialListing().length == 1);

      INode f2INode = fsdir.getINode("/tmp/f2");
      String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
          false);
      assertTrue(dl.getPartialListing().length == 1);

      // Test the deleted startAfter file
      hdfs.delete(new Path("/tmp/f2"), false);
      try {
        dl = cluster.getNameNodeRpc().getListing("/tmp",