Package org.apache.hadoop.hdfs.server.namenode

Examples of org.apache.hadoop.hdfs.server.namenode.FSDirectory
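The examples below all reach FSDirectory the same way: obtain it from a running FSNamesystem and resolve paths to INodes with getINode(). As a rough, self-contained sketch of that pattern, assuming the Hadoop HDFS test harness (MiniDFSCluster); the class name and single-DataNode setup here are illustrative, not taken from the examples themselves:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
    import org.apache.hadoop.hdfs.server.namenode.INode;

    public class FSDirectorySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          // FSDirectory is the NameNode's in-memory view of the namespace.
          FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
          // Resolve a path to its INode, as the examples below do.
          INode root = fsdir.getINode("/");
          System.out.println("root is a directory: " + root.isDirectory());
        } finally {
          cluster.shutdown();
        }
      }
    }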


    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);

    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
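    // Reach into the NameNode's FSNamesystem for its FSDirectory, then resolve
    // "/" to the quota-tracking root directory INode.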
    FSDirectory fsd = cluster.getNameNode().namesystem.dir;
    INodeDirectoryWithQuota rootDir = (INodeDirectoryWithQuota) (fsd
        .getExistingPathINodes("/")[0]);
    try {
      generateFiles(dfs, rootDir, 1024, 512);
      generateFiles(dfs, rootDir, 1019, 512);
    } finally {


   
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    final Path file = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
   
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
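    // The directory's INode type changes as snapshots are allowed and
    // disallowed; the assertions below track that transition.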
    INode subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
   
    hdfs.allowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectorySnapshottable);
   
    hdfs.disallowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectoryWithSnapshot);
  }

  /**
   * Scan all CacheDirectives.  Use the information to figure out
   * what cache replication factor each block should have.
   */
  private void rescanCacheDirectives() {
    FSDirectory fsDir = namesystem.getFSDirectory();
    final long now = new Date().getTime();
    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
      // Skip processing this entry if it has expired
      if (LOG.isTraceEnabled()) {
        LOG.trace("Directive expiry is at " + directive.getExpiryTime());
      }
      if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Skipping directive id " + directive.getId()
              + " because it has expired (" + directive.getExpiryTime() + "<="
              + now + ")");
        }
        continue;
      }
      scannedDirectives++;
      String path = directive.getPath();
      INode node;
      try {
        node = fsDir.getINode(path);
      } catch (UnresolvedLinkException e) {
        // We don't cache through symlinks
        continue;
      }
      if (node == null) {

        fileSystem.append(f);
        fail("Append should fail because insufficient locations");
      } catch (IOException e){
        LOG.info("Expected exception: ", e);
      }
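      // After the expected failure, resolve the path to its INodeFile and
      // verify the file was not left in the under-construction state.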
      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
      final INodeFile inode = INodeFile.
          valueOf(dir.getINode("/testAppend"), "/testAppend");
      assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
      if (null != fileSystem) {
        fileSystem.close();
      }

   
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
    final Path file = new Path(sub, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
   
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INode subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
   
    hdfs.allowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode instanceof INodeDirectorySnapshottable);
   
    hdfs.disallowSnapshot(sub);
    subNode = fsdir.getINode(sub.toString());
    assertTrue(subNode.asDirectory().isWithSnapshot());
  }

  @Test (timeout=10000)
  public void testSnapshotLimits() throws Exception {
    // Setup mock objects for SnapshotManager.createSnapshot.
    //
    INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
    FSDirectory fsdir = mock(FSDirectory.class);

    SnapshotManager sm = spy(new SnapshotManager(fsdir));
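    // Stub path resolution to return the mocked snapshottable root and cap
    // getMaxSnapshotID() at testMaxSnapshotLimit.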
    doReturn(ids).when(sm).getSnapshottableRoot(anyString());
    doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();

  /**
   * Scan all CacheDirectives.  Use the information to figure out
   * what cache replication factor each block should have.
   */
  private void rescanCacheDirectives() {
    FSDirectory fsDir = namesystem.getFSDirectory();
    final long now = new Date().getTime();
    for (CacheDirective directive : cacheManager.getCacheDirectives()) {
      scannedDirectives++;
      // Skip processing this entry if it has expired
      if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Directive " + directive.getId() + ": the directive " +
              "expired at " + directive.getExpiryTime() + " (now = " +
              now + ")");
        }
        continue;
      }
      String path = directive.getPath();
      INode node;
      try {
        node = fsDir.getINode(path);
      } catch (UnresolvedLinkException e) {
        // We don't cache through symlinks
        if (LOG.isDebugEnabled()) {
          LOG.debug("Directive " + directive.getId() +
              ": got UnresolvedLinkException while resolving path " + path);

  @Test
  public void testSpaceCommands() throws Exception {
    // smaller block size, support append
    setUp(true, true);

    FSDirectory fsd = cluster.getNameNode().namesystem.dir;
    INodeDirectoryWithQuota rootDir = (INodeDirectoryWithQuota) (fsd
        .getExistingPathINodes("/")[0]);
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  }
