Examples of FSNamesystem


Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    short REPLICATION_FACTOR = 3;
    final Path filePath = new Path("/testFile");
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
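Each fragment here is cut off inside its try block. In the full tests the try is balanced by a finally that tears the mini cluster down, roughly like this completion:

    } finally {
      cluster.shutdown(); // release the MiniDFSCluster's storage and ports
    }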

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    final Path filePath = new Path("/testFile");
    // The last two datanodes are on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

    try {
      // Create a one-block file with a replication factor of 2
      // (REPLICATION_FACTOR is declared in the setup elided above)
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    final Path filePath = new Path("/testFile");
    // Last datanode is on a different rack
    String racks[] = {"/rack1", "/rack1", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
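Both of the preceding fragments capture the DatanodeManager because the full tests go on to simulate a node failure through it. A hedged sketch of that continuation (the datanode index and variable names are illustrative, using the 2.x test APIs):

      // Make the cross-rack datanode look like it failed: stop it, then
      // remove it from the DatanodeManager so re-replication is triggered
      DataNode dataNode = cluster.getDataNodes().get(2);
      DatanodeID dnId = dataNode.getDatanodeId();
      cluster.stopDataNode(2);
      dm.removeDatanode(dnId);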

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    // Four datanodes spread across two racks
    String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      // Create a file with one block
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Decommission one of the hosts holding the block; this should cause
      // the block to be replicated to another host on the same rack,
      // otherwise the rack policy would be violated.
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      String name = locs[0].getNames()[0];
      DFSTestUtil.writeFile(localFileSys, excludeFile, name);
      ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
      DFSTestUtil.waitForDecommission(fs, name);

      // Check the block still has sufficient # replicas across racks
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    } finally {
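The localFileSys and excludeFile variables come from setup that the fragment cuts off. Decommissioning in these tests works by listing a datanode in an excludes file and asking the DatanodeManager to reread it; a minimal sketch of that setup, with an illustrative path:

    FileSystem localFileSys = FileSystem.getLocal(conf);
    Path dir = new Path(localFileSys.getWorkingDirectory(), "temp/decommission");
    Path excludeFile = new Path(dir, "exclude");
    assertTrue(localFileSys.mkdirs(dir));
    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

With that wiring in place, writing a datanode's name into the file and calling refreshNodes(conf), as the fragment does, is what actually starts the decommission.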

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    // All hosts are on two racks; only one host is on /rack2
    String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();

    try {
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Lower the replication factor so the blocks are over-replicated
      REPLICATION_FACTOR = 2;
      fs.setReplication(filePath, REPLICATION_FACTOR);

      // Decommission one of the hosts holding the block, choosing one that
      // is not the lone host on /rack2 (if we decommission that host it
      // becomes impossible to respect the rack policy).
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      for (String top : locs[0].getTopologyPaths()) {
        if (!top.startsWith("/rack2")) {
          // Topology paths look like "/rack1/host:port"; strip the rack
          // prefix and separator to get the datanode name
          String name = top.substring("/rack1".length()+1);
          DFSTestUtil.writeFile(localFileSys, excludeFile, name);
          ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
          DFSTestUtil.waitForDecommission(fs, name);
          break;
        }
      }
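The fragment stops before the final check. A plausible completion, mirroring the wait used earlier in the same test, is to verify that the two remaining replicas still span both racks once the decommission finishes:

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);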

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

  static DatanodeInfo chooseDatanode(final NameNode namenode,
      final String path, final HttpOpParam.Op op, final long openOffset,
      final long blocksize) throws IOException {
    final FSNamesystem ns = namenode.getNamesystem();

    if (op == PutOpParam.Op.CREATE) {
      //choose a datanode near the client
      final DatanodeInfo dn = ns.chooseDatanode(path, getRemoteAddress(), blocksize);
      if (dn != null) {
        return dn;
      }
    } else if (op == GetOpParam.Op.OPEN
        || op == GetOpParam.Op.GETFILECHECKSUM
        || op == PostOpParam.Op.APPEND) {
      //choose a datanode containing a replica
      final HdfsFileStatus status = namenode.getFileInfo(path);
      if (status == null) {
        throw new FileNotFoundException("File " + path + " not found.");
      }
      final long len = status.getLen();
      if (op == GetOpParam.Op.OPEN) {
        if (openOffset < 0L || (openOffset >= len && len > 0)) {
          throw new IOException("Offset=" + openOffset
              + " out of the range [0, " + len + "); " + op + ", path=" + path);
        }
      }

      if (len > 0) {
        final long offset = op == GetOpParam.Op.OPEN? openOffset: len - 1;
        final LocatedBlocks locations = namenode.getBlockLocations(
            path, offset, 1);
        final int count = locations.locatedBlockCount();
        if (count > 0) {
          return JspHelper.bestNode(locations.get(0));
        }
      }
    }

    return ns.getRandomDatanode();
  }
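This helper backs WebHDFS redirects: CREATE goes to a datanode near the client, OPEN/GETFILECHECKSUM/APPEND to a datanode holding a replica, and everything else falls through to a random live node. A hypothetical call site, assuming the surrounding request-handling code supplies the path and block size:

    // e.g. redirecting an OPEN request for the start of a file
    final DatanodeInfo target = chooseDatanode(namenode, "/user/alice/data.txt",
        GetOpParam.Op.OPEN, 0L, blocksize);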

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

  /*
   * Test that the DatanodeManager keeps a correct count of datanode
   * software versions at all times.
   */
  @Test
  public void testNumVersionsReportedCorrect() throws IOException {
    //Create the DatanodeManager which will be tested
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, new Configuration());

    //Seed the RNG with a known value so test failures are easier to reproduce
    Random rng = new Random();
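The seed promised by the comment is applied just past the cut. The usual pattern, and what the full test does, is to draw a seed, record it, and reseed so a failing sequence can be replayed:

    int seed = rng.nextInt();
    rng = new Random(seed);
    // record the seed so a failure can be reproduced deterministically
    System.out.println("Using seed " + seed + " for testing");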

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    assertEquals("Num physical blocks should match num stored in the NN",
        totalReal, totalNN);

    // now check the number of under-replicated blocks
    FSNamesystem fsn = cluster.getNamesystem();
    // force an update of all the metric counts by scheduling datanode work
    BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
    // get all the counts
    long underRepl = fsn.getUnderReplicatedBlocks();
    long pendRepl = fsn.getPendingReplicationBlocks();
    long totalRepl = underRepl + pendRepl;
    System.out.println("underreplicated after = "+ underRepl +
        " and pending repl ="  + pendRepl + "; total underRepl = " + totalRepl);

    System.out.println("total blocks (real and replicating):" +
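Calling BlockManagerTestUtil.getComputedDatanodeWork() forces the BlockManager to schedule replication work immediately instead of waiting for the replication monitor's next cycle, so the counters read above are current. If a test needs those counters to converge, a polling loop of this shape (names as in the fragment, sleep interval an assumption) is typical:

    while (fsn.getPendingReplicationBlocks() > 0) {
      Thread.sleep(500); // the enclosing test method declares throws Exception
      BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
    }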

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

  public void testRetryCacheOnStandbyNN() throws Exception {
    // 1. run operations
    DFSTestUtil.runOperations(cluster, dfs, conf, BlockSize, 0);
   
    // check retry cache in NN1
    FSNamesystem fsn0 = cluster.getNamesystem(0);
    LightWeightCache<CacheEntry, CacheEntry> cacheSet =
        (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
    assertEquals(14, cacheSet.size());
   
    Map<CacheEntry, CacheEntry> oldEntries =
        new HashMap<CacheEntry, CacheEntry>();
    Iterator<CacheEntry> iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
      oldEntries.put(entry, entry);
    }
   
    // 2. Failover the current standby to active.
    cluster.getNameNode(0).getRpcServer().rollEditLog();
    cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
   
    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
   
    // 3. check the retry cache on the new active NN
    FSNamesystem fsn1 = cluster.getNamesystem(1);
    cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
        .getRetryCache().getCacheSet();
    assertEquals(14, cacheSet.size());
    iter = cacheSet.iterator();
    while (iter.hasNext()) {
      CacheEntry entry = iter.next();
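The loop is cut mid-body. Its plausible completion, and the point of the test, is that every entry on the new active was already present before the failover, i.e. the retry cache was rebuilt from the shared edit log:

      // each replayed entry must match one recorded before the failover
      assertTrue(oldEntries.containsKey(entry));
    }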

Examples of org.apache.hadoop.hdfs.server.namenode.FSNamesystem

    /*
     * ToFix
     * If the metrics counters were instead stored in the metrics objects
     * themselves we could avoid copying the values on each update.
     */
    synchronized (this) {
      FSNamesystem fsNameSystem = FSNamesystem.getFSNamesystem();
      filesTotal.set((int)fsNameSystem.getFilesTotal());
      blocksTotal.set((int)fsNameSystem.getBlocksTotal());
      capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
      capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
      capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
                                               getCapacityRemaining()));
      totalLoad.set(fsNameSystem.getTotalLoad());
      pendingReplicationBlocks.set((int)fsNameSystem.
                                   getPendingReplicationBlocks());
      underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
      scheduledReplicationBlocks.set((int)fsNameSystem.
                                      getScheduledReplicationBlocks());
      missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());

      for (MetricsBase m : registry.getMetricsList()) {
        m.pushMetric(metricsRecord);
      }
    }
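The roundBytesToGBytes helper is outside the fragment. Given that it is applied to byte counts that would overflow the int-valued gauges, it presumably looks like this sketch (name taken from the calls above, body assumed):

  private static int roundBytesToGBytes(long bytes) {
    // capacities are tracked in bytes but the gauges are ints,
    // so round to whole gigabytes to stay in range
    return Math.round(((float) bytes) / (1024 * 1024 * 1024));
  }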