Examples of locatedBlockCount()


Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()
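LocatedBlocks.locatedBlockCount() returns the number of LocatedBlock entries the NameNode returned for a file, and the snippets below use it both for fsck-style per-file reporting and in tests that check how the block count changes after operations such as addBlock or abandonBlock. As a minimal sketch of the same pattern (the path "/user/demo/data.txt", the demo class name, and the single-argument DFSClient constructor are assumptions for illustration only, not taken from the snippets):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockCountDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: a DFSClient built from the default configuration can reach the NameNode.
    DFSClient client = new DFSClient(conf);
    try {
      String src = "/user/demo/data.txt";  // hypothetical path, for illustration only
      // Ask the NameNode for all block locations of the file.
      LocatedBlocks blocks =
          client.getNamenode().getBlockLocations(src, 0, Long.MAX_VALUE);
      // locatedBlockCount() is simply the size of the returned block list.
      System.out.println(src + " has " + blocks.locatedBlockCount() + " block(s)");
    } finally {
      client.close();
    }
  }
}

The examples that follow use exactly this getBlockLocations(...) plus locatedBlockCount() pairing, either to print per-file block counts or to assert how the count changes.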

    res.totalFiles++;
    res.totalSize += fileLen;
    res.totalBlocks += blocks.locatedBlockCount();
    if (showOpenFiles && isOpen) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

    if (showOpenFiles && isOpen) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');
    }
    if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
    int missing = 0;

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
    LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
    int orginalNumBlocks = blocks.locatedBlockCount();
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
        dfsclient.clientName);
   
    // call abandonBlock again to make sure the operation is idempotent

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

    // Close cluster and check the block has been abandoned after restart
    cluster.restartNameNode();
    blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
        Integer.MAX_VALUE);
    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
        orginalNumBlocks, blocks.locatedBlockCount() + 1);
  }

  @Test
  /** Make sure that the quota is decremented correctly when a block is abandoned */
  public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
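In the two snippets above, abandoning the last block is expected to shrink the file by exactly one block, which is why the assertion after the NameNode restart compares orginalNumBlocks (recorded before the abandonBlock call) against blocks.locatedBlockCount() + 1.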

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // verify that no blocks are associated with this file
      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
                 locations.locatedBlockCount() == 0);
    } finally {
      cluster.shutdown();
      client.close();

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
                 locations.locatedBlockCount() == 0);
    } finally {
      cluster.shutdown();
      client.close();
    }
  }

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

                         + "Created file filestatus.dat with one replicas.");

      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "The file has " + locations.locatedBlockCount() + " blocks.");

      // add one block to the file
      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
          client.clientName, null, null, INodeId.GRANDFATHER_INODE_ID, null);
      System.out.println("testFileCreationError2: "

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      System.out.println("testFileCreationError2: "
          + "Added block " + location.getBlock());

      locations = client.getNamenode().getBlockLocations(file1.toString(),
                                                    0, Long.MAX_VALUE);
      int count = locations.locatedBlockCount();
      System.out.println("testFileCreationError2: "
          + "The file now has " + count + " blocks.");
     
      // set the soft and hard limit to be 1 second so that the
      // namenode triggers lease recovery
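The soft and hard lease limits are lowered so that the NameNode triggers lease recovery quickly; the next snippet then re-reads the block locations and asserts that locatedBlockCount() returns 0, i.e. the block added above is no longer part of the file after recovery.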

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // verify that the last block was synchronized.
      locations = client.getNamenode().getBlockLocations(file1.toString(),
                                                    0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "locations = " + locations.locatedBlockCount());
      assertEquals(0, locations.locatedBlockCount());
      System.out.println("testFileCreationError2 successful");
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // verify that the last block was synchronized.
      locations = client.getNamenode().getBlockLocations(file1.toString(),
                                                    0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "locations = " + locations.locatedBlockCount());
      assertEquals(0, locations.locatedBlockCount());
      System.out.println("testFileCreationError2 successful");
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();
    }