Examples of locatedBlockCount()


Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // verify that new block is associated with this file
      DFSClient client = fs.dfs;
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file1,
                 locations.locatedBlockCount() == 3);

      // verify filestatus2.dat
      locations = client.getNamenode().getBlockLocations(
                                  file2.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file2,
                 locations.locatedBlockCount() == 1);
    } finally {
      IOUtils.closeStream(fs);
      cluster.shutdown();
    }
  }
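
locatedBlockCount() simply reports how many LocatedBlock entries the NameNode returned for the requested byte range. The test above drives a MiniDFSCluster and checks that count per file. Below is a minimal, self-contained sketch of the same pattern, not taken from the snippet itself: it assumes the class sits in the org.apache.hadoop.hdfs package (so the package-private fs.dfs field used above is visible), a 2.x-era MiniDFSCluster.Builder API, and a hypothetical 1 MB block size and file name.

package org.apache.hadoop.hdfs;                 // needed to reach fs.dfs

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LocatedBlockCountSketch {
  public static void main(String[] args) throws IOException {
    final long blockSize = 1024 * 1024;         // hypothetical 1 MB block size
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    try {
      // Write exactly three blocks worth of data.
      Path file = new Path("/sketch/three-blocks.dat");
      FSDataOutputStream out = fs.create(file, true, 4096, (short) 1, blockSize);
      out.write(new byte[(int) (3 * blockSize)]);
      out.close();

      // Same call chain as the test above: ask the NameNode for the block
      // list of the whole file and count the entries.
      LocatedBlocks blocks = fs.dfs.getNamenode()
          .getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
      System.out.println("block count = " + blocks.locatedBlockCount()); // 3
    } finally {
      fs.close();
      cluster.shutdown();
    }
  }
}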

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      // wait for the lease to expire
      try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}

      LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
          f, 0, Long.MAX_VALUE);
      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
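
The lease-expiry test above expects exactly one block and then walks that block's replica locations. A small helper sketch of the same traversal follows; the class and method names are hypothetical, and it assumes a 2.x-era client where LocatedBlock.getBlock() returns an ExtendedBlock and DatanodeInfo exposes getHostName().

import java.util.List;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class BlockReplicaDump {
  /** Print every block reported for a file and where its replicas live. */
  static void dumpBlockReplicas(LocatedBlocks locations) {
    System.out.println("file has " + locations.locatedBlockCount() + " block(s)");
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    for (int i = 0; i < locations.locatedBlockCount(); i++) {
      LocatedBlock lb = blocks.get(i);
      System.out.println("  block " + i + ": " + lb.getBlock()
          + " (" + lb.getBlock().getNumBytes() + " bytes)");
      for (DatanodeInfo dn : lb.getLocations()) {
        System.out.println("    replica on " + dn.getHostName());
      }
    }
  }
}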

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

      }
    }

    // if the file is under construction, then fetch size of last block
    // from datanode.
    if (newInfo.isUnderConstruction() && newInfo.locatedBlockCount() > 0) {
      LocatedBlock last = newInfo.get(newInfo.locatedBlockCount()-1);
      if (last.getLocations().length > 0) {
        try {
          Block newBlock = getBlockInfo(last);
          // only if the block has data (not null)
          if (newBlock != null) {
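
This snippet comes from the client-side read path: for a file that is still open for write, the size of the last block has to be fetched from a datanode, so the code picks out the last entry with get(locatedBlockCount() - 1). A minimal sketch of just that selection step follows; the class and method names are hypothetical and the surrounding getBlockInfo() call is not reproduced.

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class LastBlockSketch {
  /** The last located block of a file still open for write, or null. */
  static LocatedBlock lastBlockIfUnderConstruction(LocatedBlocks locatedBlocks) {
    if (locatedBlocks.isUnderConstruction()
        && locatedBlocks.locatedBlockCount() > 0) {
      // get(i) indexes the same list that locatedBlockCount() measures,
      // so count - 1 is always a valid last index here.
      return locatedBlocks.get(locatedBlocks.locatedBlockCount() - 1);
    }
    return null;
  }
}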

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocks.locatedBlockCount()

    }
    isOpen = blocks.isUnderConstruction();
    if (isOpen && !showOpenFiles) {
      // We collect these stats about open files to report with default options
      res.totalOpenFilesSize += fileLen;
      res.totalOpenFilesBlocks += blocks.locatedBlockCount();
      res.totalOpenFiles++;
      return;
    }
    res.totalFiles++;
    res.totalSize += fileLen;
    res.totalBlocks += blocks.locatedBlockCount();
    if (showOpenFiles && isOpen) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s), OPENFORWRITE: ");
    } else if (showFiles) {
      out.print(path + " " + fileLen + " bytes, " +
        blocks.locatedBlockCount() + " block(s): ");
    } else {
      out.print('.');
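
Here fsck-style code folds each file's block count into running totals and prints a per-file summary. The sketch below strips that bookkeeping down to the locatedBlockCount() calls; the FsckTotals holder class and its field names are hypothetical stand-ins for the real fsck result object.

import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class FsckTotals {
  long totalFiles;
  long totalOpenFiles;
  long totalBlocks;
  long totalOpenFilesBlocks;
  long totalSize;
  long totalOpenFilesSize;

  /** Fold one file's block listing into the running totals. */
  void add(String path, long fileLen, LocatedBlocks blocks) {
    if (blocks.isUnderConstruction()) {
      // Open files are tallied separately, as in the snippet above.
      totalOpenFiles++;
      totalOpenFilesSize += fileLen;
      totalOpenFilesBlocks += blocks.locatedBlockCount();
      return;
    }
    totalFiles++;
    totalSize += fileLen;
    totalBlocks += blocks.locatedBlockCount();
    System.out.println(path + " " + fileLen + " bytes, "
        + blocks.locatedBlockCount() + " block(s)");
  }
}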