Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.FSDataset$BlockPoolSlice
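All of the fragments below assume a running MiniDFSCluster bound to a local variable cluster (some also assume a Configuration named conf, a replication factor repl, and a DFS client). As a rough sketch of the shared harness these tests presumably use, and noting that the three-datanode count and constructor arguments are assumptions rather than something taken from the fragments, setup in the 0.20-era API the examples target might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    Configuration conf = new Configuration();
    // Three freshly formatted datanodes on default racks (assumed values).
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // ... exercise FSDataset through one of the fragments below ...
    } finally {
      cluster.shutdown();
    }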


  // Signature reconstructed from the fragment (it clearly builds and returns
  // a List<File>); the name getBlockFiles is an assumption.
  static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
    List<File> files = new ArrayList<File>();
    List<DataNode> datanodes = cluster.getDataNodes();
    int nsId = cluster.getNameNode().getNamespaceID();
    Block[][] blocks = cluster.getAllBlockReports(nsId);
    for (int i = 0; i < blocks.length; i++) {
      // Map each block reported by datanode i to its on-disk file.
      FSDataset ds = (FSDataset) datanodes.get(i).getFSDataset();
      for (Block b : blocks[i]) {
        files.add(ds.getBlockFile(nsId, b));
      }
    }
    return files;
  }
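The body above maps every replica reported by every datanode back to its file on local disk; tests typically use such a list to tamper with replicas directly. A hypothetical caller, where the helper name getBlockFiles and the corruption step are both assumptions, might look like:

    // Clobber the first byte of every replica to simulate on-disk corruption.
    for (File blockFile : getBlockFiles(cluster)) {
      RandomAccessFile raf = new RandomAccessFile(blockFile, "rw");
      try {
        raf.seek(0);
        raf.writeByte(0);
      } finally {
        raf.close();
      }
    }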


    assertEquals(len1, lb.getBlockSize());

    DatanodeInfo[] datanodeinfos = lb.getLocations();
    assertEquals(repl, datanodeinfos.length);
    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
    final FSDataset data = (FSDataset) dn.getFSDataset();
    int nsId = cluster.getNameNode().getNamespaceID();
    // Open the replica's backing file directly to check its physical length.
    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(nsId, blk), "rw");
    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
    if (!dn.useInlineChecksum) {
      // Without inline checksums the data file holds only block data,
      // so its length equals the logical block length.
      assertEquals(len1, raf.length());
    } else {
      int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
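The truncated else branch deals with the inline-checksum layout, where CRCs are interleaved with the block data in a single file; the physical file is then longer than the logical block, so the expected length presumably has to be derived from io.bytes.per.checksum rather than compared against len1 directly.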


          // Collect the ports of the datanodes hosting this block.
          Set<Integer> portSet = new HashSet<Integer>();
          for (DatanodeInfo dnInfo : locatedblock.getLocations()) {
            portSet.add(dnInfo.getPort());
          }
          FSDataset fsd = null;
          for (DataNode dn : cluster.getDataNodes()) {
            if (portSet.contains(dn.getPort())) {
              fsd = (FSDataset) (dn.data);
              TestCase.assertNotNull(fsd);
              DatanodeBlockInfo binfo = fsd.getDatanodeBlockInfo(nsId,
                  locatedblock.getBlock());
              TestCase.assertNotNull(binfo);
              // Poll for up to 20 seconds for the block's CRC info to appear.
              long waitTimeLeft = 20000;
              while (true) {
                if (binfo.hasBlockCrcInfo()) {
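This fragment matches datanodes to the block's locations by port, then polls hasBlockCrcInfo() with a 20-second budget: the block CRC is evidently populated asynchronously, so the test has to wait for it rather than assert its presence immediately.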

      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int nsId = cluster.getNameNode().getNamespaceID();
      for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset) datanode.data;
        Block b = dataset.getStoredBlock(nsId, locatedblock.getBlock().getBlockId());

        // Advance the generation stamp; this update should succeed.
        Block newBlock = new Block(b);
        newBlock.setGenerationStamp(6661);
        dataset.updateBlock(nsId, b, newBlock);

        // Updating from the stale block "b" must now fail: the stored replica
        // already carries generation stamp 6661.
        Block newBlock1 = new Block(b);
        newBlock1.setGenerationStamp(6662);
        boolean hitException = false;
        try {
          dataset.updateBlock(nsId, b, newBlock1);
        } catch (IOException e) {
          hitException = true;
        }
        TestCase.assertTrue("updateBlock should fail when the generation stamp does not match",
            hitException);

        // Updating from the current block succeeds.
        dataset.updateBlock(nsId, newBlock, newBlock1);
      }
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();
    }
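The sequence above pins down updateBlock's generation-stamp check: after the stored replica is advanced to generation stamp 6661, a second update still keyed on the stale block b must throw, while an update keyed on the current block (newBlock) goes through. The generation stamp is how the datanode distinguishes a fresh update from an out-of-date one.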

                  dn.length == 1);

      LocatedBlocks locations = client.namenode.getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

      //
      // Create hard links for a few of the blocks
      //
      for (int i = 0; i < blocks.size(); i += 2) {
        Block b = blocks.get(i).getBlock();
        File f = dataset.getFile(nsId, b);
        File link = new File(f.toString() + ".link");
        System.out.println("Creating hardlink for File " + f + " to " + link);
        HardLink.createHardLink(f, link);
      }
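Giving every second block file an extra hard link raises its link count above one, which is presumably the condition the datanode's copy-on-write (detach) handling is being set up to exercise, e.g. when a block must be modified for append.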

      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      int nsId = cluster.getNameNode().getNamespaceID();
      for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
        FSDataset dataset = (FSDataset) datanode.data;
        Block b = dataset.getStoredBlock(nsId, locatedblock.getBlock().getBlockId());
        // Resolve the replica's on-disk file and read its content back directly.
        File blockfile = dataset.getReplicaToRead(nsId, b).getDataFileToRead();
        System.out.println("blockfile=" + blockfile);
        if (blockfile != null) {
          BufferedReader in = new BufferedReader(new FileReader(blockfile));
          assertEquals("something", in.readLine());
          in.close();
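Here the test bypasses the DFS client entirely: it resolves the stored block, asks the dataset for the replica's readable file via getReplicaToRead, and verifies the raw on-disk content is exactly the line that was written ("something").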

    super.tearDown();
  }

  public void testParallelCheckDirs() throws Exception {
    final DataNode datanode = cluster.getDataNodes().get(0);
    FSDataset fsDataset = (FSDataset) datanode.data;
    // Replace the dataset with a Mockito spy so calls into it can be verified.
    datanode.data = spy(fsDataset);

    // checkDiskError(Exception) is private on DataNode; reach it via reflection.
    final Method checkDiskMethod = DataNode.class.getDeclaredMethod(
        "checkDiskError", Exception.class);
    checkDiskMethod.setAccessible(true);
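testParallelCheckDirs swaps the datanode's dataset for a Mockito spy and uses reflection to reach the private DataNode.checkDiskError(Exception) method; the truncated remainder presumably fires several concurrent checkDiskError calls and uses the spy to verify that the underlying directory check does not run in parallel.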
