Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block
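
The snippets below all come from the HDFS source tree and exercise a small
surface of the Block class: the no-argument, copy and (id, length, stamp)
constructors, set(blockId, numBytes, generationStamp), and the getBlockId(),
getNumBytes() and getGenerationStamp() accessors. The following is a minimal,
self-contained sketch of those calls, assuming the pre-federation Block API
these examples were written against; the numeric values are made up for
illustration.

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockApiSketch {
  public static void main(String[] args) {
    // An empty block whose fields are filled in later, as the block-report code does.
    Block blk = new Block();
    blk.set(4242L, 67108864L, 1001L);              // blockId, length in bytes, generation stamp

    System.out.println(blk.getBlockId());          // 4242
    System.out.println(blk.getNumBytes());         // 67108864
    System.out.println(blk.getGenerationStamp());  // 1001
    System.out.println(blk.getBlockName());        // human-readable name, e.g. "blk_4242"

    // Copy constructor: used below to snapshot a Block object that is being reused.
    Block copy = new Block(blk);
    assert copy.getBlockId() == blk.getBlockId();
  }
}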


    case DatanodeProtocol.DNA_INVALIDATE:
      //
      // Some local block(s) are obsolete and can be
      // safely garbage-collected.
      //
      Block[] toDelete = bcmd.getBlocks();
      try {
        if (blockScanner != null) {
          blockScanner.deleteBlocks(toDelete);
        }
        data.invalidate(toDelete);


      ) throws IOException {
    if (LOG.isDebugEnabled()) {
      LOG.debug("block=" + block);
    }
   
    Block stored = data.getStoredBlock(block.getBlockId());

    if (stored == null) {
      return null;
    }
    BlockMetaDataInfo info = new BlockMetaDataInfo(stored,
                                 blockScanner.getLastScanTime(stored));
    if (LOG.isDebugEnabled()) {
      LOG.debug("getBlockMetaDataInfo successful block=" + stored +
                " length " + stored.getNumBytes() +
                " genstamp " + stored.getGenerationStamp());
    }

    // paranoia! verify that the contents of the stored block
    // match the block file on disk.
    data.validateBlockMetadata(stored);

    DatanodeID[] datanodeids = (DatanodeID[])targets;
    // If the block is already being recovered, then skip recovering it.
    // This can happen if the namenode and client start recovering the same
    // file at the same time.
    synchronized (ongoingRecovery) {
      Block tmp = new Block();
      tmp.set(block.getBlockId(), block.getNumBytes(), GenerationStamp.WILDCARD_STAMP);
      if (ongoingRecovery.get(tmp) != null) {
        String msg = "Block " + block + " is already being recovered, " +
                     " ignoring this request to recover it.";
        LOG.info(msg);
        throw new IOException(msg);

    return map.get(b);
  }

  /** Return the block object without matching against generation stamp. */
  BlockInfo getStoredBlockWithoutMatchingGS(Block b) {
    return map.get(new Block(b.getBlockId()));
  }

                  Collection<Block> toAdd,
                  Collection<Block> toRemove,
                  Collection<Block> toInvalidate) {
    // place a delimiter in the list which separates blocks
    // that have been reported from those that have not
    BlockInfo delimiter = new BlockInfo(new Block(), 1);
    boolean added = this.addBlock(delimiter);
    assert added : "Delimiting block cannot be present in the node";
    if(newReport == null)
      newReport = new BlockListAsLongs( new long[0]);
    // scan the report and collect newly reported blocks
    // Note we take special precautions to limit the number of temporary Block
    // objects allocated while processing this block report - which is why the
    // block list is stored as longs
    Block iblk = new Block(); // a fixed new'ed block to be reused with index i
    Block oblk = new Block(); // for fixing genstamps
    for (int i = 0; i < newReport.getNumberOfBlocks(); ++i) {
      iblk.set(newReport.getBlockId(i), newReport.getBlockLen(i),
               newReport.getBlockGenStamp(i));
      BlockInfo storedBlock = blocksMap.getStoredBlock(iblk);
      if(storedBlock == null) {
        // if the block with a WILDCARD generation stamp matches
        // then accept this block.
        // This block has a different generation stamp on the datanode
        // because of a lease-recovery-attempt.
        oblk.set(newReport.getBlockId(i), newReport.getBlockLen(i),
                 GenerationStamp.WILDCARD_STAMP);
        storedBlock = blocksMap.getStoredBlock(oblk);
        if (storedBlock != null && storedBlock.getINode() != null &&
            (storedBlock.getGenerationStamp() <= iblk.getGenerationStamp() ||
             storedBlock.getINode().isUnderConstruction())) {
          // accept the block. It will be cleaned up on cluster restart.
        } else {
          storedBlock = null;
        }
      }
      if(storedBlock == null) {
        // If block is not in blocksMap it does not belong to any file
        toInvalidate.add(new Block(iblk));
        continue;
      }
      if(storedBlock.findDatanode(this) < 0) {// Known block, but not on the DN
        // if the size differs from what is in the blockmap, then return
        // the new block. addStoredBlock will then pick up the right size of this
        // block and will update the block object in the BlocksMap
        if (storedBlock.getNumBytes() != iblk.getNumBytes()) {
          toAdd.add(new Block(iblk));
        } else {
          toAdd.add(storedBlock);
        }
        continue;
      }
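
The report-scanning loop above goes out of its way to avoid allocating one
Block per reported replica: a single scratch Block is refilled with set() from
the long-encoded report on every iteration, and a fresh copy is made only for
blocks that must be queued in toAdd or toInvalidate. A stripped-down sketch of
that pattern, using only the BlockListAsLongs accessors that appear above
(decodeReport itself is a hypothetical helper, not part of the original code):

  // One scratch Block is refilled per entry; a copy is taken only when kept.
  static java.util.List<Block> decodeReport(BlockListAsLongs report) {
    java.util.List<Block> decoded = new java.util.ArrayList<Block>();
    Block scratch = new Block();                   // allocated once, reused for every entry
    for (int i = 0; i < report.getNumberOfBlocks(); i++) {
      scratch.set(report.getBlockId(i),
                  report.getBlockLen(i),
                  report.getBlockGenStamp(i));
      // Copy before keeping, exactly as the loop above does with new Block(iblk).
      decoded.add(new Block(scratch));
    }
    return decoded;
  }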

        }
        if (imgVersion <= -8) {
          blockSize = in.readLong();
        }
        int numBlocks = in.readInt();
        Block[] blocks = null;

        // for older versions, a blocklist of size 0
        // indicates a directory.
        if ((-9 <= imgVersion && numBlocks > 0) ||
            (imgVersion < -9 && numBlocks >= 0)) {
          blocks = new Block[numBlocks];
          for (int j = 0; j < numBlocks; j++) {
            blocks[j] = new Block();
            if (-14 < imgVersion) {
              blocks[j].set(in.readLong(), in.readLong(),
                            Block.GRANDFATHER_GENERATION_STAMP);
            } else {
              blocks[j].readFields(in);

    short blockReplication = in.readShort();
    long modificationTime = in.readLong();
    long preferredBlockSize = in.readLong();
    int numBlocks = in.readInt();
    BlockInfo[] blocks = new BlockInfo[numBlocks];
    Block blk = new Block();
    for (int i = 0; i < numBlocks; i++) {
      blk.readFields(in);
      blocks[i] = new BlockInfo(blk, blockReplication);
    }
    PermissionStatus perm = PermissionStatus.read(in);
    String clientName = readString(in);
    String clientMachine = readString(in);
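
Both fsimage-loading fragments above depend on Block implementing Hadoop's
Writable interface: readFields(DataInput) restores the same three fields (id,
length, generation stamp) that write(DataOutput) persists. A self-contained
round trip, assuming nothing beyond that Writable contract, might look like
this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    Block original = new Block(4242L, 1024L, 1001L);   // id, length, generation stamp

    // Serialize through the Writable contract.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize into a fresh instance, as the loaders above do in a loop.
    Block restored = new Block();
    restored.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    assert restored.getBlockId() == original.getBlockId();
    assert restored.getNumBytes() == original.getNumBytes();
    assert restored.getGenerationStamp() == original.getGenerationStamp();
  }
}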

    int underReplicatedPerFile = 0;
    int misReplicatedPerFile = 0;
    StringBuffer report = new StringBuffer();
    int i = 0;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      Block block = lBlk.getBlock();
      boolean isCorrupt = lBlk.isCorrupt();
      String blkName = block.toString();
      DatanodeInfo[] locs = lBlk.getLocations();
      res.totalReplicas += locs.length;
      short targetFileReplication = file.getReplication();
      if (locs.length > targetFileReplication) {
        res.excessiveReplicas += (locs.length - targetFileReplication);
        res.numOverReplicatedBlocks += 1;
      }
      // Check if block is Corrupt
      if (isCorrupt) {
        corrupt++;
        res.corruptBlocks++;
        out.print("\n" + path + ": CORRUPT block " + block.getBlockName()+"\n");
      }
      if (locs.length >= minReplication)
        res.numMinReplicatedBlocks++;
      if (locs.length < targetFileReplication && locs.length > 0) {
        res.missingReplicas += (targetFileReplication - locs.length);
        res.numUnderReplicatedBlocks += 1;
        underReplicatedPerFile++;
        if (!showFiles) {
          out.print("\n" + path + ": ");
        }
        out.println(" Under replicated " + block +
                    ". Target Replicas is " +
                    targetFileReplication + " but found " +
                    locs.length + " replica(s).");
      }
      // verify block placement policy
      int missingRacks = ReplicationTargetChooser.verifyBlockPlacement(
                    lBlk, targetFileReplication, networktopology);
      if (missingRacks > 0) {
        res.numMisReplicatedBlocks++;
        misReplicatedPerFile++;
        if (!showFiles) {
          if(underReplicatedPerFile == 0)
            out.println();
          out.print(path + ": ");
        }
        out.println(" Replica placement policy is violated for " +
                    block +
                    ". Block should be additionally replicated on " +
                    missingRacks + " more rack(s).");
      }
      report.append(i + ". " + blkName + " len=" + block.getNumBytes());
      if (locs.length == 0) {
        report.append(" MISSING!");
        res.addMissing(block.toString(), block.getNumBytes());
        missing++;
        missize += block.getNumBytes();
      } else {
        report.append(" repl=" + locs.length);
        if (showLocations || showRacks) {
          StringBuffer sb = new StringBuffer("[");
          for (int j = 0; j < locs.length; j++) {

    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
      DatanodeInfo chosenNode;
     
      try {
        chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
      } catch (IOException ie) {
        if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block " + lblock);
        }
        LOG.info("Could not obtain block from any node:  " + ie);
        try {
          Thread.sleep(10000);
        } catch (InterruptedException iex) {
        }
        deadNodes.clear();
        failures++;
        continue;
      }
      try {
        s = new Socket();
        s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
        s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       
        blockReader =
          DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" +
                                               block.getBlockId(),
                                               block.getBlockId(),
                                               lblock.getBlockToken(),
                                               block.getGenerationStamp(),
                                               0, -1,
                                               conf.getInt("io.file.buffer.size", 4096));
       
      } catch (IOException ex) {
        // Put chosen node into dead list, continue
        LOG.info("Failed to connect to " + targetAddr + ":" + ex);
        deadNodes.add(chosenNode);
        if (s != null) {
          try {
            s.close();
          } catch (IOException iex) {
          }
        }
        s = null;
      }
    }
    if (blockReader == null) {
      throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
      while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
        fos.write(buf, 0, cnt);
        bytesRead += cnt;
      }
      if ( bytesRead != block.getNumBytes() ) {
        throw new IOException("Recorded block size is " + block.getNumBytes() +
                              ", but datanode returned " +bytesRead+" bytes");
      }
    } catch (Exception e) {
      e.printStackTrace();
      success = false;

        long id2 = 0x0123456789abcdefL;
        long id3 = 0xfedcba0987654321L;
        long id4 = 0xfedcba9876543210L;
        long id5 = 0x0L;

        blocks.add(new LocatedBlock(new Block(id1), null));
        blocks.add(new LocatedBlock(new Block(id2), null));
        blocks.add(new LocatedBlock(new Block(id3), null));
        blocks.add(new LocatedBlock(new Block(id4), null));
        blocks.add(new LocatedBlock(new Block(id5), null));
        expectedResult.put("hdfs_B12", "*34567890abcdef*");
        expectedResult.put("hdfs_B01", "*23456789abcdef*");
        expectedResult.put("hdfs_Bfe", "*dcba0987654321*dcba9876543210*");
        expectedResult.put("hdfs_B00", "*00000000000000*");

        result = (Map)callPrivateMethod(instance, "buildBlockRequests", new Class[] {Collection.class}, new Object[] {blocks});
        assertEquals("The buildBlockRequests() method did not build the expected requests: ", expectedResult, result);

        blocks.clear();
        blocks.add(new LocatedBlock(new Block(id3), null));
        blocks.add(new LocatedBlock(new Block(id4), null));
        expectedResult.clear();
        expectedResult.put("hdfs_Bfe", "*dcba0987654321*dcba9876543210*");

        result = (Map)callPrivateMethod(instance, "buildBlockRequests", new Class[] {Collection.class}, new Object[] {blocks});
        assertEquals("The buildBlockRequests() method did not build the expected requests: ", expectedResult, result);

        blocks.clear();
        blocks.add(new LocatedBlock(new Block(id3), null));
        expectedResult.clear();
        expectedResult.put("hdfs_Bfe", "*dcba0987654321*");

        result = (Map)callPrivateMethod(instance, "buildBlockRequests", new Class[] {Collection.class}, new Object[] {blocks});
        assertEquals("The buildBlockRequests() method did not build the expected requests: ", expectedResult, result);
