Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block
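
The excerpts below appear to come from the HDFS datanode, its FSDataset block store, and the append/recovery tests; together they show how Block instances are created, used as map keys, and rewritten during block synchronization. A short illustrative sketch follows each excerpt.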


        //map each replica's DatanodeInfo back to its local DataNode instance
        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
        assertNotNull(datanodes[i]);
      }
     
      //verify BlockMetaDataInfo
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        checkMetaInfo(lastblock, idps[i]);
      }

      //setup random block sizes
      int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
      Integer[] newblocksizes = new Integer[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
      }
      if (forceOneBlockToZero) {
        newblocksizes[0] = 0;
      }
      DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes));

      //update blocks with random block sizes
      Block[] newblocks = new Block[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        DataNode dn = datanodes[i];
        FSDatasetTestUtil.truncateBlock(dn, lastblock, newblocksizes[i]);
        newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
            lastblock.getGenerationStamp());
        checkMetaInfo(newblocks[i], idps[i]);
      }

      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
      cluster.getNameNode().append(filestr, dfs.dfs.clientName);

      //block synchronization
      final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
      DataNode.LOG.info("primarydatanodeindex  =" + primarydatanodeindex);
      DataNode primary = datanodes[primarydatanodeindex];
      DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
      primary.recoverBlocks(new Block[]{lastblock}, new DatanodeInfo[][]{datanodeinfos}).join();

      BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
      int minsize = min(newblocksizes);
      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
      lastblock.setGenerationStamp(currentGS);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);
        assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
        assertEquals(minsize, updatedmetainfo[i].getNumBytes());
        assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
      }
    }
    finally {
      // ... (cleanup elided in this excerpt)
    }
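
After truncating each replica, the test rebuilds its Block with the new length but the original ID and generation stamp, and later asserts that recovery settles on the minimum replica length. A minimal sketch of that bookkeeping, using only the Block constructor and getters that appear above (the concrete values are invented):

import org.apache.hadoop.hdfs.protocol.Block;

public class TruncatedReplicas {
  public static void main(String[] args) {
    Block lastblock = new Block(5L, 4096L, 42L);
    long[] newblocksizes = {1024L, 0L, 2048L};   // per-replica truncated lengths

    // rebuild each replica's view of the block: same ID and stamp, new size
    Block[] newblocks = new Block[newblocksizes.length];
    long minsize = Long.MAX_VALUE;
    for (int i = 0; i < newblocksizes.length; i++) {
      newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
          lastblock.getGenerationStamp());
      minsize = Math.min(minsize, newblocksizes[i]);
    }

    // after synchronization every replica should report the minimum length
    System.out.println("expected synchronized length = " + minsize);
  }
}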

    //collect the datanodes that successfully apply the update
    List<DatanodeID> successList = new ArrayList<DatanodeID>();

    //stamp the recovered block with a fresh generation stamp from the namenode
    long generationstamp = namenode.nextGenerationStamp(block);
    Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp);

    for(BlockRecord r : syncList) {
      try {
        r.datanode.updateBlock(r.info.getBlock(), newblock, closeFile);
        successList.add(r.id);
      } catch (IOException e) {
        InterDatanodeProtocol.LOG.warn("Failed to updateBlock (newblock="
            + newblock + ", datanode=" + r.id + ")", e);
      }
    }

    if (!successList.isEmpty()) {
      DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]);

      //tell the namenode which replicas now carry the new stamp and length
      namenode.commitBlockSynchronization(block,
          newblock.getGenerationStamp(), newblock.getNumBytes(), closeFile, false,
          nlist);
      DatanodeInfo[] info = new DatanodeInfo[nlist.length];
      for (int i = 0; i < nlist.length; i++) {
        info[i] = new DatanodeInfo(nlist[i]);
      }
      // ... (remainder elided in this excerpt)
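
The loop above tolerates partial failure: a datanode that throws on updateBlock is logged and skipped, and the commit proceeds with whichever replicas succeeded. A self-contained sketch of that collect-the-survivors pattern (plain Java; the Target interface and its update() method are hypothetical stand-ins for the RPC):

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PartialSuccess {
  interface Target {
    String id();
    void update() throws IOException;  // stand-in for the updateBlock RPC
  }

  static List<String> updateAll(List<Target> targets) {
    List<String> successList = new ArrayList<String>();
    for (Target t : targets) {
      try {
        t.update();               // may fail for this target only
        successList.add(t.id());  // only survivors are committed afterwards
      } catch (IOException e) {
        System.err.println("Failed to update " + t.id() + ": " + e);
      }
    }
    return successList;
  }

  public static void main(String[] args) {
    Target ok = new Target() {
      public String id() { return "dn1"; }
      public void update() { }
    };
    Target bad = new Target() {
      public String id() { return "dn2"; }
      public void update() throws IOException { throw new IOException("down"); }
    };
    System.out.println(updateAll(Arrays.asList(ok, bad)));  // [dn1]
  }
}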

    //the public overload delegates to the internal implementation
    //(method head elided in this excerpt)
    return recoverBlock(block, keepLength, targets, false);
  }

  /** {@inheritDoc} */
  public Block getBlockInfo(Block block) throws IOException {
    //resolve the caller's block to the replica actually stored on this datanode
    Block stored = data.getStoredBlock(block.getBlockId());
    return stored;
  }
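
getBlockInfo returns the datanode's stored view of a block, which may differ from the caller's in length or generation stamp. A hedged sketch of how a caller might detect a stale replica from the returned Block (the comparison rule here is illustrative, not taken from Hadoop):

import org.apache.hadoop.hdfs.protocol.Block;

public class StaleReplicaCheck {
  // illustrative rule: a replica is stale if it is missing or carries
  // an older generation stamp than the block the client expects
  static boolean isStale(Block expected, Block stored) {
    return stored == null
        || stored.getGenerationStamp() < expected.getGenerationStamp();
  }

  public static void main(String[] args) {
    Block expected = new Block(7L, 512L, 10L);
    Block stored = new Block(7L, 512L, 9L);  // older stamp on disk
    System.out.println("stale? " + isStale(expected, stored));  // true
  }
}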


  /** Return the block file for the given ID */
  public synchronized File findBlockFile(long blockId) {
    final Block b = new Block(blockId);
    File blockfile = null;
    ActiveFile activefile = ongoingCreates.get(b);
    if (activefile != null) {
      blockfile = activefile.file;
    }
    // ... (remainder of the lookup elided in this excerpt)
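
findBlockFile queries ongoingCreates with a Block built from the ID alone, so the lookup can only work if key equality and hashing are driven by the block ID (with an unset generation stamp acting as a wildcard). A self-contained illustration of that idiom, using a stand-in key class rather than asserting Block's exact semantics:

import java.util.HashMap;
import java.util.Map;

public class IdKeyedLookup {
  // Stand-in for org.apache.hadoop.hdfs.protocol.Block: equality and
  // hashing on the ID alone, which is the behavior the lookup assumes.
  static final class BlockKey {
    final long id; final long len; final long stamp;
    BlockKey(long id) { this(id, 0, -1); }
    BlockKey(long id, long len, long stamp) { this.id = id; this.len = len; this.stamp = stamp; }
    @Override public boolean equals(Object o) {
      return o instanceof BlockKey && ((BlockKey) o).id == id;
    }
    @Override public int hashCode() { return Long.valueOf(id).hashCode(); }
  }

  public static void main(String[] args) {
    Map<BlockKey, String> ongoing = new HashMap<BlockKey, String>();
    ongoing.put(new BlockKey(42L, 4096L, 7L), "blk_42 (being written)");
    // an ID-only key still hits the fully specified entry
    System.out.println(ongoing.get(new BlockKey(42L)));
  }
}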

    //reconstruct the stored Block from its on-disk block and meta files
    File blockfile = findBlockFile(blkid);
    if (blockfile == null) {
      return null;
    }
    File metafile = findMetaFile(blockfile);
    Block block = new Block(blkid);
    return new Block(blkid, getVisibleLength(block),
        parseGenerationStamp(blockfile, metafile));
  }
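
parseGenerationStamp above recovers the stamp from the replica's meta file, whose name conventionally encodes both the block ID and the generation stamp (blk_&lt;id&gt;_&lt;genstamp&gt;.meta). An illustrative re-implementation of that parse under that naming assumption (not Hadoop's actual code):

public class MetaNameParse {
  // illustrative parser for names like "blk_42_7.meta"
  static long stampFromMetaName(String name) {
    // strip the "blk_" prefix and ".meta" suffix, then take the field
    // after the underscore separating block ID from generation stamp
    String core = name.substring("blk_".length(), name.length() - ".meta".length());
    return Long.parseLong(core.substring(core.indexOf('_') + 1));
  }

  public static void main(String[] args) {
    System.out.println(stampFromMetaName("blk_42_7.meta"));  // 7
  }
}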

  /**
   * @return ongoing create threads if there are any; otherwise, return null.
   */
  private synchronized List<Thread> tryUpdateBlock(
      Block oldblock, Block newblock) throws IOException {
    //wildcard stamp: match the stored replica whatever its current stamp is
    Block oldblockWildcardGS = new Block(
      oldblock.getBlockId(), oldblock.getNumBytes(),
      GenerationStamp.WILDCARD_STAMP);

    //check ongoing create threads
    ArrayList<Thread> activeThreads = getActiveThreads(oldblockWildcardGS);
    // ... (remainder elided in this excerpt)
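
tryUpdateBlock has to treat key updates with care: these excerpts suggest Block objects sit in maps such as ongoingCreates, and if the fields feeding equals()/hashCode() change while an object is a live HashMap key, its entry becomes unreachable. A minimal demonstration of that general pitfall (plain Java; the mutable key is for illustration only):

import java.util.HashMap;
import java.util.Map;

public class MutableKeyPitfall {
  static final class Key {
    long stamp;
    Key(long stamp) { this.stamp = stamp; }
    @Override public boolean equals(Object o) {
      return o instanceof Key && ((Key) o).stamp == stamp;
    }
    @Override public int hashCode() { return Long.valueOf(stamp).hashCode(); }
  }

  public static void main(String[] args) {
    Map<Key, String> m = new HashMap<Key, String>();
    Key k = new Key(7L);
    m.put(k, "replica");
    k.stamp = 8L;                            // mutate while it is a live key
    System.out.println(m.get(new Key(8L)));  // null: entry sits in the old bucket
    System.out.println(m.get(new Key(7L)));  // null: stored key no longer equals 7
  }
}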

            thread.interrupt();
          }
        }
        ongoingCreates.remove(b);
      }
      //paranoia: a wildcard-stamp key must never be stored, only used for lookup
      if (ongoingCreates.containsKey(new Block(
        b.getBlockId(), b.getNumBytes(), GenerationStamp.WILDCARD_STAMP))) {
        DataNode.LOG.error("Unexpected: wildcard ongoingCreates exists for block " + b);
      }
      FSVolume v = null;
      if (!isRecovery) {
        // ... (remainder elided in this excerpt)

  /**
   * Return a table of block data.
   */
  public Block[] getBlockReport() {
    TreeSet<Block> blockSet = new TreeSet<Block>();
    volumes.getBlockInfo(blockSet);
    Block[] blockTable = new Block[blockSet.size()];
    int i = 0;
    for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
      blockTable[i] = it.next();
    }
    return blockTable;
  }
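
getBlockReport copies the sorted set into an array by hand; Collection.toArray does the same in one call. A self-contained equivalent, relying on the same natural ordering of Block that the original TreeSet already assumes:

import java.util.TreeSet;
import org.apache.hadoop.hdfs.protocol.Block;

public class SortedReport {
  public static void main(String[] args) {
    TreeSet<Block> blockSet = new TreeSet<Block>();
    blockSet.add(new Block(3L, 10L, 1L));
    blockSet.add(new Block(1L, 10L, 1L));
    blockSet.add(new Block(2L, 10L, 1L));

    // equivalent to the manual iterator copy above
    Block[] blockTable = blockSet.toArray(new Block[blockSet.size()]);
    for (Block b : blockTable) {
      System.out.println(b);
    }
  }
}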

    // remove related blocks
    long mlsec = System.currentTimeMillis();
    synchronized (this) {
      Iterator<Block> ib = volumeMap.keySet().iterator();
      while(ib.hasNext()) {
        Block b = ib.next();
        total_blocks ++;
        // check if the volume this block belongs to is still valid
        FSVolume vol = volumeMap.get(b).getVolume();
        for(FSVolume fv: failed_vols) {
          if(vol == fv) {
            DataNode.LOG.warn("Removing replica info for block " +
                b.getBlockId() + " on failed volume " +
                vol.dataDir.dir.getAbsolutePath());
            ib.remove();
            removed_blocks++;
            break;
          }
        // ... (remainder elided in this excerpt)
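
Pruning volumeMap during iteration only works through the iterator itself; calling remove on the map mid-loop would throw ConcurrentModificationException. A minimal demonstration of the safe pattern with plain types:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class SafeRemoval {
  public static void main(String[] args) {
    Map<Long, String> volumeMap = new HashMap<Long, String>();
    volumeMap.put(1L, "volA");
    volumeMap.put(2L, "volB");  // pretend volB failed
    volumeMap.put(3L, "volB");

    Iterator<Long> ib = volumeMap.keySet().iterator();
    while (ib.hasNext()) {
      Long id = ib.next();
      if ("volB".equals(volumeMap.get(id))) {
        ib.remove();  // safe: removal goes through the iterator
      }
    }
    System.out.println(volumeMap);  // {1=volA}
  }
}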

  }

  @Override
  public BlockRecoveryInfo startBlockRecovery(long blockId)
      throws IOException {   
    Block stored = getStoredBlock(blockId);

    if (stored == null) {
      return null;
    }
   
    // It's important that this loop not be synchronized - otherwise
    // it will deadlock against the writer threads it joins!
    while (true) {
      DataNode.LOG.debug(
          "Interrupting active writer threads for block " + stored);
      List<Thread> activeThreads = getActiveThreads(stored);
      if (activeThreads == null) break;
      if (interruptAndJoinThreads(activeThreads))
        break;
    }
   
    synchronized (this) {
      ActiveFile activeFile = ongoingCreates.get(stored);
      boolean isRecovery = (activeFile != null) && activeFile.wasRecoveredOnStartup;

      BlockRecoveryInfo info = new BlockRecoveryInfo(
          stored, isRecovery);
      if (DataNode.LOG.isDebugEnabled()) {
        DataNode.LOG.debug("getBlockMetaDataInfo successful block=" + stored +
                  " length " + stored.getNumBytes() +
                  " genstamp " + stored.getGenerationStamp());
      }
 
      // paranoia! verify that the contents of the stored block
      // matches the block file on disk.
      validateBlockMetadata(stored);
      // ... (remainder elided in this excerpt)
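
The comment in startBlockRecovery flags a classic deadlock: if the interrupt-and-join loop ran under the object lock, a writer thread that needs that same lock to finish could never exit, and join() would wait forever. A self-contained sketch of the safe shape (hypothetical names):

import java.util.ArrayList;
import java.util.List;

public class InterruptJoin {
  private final List<Thread> activeWriters = new ArrayList<Thread>();

  // Called with NO lock held: join() may block for as long as the
  // writer needs, and the writer may need our monitor to finish.
  void stopWriters() throws InterruptedException {
    List<Thread> snapshot;
    synchronized (this) {                  // lock only to copy the list
      snapshot = new ArrayList<Thread>(activeWriters);
    }
    for (Thread t : snapshot) {
      t.interrupt();
      t.join();                            // outside synchronized(this)
    }
    synchronized (this) {
      activeWriters.removeAll(snapshot);   // final bookkeeping under lock
    }
  }

  public static void main(String[] args) throws InterruptedException {
    final InterruptJoin ij = new InterruptJoin();
    Thread writer = new Thread(new Runnable() {
      public void run() {
        try { Thread.sleep(60000); } catch (InterruptedException e) { /* asked to stop */ }
      }
    });
    synchronized (ij) { ij.activeWriters.add(writer); }
    writer.start();
    ij.stopWriters();
    System.out.println("writers stopped");
  }
}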
