Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block
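
The excerpts below all drive the same small surface: a Block is the triple (block ID, length in bytes, generation stamp). As a minimal sketch of that surface, using only the constructor and accessors that appear in the excerpts (the class name BlockBasics is ours):

  import org.apache.hadoop.hdfs.protocol.Block;

  public class BlockBasics {
    public static void main(String[] args) {
      // a Block is (blockId, numBytes, generationStamp)
      Block b = new Block(1L, 1024L, 100L);
      System.out.println(b.getBlockName());  // block names are formed as "blk_" + id
      // set(...) rewrites all three fields in place
      b.set(b.getBlockId(), 2048L, b.getGenerationStamp());
      System.out.println(b.getNumBytes());   // 2048
    }
  }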


    // Edit-log deserialization: read a length-prefixed array of Blocks.
    // The opening lines were truncated in the excerpt; the signature and the
    // numBlocks bounds checks below are a hedged reconstruction.
    private static Block[] readBlocks(DataInputStream in) throws IOException {
      int numBlocks = in.readInt();
      if (numBlocks < 0) {
        throw new IOException("invalid number of blocks: " + numBlocks);
      } else if (numBlocks > MAX_BLOCKS) {
        throw new IOException("invalid number of blocks: " + numBlocks +
            ".  The maximum number of blocks per file is " + MAX_BLOCKS);
      }
      Block[] blocks = new Block[numBlocks];
      for (int i = 0; i < numBlocks; i++) {
        Block blk = new Block();  // default-construct, then fill from the stream
        blk.readFields(in);       // Block implements Writable
        blocks[i] = blk;
      }
      return blocks;
    }
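readBlocks() relies on Block being a Hadoop Writable: a default-constructed instance is populated from a DataInput. A minimal round-trip sketch under that assumption (the class name BlockRoundTrip is ours):

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.hdfs.protocol.Block;

  public class BlockRoundTrip {
    public static void main(String[] args) throws IOException {
      Block original = new Block(42L, 1024L, 7L);

      // serialize the three fields
      ByteArrayOutputStream buf = new ByteArrayOutputStream();
      original.write(new DataOutputStream(buf));

      // deserialize exactly as readBlocks() does for each array element
      Block copy = new Block();
      copy.readFields(new DataInputStream(
          new ByteArrayInputStream(buf.toByteArray())));

      System.out.println(copy.getBlockId() == original.getBlockId()); // true
    }
  }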


  // Rebuild a Block from an XML stanza (the inverse of the XML edits dump):
  // the three persisted fields map straight onto the three-argument constructor.
  public static Block blockFromXml(Stanza st)
      throws InvalidXmlException {
    long blockId = Long.parseLong(st.getValue("BLOCK_ID"));
    long numBytes = Long.parseLong(st.getValue("NUM_BYTES"));
    long generationStamp = Long.parseLong(st.getValue("GENSTAMP"));
    return new Block(blockId, numBytes, generationStamp);
  }
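For orientation, a stanza consumed by blockFromXml() carries the three element names read above; a hedged illustration (the enclosing element name and all values are assumptions):

  <BLOCK>
    <BLOCK_ID>1073741825</BLOCK_ID>
    <NUM_BYTES>67108864</NUM_BYTES>
    <GENSTAMP>1001</GENSTAMP>
  </BLOCK>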

  /**
   * Add a new block into the given INodeFile
   */
  private void addNewBlock(FSDirectory fsDir, AddBlockOp op, INodeFile file)
      throws IOException {
    BlockInfo[] oldBlocks = file.getBlocks();
    Block pBlock = op.getPenultimateBlock();
    Block newBlock = op.getLastBlock();

    if (pBlock != null) { // the penultimate block is not null
      Preconditions.checkState(oldBlocks != null && oldBlocks.length > 0);
      // compare pBlock with the last block of oldBlocks
      Block oldLastBlock = oldBlocks[oldBlocks.length - 1];
      if (oldLastBlock.getBlockId() != pBlock.getBlockId()
          || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
        throw new IOException(
            "Mismatched block IDs or generation stamps for the old last block of file "
                + op.getPath() + ", the old last block is " + oldLastBlock
                + ", and the block read from editlog is " + pBlock);
      }

      // the edit log carries the authoritative length for the old last block
      oldLastBlock.setNumBytes(pBlock.getNumBytes());
      if (oldLastBlock instanceof BlockInfoUnderConstruction) {
        fsNamesys.getBlockManager().forceCompleteBlock(file,
            (BlockInfoUnderConstruction) oldLastBlock);
        fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
      }
    }
    // the remainder of the method (not shown in the excerpt) appends newBlock
    // to the file's block list
  }
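The identity check in addNewBlock() is worth isolating: during edit-log replay, two Blocks are treated as the same on-disk state only when both the block ID and the generation stamp agree. A hedged helper capturing just that predicate (the name sameBlockState is ours):

  // mirrors the comparison in addNewBlock(): ID and generation stamp must
  // both match; the length is reconciled separately via setNumBytes()
  static boolean sameBlockState(Block a, Block b) {
    return a.getBlockId() == b.getBlockId()
        && a.getGenerationStamp() == b.getGenerationStamp();
  }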

    // tail of a test fixture: cache the first block-pool service actor and the
    // storage UUID of the DataNode's first volume
    actor = bpos.getBPServiceActors().get(0);
    storageUuid = singletonDn.getFSDataset().getVolumes().get(0).getStorageID();
  }

  // test helper: a Block assembled from fixed dummy constants
  private static Block getDummyBlock() {
    return new Block(DUMMY_BLOCK_ID, DUMMY_BLOCK_LENGTH, DUMMY_BLOCK_GENSTAMP);
  }

    // a simulated replica descriptor (a SimulatedFSDataset-style test double)
    private boolean finalized = false; // if not finalized => ongoing creation
    SimulatedOutputStream oStream = null;
    private long bytesAcked;
    private long bytesRcvd;
    BInfo(String bpid, Block b, boolean forWriting) throws IOException {
      theBlock = new Block(b);    // defensive copy of the caller's block
      if (theBlock.getNumBytes() < 0) {
        theBlock.setNumBytes(0);  // normalize an unknown length to zero
      }
      if (!storage.alloc(bpid, theBlock.getNumBytes())) {
        // expected length - actual length may
        // turn out larger; if the simulated storage cannot reserve the
        // expected bytes, fail the creation (tail reconstructed; hedged)
        throw new IOException("creating block: no free space in simulated storage");
      }
    }

      LOG.debug("Number of blocks allocated " + blocks.size());
    }
    long[] oldLengths = new long[blocks.size()];
    int tempLen;
    for (int i = 0; i < blocks.size(); i++) {
      Block b = blocks.get(i);
      if(LOG.isDebugEnabled()) {
        LOG.debug("Block " + b.getBlockName() + " before\t" + "Size " +
            b.getNumBytes());
      }
      oldLengths[i] = b.getNumBytes();
      if(LOG.isDebugEnabled()) {
        LOG.debug("Setting new length");
      }
      tempLen = rand.nextInt(BLOCK_SIZE);
      b.set(b.getBlockId(), tempLen, b.getGenerationStamp());
      if(LOG.isDebugEnabled()) {
        LOG.debug("Block " + b.getBlockName() + " after\t " + "Size " +
            b.getNumBytes());
      }
    }
    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    // rebuild the reports from the DataNode's (now modified) replica list and
    // send them to the NameNode
    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
    sendBlockReports(dnR, poolId, reports);

    List<LocatedBlock> blocksAfterReport =
      DFSTestUtil.getAllBlocks(fs.open(filePath));

    if(LOG.isDebugEnabled()) {
      LOG.debug("After mods: Number of blocks allocated " +
          blocksAfterReport.size());
    }

    // the NameNode's view of each block's length must be unchanged by the
    // modified report
    for (int i = 0; i < blocksAfterReport.size(); i++) {
      ExtendedBlock b = blocksAfterReport.get(i).getBlock();
      assertEquals("Length of " + i + "th block is incorrect",
        oldLengths[i], b.getNumBytes());
    }
  }

    startUpCluster();

    try {
      // write a test file of 12 * bytesChkSum bytes and locate its last block
      ArrayList<Block> blocks =
        writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
      Block bl = findBlock(filePath, 12 * bytesChkSum);
      // kick off the BlockChecker thread to run concurrently with this test
      BlockChecker bc = new BlockChecker(filePath);
      bc.start();

      // wait for a temporary replica of bl on DataNode DN_N1
      waitForTempReplica(bl, DN_N1);


    // write the file, then start a second node so that it is "older" than the original

    try {
      writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);

      Block bl = findBlock(filePath, 12 * bytesChkSum);
      BlockChecker bc = new BlockChecker(filePath);
      bc.start();

      waitForTempReplica(bl, DN_N1);


    // convert LocatedBlocks into plain local Blocks, skipping the positions to
    // omit (the loop header and the omission test were truncated; this is a
    // hedged reconstruction, with 'positionsToOmit' as an assumed name)
    for (int i = 0; i < locatedBlks.size(); i++) {
      if (positionsToOmit != null && positionsToOmit.contains(i)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(i + " block to be omitted");
        }
        continue;
      }
      // strip the pool ID and copy, so the new list is independent of the
      // LocatedBlock instances
      newList.add(new Block(locatedBlks.get(i).getBlock().getLocalBlock()));
    }
    return newList;
  }
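Two conversions do the work in that helper: ExtendedBlock.getLocalBlock() drops the block-pool ID, and Block's copy constructor (also used by BInfo above) takes an independent snapshot of the triple. A short sketch (the variable names are ours):

  // given a LocatedBlock lb obtained from the NameNode:
  ExtendedBlock eb = lb.getBlock();   // pool ID + (blockId, numBytes, genstamp)
  Block local = eb.getLocalBlock();   // just (blockId, numBytes, genstamp)
  Block snapshot = new Block(local);  // independent copy: later set(...) calls
                                      // on 'local' do not affect it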

    // raise log levels for the DataNode and this test base class
    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) BlockReportTestBase.LOG).getLogger().setLevel(Level.ALL);
  }

  // ask the NameNode for the block locations of the file at 'path' (offset
  // FILE_START, length 'size') and return its last block
  private Block findBlock(Path path, long size) throws IOException {
    List<LocatedBlock> lbs =
        cluster.getNameNodeRpc()
            .getBlockLocations(path.toString(), FILE_START, size)
            .getLocatedBlocks();
    LocatedBlock lb = lbs.get(lbs.size() - 1);
    // tail reconstructed (hedged): return a pool-independent copy of the block
    return new Block(lb.getBlock().getLocalBlock());
  }
