Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.Block
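The snippets below come from DataNode-side classes (the block scanner and FSDataset) and exercise a handful of Block constructors and accessors: a Block pairs a numeric block ID with a byte length and a generation stamp. As rough orientation, here is a minimal sketch rather than the full API; it assumes the String overload of isBlockFilename that the snippets call:

    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockSketch {
      public static void main(String[] args) {
        // ID-only constructor: length and generation stamp stay at their
        // defaults until filled in from on-disk metadata (see the
        // setNumBytes/setGenerationStamp snippet further down).
        Block b = new Block(4242L);
        b.setNumBytes(64L * 1024 * 1024);
        b.setGenerationStamp(1001L);

        // isBlockFilename screens directory listings for block data files
        // ("blk_<id>"), skipping their ".meta" companions.
        System.out.println(Block.isBlockFilename("blk_4242"));      // expected true
        System.out.println(Block.isBlockFilename("blk_4242.meta")); // expected false
      }
    }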


    // Bandwidth (bytes/sec) needed to finish the remaining bytes in the
    // time left (ms), clamped to [MIN_SCAN_RATE, MAX_SCAN_RATE].
    long bw = Math.max(bytesLeft * 1000 / timeLeft, MIN_SCAN_RATE);
    throttler.setBandwidth(Math.min(bw, MAX_SCAN_RATE));
  }
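
  // Sketch: the same clamp as a standalone helper. MIN_RATE/MAX_RATE are
  // hypothetical stand-ins for the scanner's MIN_SCAN_RATE/MAX_SCAN_RATE;
  // the values below are illustrative only.
  private static final long MIN_RATE = 1L * 1024 * 1024;  // hypothetical 1 MB/s floor
  private static final long MAX_RATE = 8L * 1024 * 1024;  // hypothetical 8 MB/s ceiling

  static long clampedScanRate(long bytesLeft, long msLeft) {
    long needed = bytesLeft * 1000 / msLeft;  // bytes/sec to finish on schedule
    return Math.min(Math.max(needed, MIN_RATE), MAX_RATE);
  }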
 
  private void verifyBlock(BlockScanInfo blockinfo) {
    Block block = blockinfo.block;
    BlockSender blockSender = null;

    /* In case of failure, attempt to read a second time to reduce
     * transient errors. How do we flush block data from kernel
     * buffers before the second read?
     */
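The comment above describes a retry-on-transient-failure pattern. A minimal sketch of that shape, under stated assumptions: readAndChecksumBlock is a hypothetical stand-in for the BlockSender-driven read the scanner actually performs.

    boolean verified = false;
    for (int attempt = 0; attempt < 2 && !verified; attempt++) {
      try {
        readAndChecksumBlock(block);  // hypothetical: stream the block, verifying checksums
        verified = true;
      } catch (IOException e) {
        // A first failure may be transient; retry once before
        // marking the block as suspect.
      }
    }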


          }
          LogEntry entry = LogEntry.parseEntry(reader.next());
          if (entry != null) {
            updateBlockInfo(entry);
            // Blocks already verified within the current scan period need no
            // rescan; subtract their size from the remaining workload.
            if (now - entry.verificationTime < scanPeriod) {
              BlockScanInfo info = blockMap.get(new Block(entry.blockId, 0,
                  entry.genStamp));
              if (info != null) {
                if (!processedBlocks.contains(entry.blockId)) {
                  bytesLeft -= info.block.getNumBytes();
                  processedBlocks.add(entry.blockId);
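Note the lookup key above: a Block constructed with a zero length. This leans on Block equality keying on the block ID (and generation stamp) while ignoring the byte count; that is an assumption about this branch's equals/hashCode, but it is consistent with the 0 placeholder:

    Block a = new Block(4242L, 0L, 1001L);
    Block b = new Block(4242L, 134217728L, 1001L);  // same ID/genstamp, different length
    System.out.println(a.equals(b));  // expected true: length is not part of identity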

        if (!blockFiles[i].isDirectory()) {
          // get each block in the rbwDir directory
          if (Block.isBlockFilename(blockFileNames[i])) {
            long genStamp = FSDataset.getGenerationStampFromFile(
                blockFileNames, blockFileNames[i]);
            Block block =
                new Block(blockFiles[i], blockFiles[i].length(), genStamp);

            // add this block to block set
            blockSet.add(block);
            if (DataNode.LOG.isDebugEnabled()) {
              DataNode.LOG.debug("recoverBlocksBeingWritten for block " + block);

          } else if (Block.isBlockFilename(fileName)) {
            numBlocks++;
            if (volume != null) {
              long blkSize = file.length();
              long genStamp = FSDataset.getGenerationStampFromFile(filesNames, fileName);
              volumeMap.add(namespaceId, new Block(file, blkSize, genStamp),
                  new DatanodeBlockInfo(volume, file, blkSize));
            }
          }
        }
        if (numChildren > 0) {

     
      for (int i = 0; i < blockFiles.length; i++) {
        if (Block.isBlockFilename(blockFilesNames[i])) {
          long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
              blockFilesNames[i]);
          blockSet.add(new Block(blockFiles[i], blockFiles[i].length(), genStamp));
        }
      }
    }

      String[] blockFilesNames = getFileNames(blockFiles);     
      for (int i = 0; i < blockFiles.length; i++) {
        if (Block.isBlockFilename(blockFilesNames[i])) {
          long genStamp = FSDataset.getGenerationStampFromFile(blockFilesNames,
              blockFilesNames[i]);
          Block block = new Block(blockFiles[i], blockFiles[i].length(), genStamp);
          blockSet.add(new BlockAndFile(blockFiles[i].getAbsoluteFile(), block));
        }
      }
    }
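Three of the snippets above share one directory-scan idiom: list a block directory, keep the entries Block.isBlockFilename accepts, and recover each block's generation stamp from the name of its companion meta file. A hedged sketch of that last step, assuming the blk_<id>_<genStamp>.meta naming this Hadoop line uses; genStampFromMetaName is a hypothetical stand-in for getGenerationStampFromFile:

    // e.g. "blk_4242_1001.meta" -> 1001
    static long genStampFromMetaName(String metaName) {
      int idStart = metaName.indexOf('_');
      int stampStart = metaName.indexOf('_', idStart + 1);
      int dot = metaName.lastIndexOf('.');
      return Long.parseLong(metaName.substring(stampStart + 1, dot));
    }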

  /** Return the block file for the given ID */
  public File findBlockFile(int namespaceId, long blockId) {
    lock.readLock().lock();
    try {
      final Block eb = new Block(blockId);
      File blockfile = null;
      ActiveFile activefile = volumeMap.getOngoingCreates(namespaceId, eb);
      if (activefile != null) {
        blockfile = activefile.file;
      }
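A hypothetical call site for findBlockFile; dataset stands in for the surrounding FSDataset instance:

    File f = dataset.findBlockFile(namespaceId, 4242L);
    if (f == null) {
      // the ID is neither an ongoing create nor a finalized block on this DataNode
    }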

      }
      File metafile = findMetaFile(blockfile, true);
      if (metafile == null) {
        return null;
      }
      Block block = new Block(blkid);
      // Fill in the length: either the bytes actually on disk, or only the
      // bytes already visible (acknowledged) to readers for a block that is
      // still being written.
      if (useOnDiskLength) {
        block.setNumBytes(getOnDiskLength(namespaceId, block));
      } else {
        block.setNumBytes(getVisibleLength(namespaceId, block));
      }
      block.setGenerationStamp(parseGenerationStamp(blockfile, metafile));
      return block;
    } finally {
      lock.readLock().unlock();
    }
  }
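This snippet reads like the body of a getStoredBlock-style lookup; a hypothetical call, assuming that name and a (namespaceId, blockId, useOnDiskLength) signature:

    Block stored = dataset.getStoredBlock(namespaceId, blkid, false);
    if (stored != null) {
      long visibleLen = stored.getNumBytes();        // reader-visible length
      long genStamp = stored.getGenerationStamp();   // parsed from the meta file name
    }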

   * @throws IOException
  */
  public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
    LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
    volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
    Block[] blockTable = new Block[blockSet.size()];
    int i = 0;
    for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
      blockTable[i] = it.next();
    }
    return blockTable;

    // getBlockReport doesn't take the global lock, as we believe it is
    // OK to get some inconsistent partial results. The inconsistent
    // information will eventually be fixed by the next incremental block report.
    LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
    volumes.getBlockInfo(namespaceId, blockSet);
    Block[] blockTable = new Block[blockSet.size()];
    int i = 0;
    for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
      blockTable[i] = it.next();
    }
    return blockTable;
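The comment above captures a deliberate tradeoff: the report is assembled without the global lock, so it is a point-in-time approximation rather than an exact snapshot. A generic sketch of the same pattern; liveBlocks is a hypothetical concurrently updated map, not a name from this code:

    Set<Block> snapshot = new HashSet<Block>();
    snapshot.addAll(liveBlocks.keySet());  // no lock held: cheap, but writers may interleave
    // Anything stale or missing here is reconciled by a later (incremental) block report.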
