Examples of BlockIterator


Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

          missingBlocksInPrevIter = 0;
          return blocksToReplicate;
        }

        // Go through all blocks that need replications.
        BlockIterator neededReplicationsIterator = neededReplications
            .iterator();
        // skip to the first unprocessed block, which is at replIndex
        for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
          neededReplicationsIterator.next();
        }
        // # of blocks to process equals either twice the number of live
        // data-nodes or the number of under-replicated blocks whichever is less
        blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

        for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
          if (!neededReplicationsIterator.hasNext()) {
            // start from the beginning
            replIndex = 0;
            missingBlocksInPrevIter = missingBlocksInCurIter;
            missingBlocksInCurIter = 0;
            blocksToProcess = Math.min(blocksToProcess, neededReplications
                .size());
            if (blkCnt >= blocksToProcess)
              break;
            neededReplicationsIterator = neededReplications.iterator();
            assert neededReplicationsIterator.hasNext() : "neededReplications should not be empty.";
          }

          Block block = neededReplicationsIterator.next();
          int priority = neededReplicationsIterator.getPriority();
          if (priority < 0 || priority >= blocksToReplicate.size()) {
            FSNamesystem.LOG.warn("Unexpected replication priority: "
                + priority + " " + block);
          } else {
            blocksToReplicate.get(priority).add(block);
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

    ArrayList<CorruptFileBlockInfo> corruptFiles = new ArrayList<CorruptFileBlockInfo>();
   
    if (startBlockAfter != null) {
      startBlockId = Block.filename2id(startBlockAfter);
    }
    BlockIterator blkIterator = blockManager.getCorruptReplicaBlockIterator();
    while (blkIterator.hasNext()) {
      Block blk = blkIterator.next();
      INode inode = blockManager.getINode(blk);
      if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
        String src = FSDirectory.getFullPathName(inode);
        if (((startBlockAfter == null) || (blk.getBlockId() > startBlockId))
            && (src.startsWith(path))) {
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

          missingBlocksInPrevIter = 0;
          return blocksToReplicate;
        }

        // Go through all blocks that need replications.
        BlockIterator neededReplicationsIterator = neededReplications
            .iterator();
        // skip to the first unprocessed block, which is at replIndex
        for (int i = 0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
          neededReplicationsIterator.next();
        }
        // # of blocks to process equals either twice the number of live
        // data-nodes or the number of under-replicated blocks whichever is less
        blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

        for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
          if (!neededReplicationsIterator.hasNext()) {
            // start from the beginning
            replIndex = 0;
            missingBlocksInPrevIter = missingBlocksInCurIter;
            missingBlocksInCurIter = 0;
            blocksToProcess = Math.min(blocksToProcess, neededReplications
                .size());
            if (blkCnt >= blocksToProcess)
              break;
            neededReplicationsIterator = neededReplications.iterator();
            assert neededReplicationsIterator.hasNext() : "neededReplications should not be empty.";
          }

          Block block = neededReplicationsIterator.next();
          int priority = neededReplicationsIterator.getPriority();
          if (priority < 0 || priority >= blocksToReplicate.size()) {
            FSNamesystem.LOG.warn("Unexpected replication priority: "
                + priority + " " + block);
          } else {
            blocksToReplicate.get(priority).add(block);
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

        missingBlocksInPrevIter = 0;
        return blocksToReplicate;
      }
     
      // Go through all blocks that need replications.
      BlockIterator neededReplicationsIterator = neededReplications.iterator();
      // skip to the first unprocessed block, which is at replIndex
      for(int i=0; i < replIndex && neededReplicationsIterator.hasNext(); i++) {
        neededReplicationsIterator.next();
      }
      // # of blocks to process equals either twice the number of live
      // data-nodes or the number of under-replicated blocks whichever is less
      blocksToProcess = Math.min(blocksToProcess, neededReplications.size());

      for (int blkCnt = 0; blkCnt < blocksToProcess; blkCnt++, replIndex++) {
        if( ! neededReplicationsIterator.hasNext()) {
          // start from the beginning
          replIndex = 0;
          missingBlocksInPrevIter = missingBlocksInCurIter;
          missingBlocksInCurIter = 0;
          blocksToProcess = Math.min(blocksToProcess, neededReplications.size());
          if(blkCnt >= blocksToProcess)
            break;
          neededReplicationsIterator = neededReplications.iterator();
          assert neededReplicationsIterator.hasNext() :
                                  "neededReplications should not be empty.";
        }

        Block block = neededReplicationsIterator.next();
        int priority = neededReplicationsIterator.getPriority();
        if (priority < 0 || priority >= blocksToReplicate.size()) {
          LOG.warn("Unexpected replication priority: " + priority + " " + block);
        } else {
          blocksToReplicate.get(priority).add(block);
        }
View Full Code Here

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator

          return blocksToReplicate;
        }

        for (int priority = 0; priority<UnderReplicatedBlocks.LEVEL; priority++) {
        // Go through all blocks that need replications of priority
        BlockIterator neededReplicationsIterator = neededReplications.iterator(priority);
        int numBlocks = neededReplications.size(priority);
        if (replIndex[priority] > numBlocks) {
          replIndex[priority] = 0;
        }
        // skip to the first unprocessed block, which is at replIndex
        for (int i = 0; i < replIndex[priority] && neededReplicationsIterator.hasNext(); i++) {
          neededReplicationsIterator.next();
        }
        // # of blocks to process for this priority
        int blocksToProcessIter = getQuotaForThisPriority(blocksToProcess,
            numBlocks, neededReplications.getSize(priority+1));
        blocksToProcess -= blocksToProcessIter;

        for (int blkCnt = 0; blkCnt < blocksToProcessIter; blkCnt++, replIndex[priority]++) {
          if (!neededReplicationsIterator.hasNext()) {
            // start from the beginning
            replIndex[priority] = 0;
            neededReplicationsIterator = neededReplications.iterator(priority);
            assert neededReplicationsIterator.hasNext() :
              "neededReplications should not be empty.";
          }

          Block block = neededReplicationsIterator.next();
          blocksToReplicate.get(priority).add(block);
        } // end for
        }
      } // end try
      return blocksToReplicate;
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle, Inc. Contact coftware#gmail.com.