Class org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks

Examples of org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator
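
UnderReplicatedBlocks is the NameNode structure that tracks blocks needing replication, bucketed into UnderReplicatedBlocks.LEVEL priority queues, with priority 0 the most urgent. Its inner BlockIterator walks either a single priority level (neededReplications.iterator(level)) or all levels in priority order. The excerpts below show its two typical uses: paginated listing of problem blocks, and selecting per-priority replication work.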


        // print a limited # of corrupt files per call
        int count = 0;
        ArrayList<CorruptFileBlockInfo> corruptFiles =
          new ArrayList<CorruptFileBlockInfo>();

        BlockIterator blkIterator = null;
        if (decommissioningOnly) {
          blkIterator = neededReplications.iterator(0);
        } else {
          blkIterator = getCorruptReplicaBlockIterator();
        }
       
        if (cookieTab == null) {
          cookieTab = new String[] { null };
        }
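        // The cookie is an opaque cursor: the number of blocks that
        // previous calls have already scanned past.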
        int skip = getIntCookie(cookieTab[0]);
        for(int i = 0; i < skip && blkIterator.hasNext(); i++) {
          blkIterator.next();
        }
       
        while (blkIterator.hasNext()) {
          Block blk = blkIterator.next();
          INode inode = blocksMap.getINode(blk);
          skip++;
          if (inode != null) {
            String src = FSDirectory.getFullPathName(inode);
            if (src.startsWith(path)) {
              // ... (excerpt continues; truncated)
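
This first excerpt (apparently FSNamesystem#listCorruptFileBlocks) implements stateless pagination: the caller hands back an opaque cookie, here just a stringified count, and the server re-skips that many entries before producing the next batch. Below is a minimal, self-contained sketch of the same pattern; the names PagedScanner, MAX_PER_CALL, and listPage are illustrative, not HDFS API.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class PagedScanner {
      private static final int MAX_PER_CALL = 100;

      // Parse the cursor cookie; treat null or garbage as "start at 0".
      static int getIntCookie(String cookie) {
        try {
          return cookie == null ? 0 : Math.max(0, Integer.parseInt(cookie));
        } catch (NumberFormatException e) {
          return 0;
        }
      }

      // Return up to MAX_PER_CALL items, resuming where the previous call
      // stopped; cookieTab[0] carries the cursor in and out, like the
      // String[] cookieTab parameter in the excerpt.
      static List<String> listPage(Iterable<String> source, String[] cookieTab) {
        if (cookieTab == null) {
          cookieTab = new String[] { null };
        }
        int skip = getIntCookie(cookieTab[0]);

        Iterator<String> it = source.iterator();
        for (int i = 0; i < skip && it.hasNext(); i++) {
          it.next();                           // skip items already returned
        }

        List<String> page = new ArrayList<String>();
        while (it.hasNext() && page.size() < MAX_PER_CALL) {
          page.add(it.next());
          skip++;                              // advance the cursor
        }
        cookieTab[0] = String.valueOf(skip);   // hand the cursor back
        return page;
      }

      public static void main(String[] args) {
        List<String> data = new ArrayList<String>();
        for (int i = 0; i < 250; i++) {
          data.add("blk_" + i);
        }
        String[] cookie = new String[] { null };
        System.out.println(listPage(data, cookie).size());   // 100
        System.out.println(listPage(data, cookie).get(0));   // blk_100
      }
    }

Note that the cursor counts positions, not identities: if the underlying collection changes between calls, a block can be skipped or listed twice, which is acceptable for a best-effort diagnostic listing.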


        // ... (excerpt begins mid-method)
          return blocksToReplicate;
        }

        for (int priority = 0; priority < UnderReplicatedBlocks.LEVEL; priority++) {
          // Go through all blocks that need replication at this priority.
          BlockIterator neededReplicationsIterator = neededReplications.iterator(priority);
          int numBlocks = neededReplications.size(priority);
          if (replIndex[priority] > numBlocks) {
            replIndex[priority] = 0;
          }
          // Skip to the first unprocessed block, which is at replIndex.
          for (int i = 0; i < replIndex[priority] && neededReplicationsIterator.hasNext(); i++) {
            neededReplicationsIterator.next();
          }
          // Number of blocks to process for this priority.
          int blocksToProcessIter = getQuotaForThisPriority(blocksToProcess,
              numBlocks, neededReplications.getSize(priority + 1));
          blocksToProcess -= blocksToProcessIter;

          for (int blkCnt = 0; blkCnt < blocksToProcessIter; blkCnt++, replIndex[priority]++) {
            if (!neededReplicationsIterator.hasNext()) {
              // Exhausted the queue; wrap around and start from the beginning.
              replIndex[priority] = 0;
              neededReplicationsIterator = neededReplications.iterator(priority);
              assert neededReplicationsIterator.hasNext() :
                "neededReplications should not be empty.";
            }

            BlockInfo block = neededReplicationsIterator.next();
            blocksToReplicate.get(priority).add(block);
          } // end for each block in this priority's quota
        } // end for each priority
      } // end try
      return blocksToReplicate;
      // ... (excerpt continues; truncated)
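
The second excerpt is the replication scheduler's round-robin scan: each priority level keeps a persistent cursor (replIndex), the scan skips to the cursor, takes at most a per-priority quota of blocks, and wraps to the front of the queue when the iterator runs dry. A stripped-down sketch of that bookkeeping follows; PriorityPicker and its members are illustrative stand-ins for the HDFS structures, and the quota split is simplified to "highest priority first" rather than the excerpt's getQuotaForThisPriority.

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class PriorityPicker {
      static final int LEVEL = 3;                  // number of priority queues
      private final List<List<String>> queues = new ArrayList<List<String>>();
      private final int[] cursor = new int[LEVEL]; // persists across calls, like replIndex

      public PriorityPicker() {
        for (int i = 0; i < LEVEL; i++) {
          queues.add(new ArrayList<String>());
        }
      }

      void add(int priority, String block) {
        queues.get(priority).add(block);
      }

      // Pick up to 'quota' blocks, highest priority (0) first.
      List<String> pick(int quota) {
        List<String> chosen = new ArrayList<String>();
        for (int priority = 0; priority < LEVEL && quota > 0; priority++) {
          List<String> q = queues.get(priority);
          if (q.isEmpty()) {
            continue;
          }
          if (cursor[priority] > q.size()) {
            cursor[priority] = 0;                  // queue shrank since last call
          }
          Iterator<String> it = q.listIterator(cursor[priority]);
          int take = Math.min(quota, q.size());    // at most one full pass per call
          for (int n = 0; n < take; n++, cursor[priority]++) {
            if (!it.hasNext()) {
              cursor[priority] = 0;                // wrap to the front, as in the excerpt
              it = q.iterator();
            }
            chosen.add(it.next());
          }
          quota -= take;
        }
        return chosen;
      }
    }

Because the cursor persists across calls, successive scans resume where the previous one stopped instead of repeatedly servicing the head of each queue and starving the tail.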

