
Examples of org.apache.hadoop.fs.BlockLocation
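A BlockLocation describes where one block of a file is stored: the datanode names (host:port), hostnames, network topology paths, the block's offset within the file and its length, plus, in newer constructors, cached hosts and a corrupt flag. Callers usually obtain instances from FileSystem.getFileBlockLocations(...); the snippets below come mostly from filesystem implementations and tests that construct BlockLocation directly. The following minimal sketch shows typical read-side usage; the class name and the default path are illustrative assumptions, not taken from the examples on this page.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {
  public static void main(String[] args) throws Exception {
    // Illustrative path; any file on the configured default FileSystem works.
    Path path = new Path(args.length > 0 ? args[0] : "/tmp/example.txt");
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus status = fs.getFileStatus(path);

    // One BlockLocation per block overlapping the requested byte range (here: the whole file).
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation block : blocks) {
      System.out.println("offset=" + block.getOffset()
          + " length=" + block.getLength()
          + " hosts=" + Arrays.toString(block.getHosts())
          + " names=" + Arrays.toString(block.getNames()));
    }
  }
}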


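In this fragment a key range is mapped onto blocks: the end offset for the range's last key is resolved, the covering span of the BlockLocation array is computed, and each non-empty block's length (via getOffset() and getLength()) is added to a block-distribution accumulator (bd).
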
        if (comparator.compare(begin, end) != 0)
          endOffset = reader.getOffsetForKey(end);
      }
      int startBlockIndex = (beginOffset == 0 ? 0 : getStartBlockIndex(startOffsets, beginOffset));
      BlockLocation l;
      int endBlockIndex = (end == null ? locations.length : endOffset == -1 ?
          startBlockIndex : getEndBlockIndex(startOffsets, endOffset));
      for (int ii = startBlockIndex; ii < endBlockIndex; ii++) {
        l = locations[ii];
        long blkBeginOffset = l.getOffset();
        long blkEndOffset = blkBeginOffset + l.getLength();
        if (blkEndOffset > blkBeginOffset) {
          bd.add(l, blkEndOffset - blkBeginOffset);
        }
      }
      return;
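Here defensive handling precedes per-block bookkeeping: a file that reports no BlockLocations gets a single empty BlockLocation() substituted, and (per the truncated comment) a non-splittable file is treated as one block spanning the full file length.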


      if (locations == null) {
        blocks = new OneBlockInfo[0];
      } else {

        if(locations.length == 0) {
          locations = new BlockLocation[] { new BlockLocation() };
        }

        if (!isSplitable) {
          // if the file is not splitable, just create the one block with
          // full file length
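A BlockLocation built directly from DatanodeInfo entries: the datanodes' hosts and names are gathered into arrays, combined with the block's start offset and size, and wrapped in a BlockInfo for the file.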

      DatanodeInfo d = locations[i];
      hosts[i] = d.getHost();
      names[i] = d.getName();
    }
   
    BlockLocation loc = new BlockLocation(
        names, hosts, b.getStartOffset(), b.getBlockSize());
    return new BlockInfo(loc, file);
  }
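Rack-aware construction: each datanode's name and network location are wrapped in a NodeBase to obtain its topology string, and the names, hosts, and racks are passed to the six-argument constructor along with the block's offset, size, and corrupt flag.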

        names[hCnt] = locations[hCnt].getName();
        NodeBase node = new NodeBase(names[hCnt],
                                     locations[hCnt].getNetworkLocation());
        racks[hCnt] = node.toString();
      }
      blkLocations[idx] = new BlockLocation(names, hosts, racks,
                                            blk.getStartOffset(),
                                            blk.getBlockSize(),
                                            blk.isCorrupt());
      idx++;
    }
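A KFS (CloudStore) binding maps per-block replica hints into BlockLocations, one per block-sized chunk of the requested range, with the hint array as hosts and the names left null; note that this excerpt reuses the requested start offset for every entry.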

      String[][] hints = kfsImpl.getDataLocation(srep, start, len);
      BlockLocation[] result = new BlockLocation[hints.length];
      long blockSize = getDefaultBlockSize();
      long length = len;
      for(int i=0; i < result.length; ++i) {
        result[i] = new BlockLocation(null, hints[i], start,
                                      length < blockSize ? length : blockSize);
        length -= blockSize;
      }
      return result;
    }
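A lookup helper that returns the index of the BlockLocation containing a given offset; an offset beyond the last block triggers an IllegalArgumentException reporting the file's byte range.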

      if ((blkLocations[i].getOffset() <= offset) &&
          (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
        return i;
      }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset +
                                       " is outside of file (0.." +
                                       fileLength + ")");
  }
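A test FileSystem that simulates missing replicas: for one designated file it replaces the first returned location with a BlockLocation whose host and name arrays are empty, preserving the original offset and length.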

      String name = stat.getPath().toUri().getPath();
      BlockLocation[] locs =
        super.getFileBlockLocations(stat, start, len);
      if (name.equals(fileWithMissingBlocks)) {
        System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
        locs[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
            new String[0], locs[0].getOffset(), locs[0].getLength()), null);
      }
      return locs;
    }
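Locations that include cached replicas: the hostnames of the block's cached DatanodeInfos are collected and passed to the seven-argument constructor together with transfer addresses, hosts, racks, offset, size, and the corrupt flag.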

      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
      String[] cachedHosts = new String[cachedLocations.length];
      for (int i=0; i<cachedLocations.length; i++) {
        cachedHosts[i] = cachedLocations[i].getHostName();
      }
      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
                                            racks,
                                            blk.getStartOffset(),
                                            blk.getBlockSize(),
                                            blk.isCorrupt());
      idx++;
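A Swift filesystem implementation of getFileBlockLocations: with no known object locations it returns one placeholder BlockLocation spanning the whole file, built from protocol constants; otherwise hosts come from the location URIs' hosts and names from their authorities.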

      String[] name = {SwiftProtocolConstants.BLOCK_LOCATION};
      String[] host = { "localhost" };
      String[] topology = {SwiftProtocolConstants.TOPOLOGY_PATH};
      return new BlockLocation[] {
        new BlockLocation(name, host, topology, 0, file.getLen())
      };
    }

    final String[] names = new String[locations.size()];
    final String[] hosts = new String[locations.size()];
    int i = 0;
    for (URI location : locations) {
      hosts[i] = location.getHost();
      names[i] = location.getAuthority();
      i++;
    }
    return new BlockLocation[]{
            new BlockLocation(names, hosts, 0, file.getLen())
    };
  }
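A stubbed getFileBlockLocations, evidently from test code: it returns a single BlockLocation with fixed datanode names and hosts, one cached host, an empty topology array, offset zero, the requested length, and corrupt set to false.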

    @Override
    public BlockLocation[] getFileBlockLocations(Path p, long start, long len)
        throws IOException {
      return new BlockLocation[] {
          new BlockLocation(new String[] { "localhost:50010", "otherhost:50010" },
              new String[] { "localhost", "otherhost" }, new String[] { "localhost" },
              new String[0], 0, len, false) };
    }


