
Examples of org.apache.hadoop.fs.BlockLocation
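
The fragments below are pulled from larger source files, so most begin and end mid-method. As orientation, here is a minimal, self-contained sketch (not taken from any of the snippets; the path is a placeholder) of the typical BlockLocation workflow: ask a FileSystem for a file's block locations, then read back each block's offset, length and hosts.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Placeholder path -- point this at a file that actually exists.
    Path file = new Path("/tmp/example.txt");
    FileStatus status = fs.getFileStatus(file);

    // One BlockLocation per block overlapping the requested byte range.
    BlockLocation[] locs = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation loc : locs) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength()
          + " hosts=" + String.join(",", loc.getHosts()));
    }
  }
}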


      long len) throws IOException {

    // In some places (e.g. FileInputFormat) this BlockLocation is used to
    // figure out sizes/offsets and so a completely blank one will not work.
    String [] hosts = {"DUMMY_HOST"};
    return new BlockLocation[]{new BlockLocation(null, hosts, 0, file.getLen())};
  }


        names[hCnt] = locations[hCnt].getName();
        NodeBase node = new NodeBase(names[hCnt],
                                     locations[hCnt].getNetworkLocation());
        racks[hCnt] = node.toString();
      }
      blkLocations[idx] = new BlockLocation(names, hosts, racks,
                                            blk.getStartOffset(),
                                            blk.getBlockSize());
      idx++;
    }
    return blkLocations;
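
For reference, the constructor used above takes parallel arrays of datanode names (transfer addresses), host names and NodeBase-style rack paths, plus the block's starting offset and size. A minimal sketch with made-up values (the addresses, hostnames and rack paths are illustrative only):

String[] names = {"10.0.0.1:9866", "10.0.0.2:9866"};                // datanode addresses (illustrative)
String[] hosts = {"node1.example.com", "node2.example.com"};        // hostnames (illustrative)
String[] racks = {"/rack1/10.0.0.1:9866", "/rack2/10.0.0.2:9866"};  // NodeBase-style rack paths (illustrative)
BlockLocation loc = new BlockLocation(names, hosts, racks, 0L, 128 * 1024 * 1024L);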

      DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

      // Decommission one of the hosts with the block, this should cause
      // the block to get replicated to another host on the same rack,
      // otherwise the rack policy is violated.
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      String name = locs[0].getNames()[0];
      DFSTestUtil.writeFile(localFileSys, excludeFile, name);
      ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
      DFSTestUtil.waitForDecommission(fs, name);

      fs.setReplication(filePath, REPLICATION_FACTOR);

      // Decommission one of the hosts with the block that is not on
      // the lone host on rack2 (if we decommission that host it would
      // be impossible to respect the rack policy).
      BlockLocation locs[] = fs.getFileBlockLocations(
          fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
      for (String top : locs[0].getTopologyPaths()) {
        if (!top.startsWith("/rack2")) {
          String name = top.substring("/rack1".length()+1);
          DFSTestUtil.writeFile(localFileSys, excludeFile, name);

  public static void waitReplication(FileSystem fs, Path fileName,
      short replFactor) throws IOException {
    boolean good;
    do {
      good = true;
      BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
      for (int j = 0; j < locs.length; j++) {
        String[] hostnames = locs[j].getNames();
        if (hostnames.length != replFactor) {
          String hostNameList = "";

  static void waitReplication(FileSystem fs, Path fileName,
      short replFactor) throws IOException {
    boolean good;
    do {
      good = true;
      BlockLocation locs[] = fs.getFileBlockLocations(fileName, 0,
                                                      Long.MAX_VALUE);
      for (int j = 0; j < locs.length; j++) {
        String[] loc = locs[j].getHosts();
        if (loc.length != replFactor) {
          System.out.println("File " + fileName + " has replication factor " +

      String[][] hints = kfsImpl.getDataLocation(srep, start, len);
      BlockLocation[] result = new BlockLocation[hints.length];
      long blockSize = getDefaultBlockSize();
      long length = len;
      for(int i=0; i < result.length; ++i) {
        result[i] = new BlockLocation(null, hints[i], start,
                                      length < blockSize ? length : blockSize);
        length -= blockSize;
      }
      return result;
    }

      if ((blkLocations[i].getOffset() <= offset) &&
          (offset < blkLocations[i].getOffset() + blkLocations[i].getLength())){
        return i;
      }
    }
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset +
                                       " is outside of file (0.." +
                                       fileLength + ")");
  }
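
A hedged sketch of how this index lookup is typically put to use when assigning split locality: find the block that contains a split's starting offset and use its hosts as the preferred locations. The helper name hostsForOffset is hypothetical, not part of the Hadoop API.

  // Hypothetical helper (not part of Hadoop): hosts serving the block that
  // contains the given offset, mirroring the containment check above.
  static String[] hostsForOffset(BlockLocation[] blkLocations, long offset)
      throws IOException {
    for (BlockLocation loc : blkLocations) {
      if (loc.getOffset() <= offset
          && offset < loc.getOffset() + loc.getLength()) {
        return loc.getHosts();
      }
    }
    throw new IllegalArgumentException("Offset " + offset + " is outside of file");
  }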

      for(MockFile file: files) {
        if (file.path.equals(stat.getPath())) {
          for(MockBlock block: file.blocks) {
            if (OrcInputFormat.SplitGenerator.getOverlap(block.offset,
                block.length, start, len) > 0) {
              result.add(new BlockLocation(block.hosts, block.hosts,
                  block.offset, block.length));
            }
          }
          return result.toArray(new BlockLocation[result.size()]);
        }

      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
      String[] cachedHosts = new String[cachedLocations.length];
      for (int i=0; i<cachedLocations.length; i++) {
        cachedHosts[i] = cachedLocations[i].getHostName();
      }
      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
                                            racks,
                                            blk.getStartOffset(),
                                            blk.getBlockSize(),
                                            blk.isCorrupt());
      idx++;
