Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.BlockLocation

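BlockLocation describes where the replicas of one block of a file live: the datanode names ("host:port"), hostnames, and optionally rack paths, together with the block's offset and length within the file. Client code usually obtains these from FileSystem.getFileBlockLocations. Below is a minimal sketch of that call path; the path /tmp/example.txt is hypothetical, and the default Configuration is assumed to point at your cluster:

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical file; point this at a real path on your cluster.
        Path file = new Path("/tmp/example.txt");
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus stat = fs.getFileStatus(file);

        // One BlockLocation per block overlapping the requested byte range;
        // here the range covers the whole file.
        BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, stat.getLen());
        for (BlockLocation loc : locs) {
          System.out.println("offset=" + loc.getOffset()
              + " length=" + loc.getLength()
              + " hosts=" + Arrays.toString(loc.getHosts()));
        }
      }
    }

Input formats use exactly these offsets, lengths, and hosts to place splits near their data, which is the pattern most of the examples below revolve around.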

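The first example is a test-style helper that fabricates one synthetic BlockLocation per split of a file. The snippet began mid-method, so the signature and the numLocations computation are reconstructed from context: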
    // Reconstructed opening: the snippet began mid-method, so the method
    // name and the numLocations computation are assumptions based on the
    // loop below.
    private static BlockLocation[] fakeBlockLocations(long size, long splitSize) {
      int numLocations = (int) (size / splitSize);
      if (size % splitSize != 0) {
        numLocations++;
      }
      // One synthetic block per split, with fabricated block names and hosts.
      BlockLocation[] blockLocations = new BlockLocation[numLocations];
      for (int i = 0; i < numLocations; i++) {
        String[] names = new String[] { "b" + i };
        String[] hosts = new String[] { "host" + i };
        // The final block may be shorter than splitSize.
        blockLocations[i] = new BlockLocation(names, hosts, i * splitSize,
            Math.min(splitSize, size - (splitSize * i)));
      }
      return blockLocations;
    }

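Next, an HBase test reads the locations of a log file's first block through getFileBlockLocations and checks which hosts hold its replicas: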
        // We can't call getBlockLocations directly; it's not available in
        // HFileSystem. We try multiple times to be sure, as the replica
        // order is random.

        BlockLocation[] bls = rfs.getFileBlockLocations(fsLog, 0, 1);
        if (bls.length > 0) {
          BlockLocation bl = bls[0];

          LOG.info(bl.getHosts().length + " replicas for block 0 in " + logFile + " ");
          // Every replica except possibly the last must be off host4.
          for (int i = 0; i < bl.getHosts().length - 1; i++) {
            LOG.info(bl.getHosts()[i] + "    " + logFile);
            // Compare values, not references: the original assertNotSame only
            // checked object identity, which is vacuous for host strings.
            Assert.assertFalse(host4.equals(bl.getHosts()[i]));
          }
          String last = bl.getHosts()[bl.getHosts().length - 1];
          LOG.info(last + "    " + logFile);
          if (host4.equals(last)) {
            nbTest++;
            LOG.info(logFile + " is on the new datanode and is ok");
            if (bl.getHosts().length == 3) {
              // We can test this case from the file system as well.
              // Check the underlying file system, multiple times as the
              // order is random.
              testFromDFS(dfs, logFile, repCount, host4);

              // now from the master

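A FileSystem subclass used in tests can override getFileBlockLocations to simulate a file with missing blocks; the override signature below is reconstructed, as the snippet began mid-method: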
    // Reconstructed override signature: the snippet began mid-method.
    @Override
    public BlockLocation[] getFileBlockLocations(FileStatus stat,
        long start, long len) throws IOException {
      String name = stat.getPath().toUri().getPath();
      BlockLocation[] locs =
        super.getFileBlockLocations(stat, start, len);
      if (name.equals(fileWithMissingBlocks)) {
        // Simulate a missing block: same offset and length, but empty
        // name and host arrays.
        System.out.println("Returning missing blocks for " + fileWithMissingBlocks);
        locs[0] = new HdfsBlockLocation(new BlockLocation(new String[0],
            new String[0], locs[0].getOffset(), locs[0].getLength()), null);
      }
      return locs;
    }

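This fragment, apparently from a non-HDFS file system binding, builds rack-aware locations while walking a file's extents in a do/while loop (the loop header was cut off in the source):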
      // Mid-loop fragment: the cut-off branches above presumably fill
      // names[i] and hosts[i]; this branch records the i-th rack name.
          else if (type.compareTo("rack") == 0)
            racks[i] = bucket.getName();
        }
      }

      // One BlockLocation per extent, carrying names, hosts, and racks.
      blocks.add(new BlockLocation(names, hosts, racks,
            extent.getOffset(), extent.getLength()));

      curPos += extent.getLength();
    } while (curPos < endOff);

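FileInputFormat maps a file offset to the index of the block containing it; the method opening below is reconstructed from the well-known getBlockIndex helper: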
  // Reconstructed opening: the snippet began mid-condition. This is the
  // classic FileInputFormat.getBlockIndex helper, which maps a file offset
  // to the index of the block containing it.
  protected int getBlockIndex(BlockLocation[] blkLocations, long offset) {
    for (int i = 0; i < blkLocations.length; i++) {
      if ((blkLocations[i].getOffset() <= offset)
          && (offset < blkLocations[i].getOffset()
              + blkLocations[i].getLength())) {
        return i;
      }
    }
    // Past the last block: report the valid offset range.
    BlockLocation last = blkLocations[blkLocations.length - 1];
    long fileLength = last.getOffset() + last.getLength() - 1;
    throw new IllegalArgumentException("Offset " + offset
        + " is outside of file (0.." + fileLength + ")");
  }

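A file-system adapter can also synthesize one BlockLocation per block from externally supplied placement hints; the snippet's truncated tail is reconstructed and commented as such: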
    final long end = Math.min(file.getLen(), start + len);
    if (hints.length <= 1 || end <= start) {
      // Return an empty host list, as Hadoop expects at least one location.
      final BlockLocation[] result = new BlockLocation[1];
      result[0] = new BlockLocation(
        null, null, start, Math.max(0L, end - start));
      return result;
    }
    // Number of blocks spanned by the requested [start, end) range.
    final int               blkcnt =
        (int)((end - 1) / blockSize - start / blockSize + 1);
    final BlockLocation[]   result = new BlockLocation[blkcnt];
    final ArrayList<String> hlist  = new ArrayList<String>();
    // Align pos to the start of the block containing 'start'.
    long                    pos    = start - start % blockSize;
    for (int i = 0, m = 1; i < blkcnt; ++i) {
      hlist.clear();
      if (m < hints.length) {
        // Each hint entry lists "host:port" strings; strip the port and
        // de-duplicate to get the host list for this block.
        final String[] locs = hints[m++];
        hlist.ensureCapacity(locs.length);
        for (int k = 0; k < locs.length; ++k) {
          final int    idx  = locs[k].lastIndexOf(':');
          final String host = 0 < idx ? locs[k].substring(0, idx) : locs[k];
          if (!hlist.contains(host)) {
            hlist.add(host);
          }
        }
      }
      // Clamp the block's range to the requested [start, end) window.
      final long lpos = pos < start ? start : pos;
      final long bend = pos + blockSize;
      final int  hsz  = hlist.size();
      result[i] = new BlockLocation(
        null,
        hsz <= 0 ? null : hlist.toArray(new String[hsz]),
        lpos,
        (bend < end ? bend : end) - lpos
      );
      // Reconstructed tail (the snippet was truncated here): pos must
      // advance one blockSize per iteration for blkcnt to come out right.
      pos = bend;
    }
    return result;

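Tests commonly poll getFileBlockLocations until a file reaches its target replication factor, as in this helper, apparently from DFSTestUtil: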
  // Polls a file's block locations until every block reports the expected
  // number of replicas.
  public static void waitReplication(FileSystem fs, Path fileName,
      short replFactor) throws IOException {
    boolean good;
    do {
      good = true;
      BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
      for (int j = 0; j < locs.length; j++) {
        String[] hostnames = locs[j].getNames();
        if (hostnames.length != replFactor) {
          String hostNameList = "";

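Finally, CombineFileInputFormat guards against null and empty location arrays before building its per-block OneBlockInfo records: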
      if (locations == null) {
        blocks = new OneBlockInfo[0];
      } else {
        // An empty file still needs one placeholder location.
        if (locations.length == 0) {
          locations = new BlockLocation[] { new BlockLocation() };
        }

        if (!isSplitable) {
          // if the file is not splittable, just create the one block with
          // full file length
