Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlock
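
All snippets below revolve around one pattern: ask the NameNode for a file's LocatedBlocks, then inspect or act on individual LocatedBlock entries. As a starting point, here is a minimal sketch of that pattern; it is illustrative only and assumes an already-initialized DistributedFileSystem `dfs` and an existing file at `path`.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Sketch: enumerate a file's blocks and the DataNodes holding each replica.
static void printBlocks(DistributedFileSystem dfs, Path path)
    throws IOException {
  long len = dfs.getFileStatus(path).getLen();
  LocatedBlocks blocks = dfs.getClient().getNamenode()
      .getBlockLocations(path.toUri().getPath(), 0L, len);
  for (LocatedBlock lb : blocks.getLocatedBlocks()) {
    System.out.println("block=" + lb.getBlock()        // ExtendedBlock id
        + " offset=" + lb.getStartOffset()             // offset within the file
        + " size=" + lb.getBlockSize()
        + " corrupt=" + lb.isCorrupt());
    for (DatanodeInfo dn : lb.getLocations()) {
      System.out.println("  replica on " + dn.getXferAddr());
    }
  }
}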


    // Test helper (MiniDFSCluster): look up the file's last LocatedBlock
    // via the NameNode RPC, then fetch the stored replica from the first DN.
    Block ret;
    List<LocatedBlock> lbs =
        cluster.getNameNodeRpc()
            .getBlockLocations(path.toString(), FILE_START, size)
            .getLocatedBlocks();
    LocatedBlock lb = lbs.get(lbs.size() - 1);

    // Get block from the first DN
    ret = cluster.getDataNodes().get(DN_N0).data
        .getStoredBlock(lb.getBlock().getBlockPoolId(),
            lb.getBlock().getBlockId());
    return ret;
  }


      // From fsck: copy each block of a broken file into "chain" files
      // under the /lost+found target, skipping blocks with no live replicas.
      // create chains
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException("Failed to copy " + fullName +
                " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() +
              " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          // ...
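
In the excerpt above, copyBlock reads the block's bytes directly from a DataNode. As a rough file-level approximation of what it does, here is a hedged sketch that copies the byte range a LocatedBlock covers by seeking in the file; copyBlockRange is a hypothetical helper, not the fsck method itself.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Sketch: copy the byte range covered by one LocatedBlock.
static void copyBlockRange(FileSystem fs, Path file, LocatedBlock lb,
    OutputStream out) throws IOException {
  byte[] buf = new byte[64 * 1024];
  long remaining = lb.getBlockSize();
  try (FSDataInputStream in = fs.open(file)) {
    in.seek(lb.getStartOffset());      // block's start offset within the file
    while (remaining > 0) {
      int n = in.read(buf, 0, (int) Math.min(buf.length, remaining));
      if (n < 0) break;                // unexpected EOF
      out.write(buf, 0, n);
      remaining -= n;
    }
  }
}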

    // DataNode side: wrap the bad replica, its storage UUID, and its storage
    // type in a LocatedBlock and report it to the NameNode.
    DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
    String[] uuids = { storageUuid };
    StorageType[] types = { storageType };
    LocatedBlock[] blocks = { new LocatedBlock(block, dnArr, uuids, types) };

    try {
      bpNamenode.reportBadBlocks(blocks);
    } catch (IOException e) {
      // One common reason is that NameNode could be in safe mode.
      // ...
    }

  /**
   * Report a bad block from another DN in this cluster.
   */
  void reportRemoteBadBlock(DatanodeInfo dnInfo, ExtendedBlock block)
      throws IOException {
    LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] { dnInfo });
    bpNamenode.reportBadBlocks(new LocatedBlock[] {lb});
  }
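
The same RPC is exposed to clients: ClientProtocol also declares reportBadBlocks, so a reader that detects a checksum failure can file an equivalent report. A minimal sketch (reportBadReplica is a hypothetical helper); on receipt the NameNode marks the replica corrupt and schedules re-replication from the remaining good copies.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

// Sketch: report one bad replica, identified by its block and its DataNode.
static void reportBadReplica(ClientProtocol namenode, ExtendedBlock block,
    DatanodeInfo dn) throws IOException {
  LocatedBlock lb = new LocatedBlock(block, new DatanodeInfo[] { dn });
  namenode.reportBadBlocks(new LocatedBlock[] { lb });
}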

    // Now abandon the last block
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
    LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
    int originalNumBlocks = blocks.locatedBlockCount();
    LocatedBlock b = blocks.getLastLocatedBlock();
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
        dfsclient.clientName);
   
    // call abandonBlock again to make sure the operation is idempotent
    dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
        dfsclient.clientName);

    // And close the file
    fout.close();
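
The doubled abandonBlock call is the point of this test: the operation is meant to be idempotent (recent ClientProtocol versions annotate it @Idempotent), so a client retrying after a lost RPC response must see the second call succeed rather than fail.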

    // Locate the file blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
        .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some or all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
      DatanodeInfo dninfo = datanodeinfos[i];
      final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
      corruptBlock(block, dn);
      // ...
    }

  private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
        .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
  }

  // ... (method signature elided in the original excerpt)
      throws AccessControlException, FileNotFoundException,
      UnresolvedLinkException, IOException {
    final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(
        filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    // we expect that only the first block of the file is used for this test
    LocatedBlock firstLocatedBlock = lBlocks.get(0);
    Assert.assertEquals(expectedReplicas,
        firstLocatedBlock.getLocations().length);
  }
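
The two verification helpers above share one shape: fetch the LocatedBlocks once, then assert on a single LocatedBlock. A hedged generalization (countCorruptBlocks is a hypothetical helper, assuming the same dfs handle the tests use); note that LocatedBlock.isCorrupt() is set only when every replica of the block is corrupt.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Sketch: count the blocks of a file whose replicas are all marked corrupt.
static int countCorruptBlocks(DistributedFileSystem dfs, Path path)
    throws IOException {
  LocatedBlocks lbs = dfs.getClient().getNamenode()
      .getBlockLocations(path.toUri().getPath(), 0L, Long.MAX_VALUE);
  int corrupt = 0;
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    if (lb.isCorrupt()) {
      corrupt++;
    }
  }
  return corrupt;
}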

    // Build one healthy and one corrupt LocatedBlock, then convert the
    // list to the generic BlockLocation form.
    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;

    // ok
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);

    // corrupt
    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
    LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);

    List<LocatedBlock> ls = Arrays.asList(l1, l2);
    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true);

    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
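
locatedBlocks2Locations converts the HDFS-specific LocatedBlocks into the generic org.apache.hadoop.fs.BlockLocation form that FileSystem#getFileBlockLocations returns. A short sketch of inspecting the converted array (assumes the bs array from the snippet above):

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.BlockLocation;

// Sketch: the host list and corrupt flag carry over from each LocatedBlock.
static void printLocations(BlockLocation[] bs) throws IOException {
  for (BlockLocation loc : bs) {
    System.out.println("offset=" + loc.getOffset()
        + " length=" + loc.getLength()
        + " corrupt=" + loc.isCorrupt()
        + " hosts=" + Arrays.toString(loc.getHosts()));
  }
}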

    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;
   
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
    final DatanodeInfo[] cachedLocs = l1.getCachedLocations();
    assertTrue(cachedLocs.length == 0);
  }
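
A natural continuation of the test (hedged; addCachedLoc is the 2.x API for attaching a cached replica to a LocatedBlock, and assertEquals is assumed to be statically imported like assertTrue above):

    // Hypothetical follow-up: cached locations start empty and can be
    // added after construction.
    l1.addCachedLoc(d);
    assertEquals(1, l1.getCachedLocations().length);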
