Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlocksWithMetaInfo
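
LocatedBlocksWithMetaInfo extends LocatedBlocks with per-cluster metadata: the data transfer protocol version, the namespace id, and a fingerprint of the ClientProtocol methods, exposed through getDataProtocolVersion(), getNamespaceID(), and getMethodFingerPrint(). A minimal sketch of the downcast pattern the examples below repeat (namespaceIdOf is a hypothetical helper, not part of the Hadoop API):

    // Hypothetical helper: extract the namespace id when the NameNode
    // returned the metadata-carrying subtype, else fall back to the same
    // 0 default the examples below use.
    static int namespaceIdOf(LocatedBlocks blocks) {
      if (blocks instanceof LocatedBlocksWithMetaInfo) {
        return ((LocatedBlocksWithMetaInfo) blocks).getNamespaceID();
      }
      return 0;
    }

The first example, apparently from a DFSClient checksum routine, reads the protocol version and namespace id from the response instead of issuing an extra RPC: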


    if (locatedBlocks == null) {
      throw new IOException("Null block locations, most likely because the file does not exist: " + src);
    }
    int namespaceId = 0;
    if (locatedBlocks instanceof LocatedBlocksWithMetaInfo) {
      // The NameNode returned the metadata-carrying subtype; read the data
      // transfer protocol version and namespace id directly from the response.
      LocatedBlocksWithMetaInfo lBlocks = (LocatedBlocksWithMetaInfo)locatedBlocks;
      dataTransferVersion = lBlocks.getDataProtocolVersion();
      namespaceId = lBlocks.getNamespaceID();
    } else if (dataTransferVersion == -1) {
      // Older response type: ask the NameNode for the protocol version instead.
      dataTransferVersion = namenode.getDataTransferProtocolVersion();
    }
    final List<LocatedBlock> locatedblocks  = locatedBlocks.getLocatedBlocks();
    int fileCrc = 0;
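
A nearly identical variant, apparently from an MD5-based file checksum path: the same metadata extraction precedes writing per-block digests into a DataOutputBuffer.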


    if (locatedBlocks == null) {
      throw new IOException("Null block locations, most likely because the file does not exist: " + src);
    }
    int namespaceId = 0;
    if (locatedBlocks instanceof LocatedBlocksWithMetaInfo) {
      LocatedBlocksWithMetaInfo lBlocks = (LocatedBlocksWithMetaInfo)locatedBlocks;
      dataTransferVersion = lBlocks.getDataProtocolVersion();
      namespaceId = lBlocks.getNamespaceID();
    } else if (dataTransferVersion == -1) {
      dataTransferVersion = namenode.getDataTransferProtocolVersion();
    }
    final List<LocatedBlock> locatedblocks  = locatedBlocks.getLocatedBlocks();
    final DataOutputBuffer md5out = new DataOutputBuffer();
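
From the Balancer startup path: after marking itself as the running balancer, it resolves the namespace id of the cluster by opening the balancer id file through openAndFetchMetaInfo.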

    this.out = checkAndMarkRunningBalancer();
    if (out == null) {
      throw new IOException("Another balancer is running");
    }
    // get namespace id
    LocatedBlocksWithMetaInfo locations = client.openAndFetchMetaInfo(BALANCER_ID_PATH.toString(), 0L, 1L);
    this.namespaceId = locations.getNamespaceID();
  }
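
A cross-cluster copy routine: it probes the source NameNode for openAndFetchMetaInfo support (federation) and falls back to plain getBlockLocations when the method is unavailable.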

        int srcNamespaceId = 0;
        boolean supportFederation = false;
        if (srcNamenodeProtocolProxy.isMethodSupported(
            "openAndFetchMetaInfo", String.class, long.class, long.class)) {
          supportFederation = true;
          LocatedBlocksWithMetaInfo srcBlockWithMetaInfo =
            srcNamenode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
          srcNamespaceId = srcBlockWithMetaInfo.getNamespaceID();
          srcLocatedBlks = srcBlockWithMetaInfo;
        } else {
          srcLocatedBlks = srcNamenode.getBlockLocations(src, 0, Long.MAX_VALUE);
        }
        List<LocatedBlock> locatedBlocks = srcLocatedBlks.getLocatedBlocks();
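
A NameNode-side method that refreshes the DatanodeInfo of every LocatedBlock, excluding corrupt replicas, and wraps the result together with the data transfer version, namespace id, and method fingerprint.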

  /**
   * Updates DatanodeInfo for each LocatedBlock in locatedBlocks.
   */
  LocatedBlocksWithMetaInfo updateDatanodeInfo(LocatedBlocks locatedBlocks)
      throws IOException {
    if (locatedBlocks.getLocatedBlocks().size() == 0)
      return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
          locatedBlocks.getLocatedBlocks(), false,
          DataTransferProtocol.DATA_TRANSFER_VERSION, getNamespaceId(),
          this.nameNode.getClientProtocolMethodsFingerprint());
    List<LocatedBlock> newBlocks = new ArrayList<LocatedBlock>();

    readLock();
    try {
      for (LocatedBlock locBlock: locatedBlocks.getLocatedBlocks()) {
        Block block = locBlock.getBlock();
        int numNodes = blocksMap.numNodes(block);
        int numCorruptNodes = countNodes(block).corruptReplicas();
        int numCorruptReplicas = corruptReplicas.numCorruptReplicas(block);

        if (numCorruptNodes != numCorruptReplicas) {
          LOG.warn("Inconsistent number of corrupt replicas for " +
                   block + ". blocksMap has " + numCorruptNodes +
                   " but corrupt replicas map has " + numCorruptReplicas);
        }

        boolean blockCorrupt = numCorruptNodes == numNodes;
        int numMachineSet = blockCorrupt ? numNodes : (numNodes - numCorruptNodes);
        DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet];

        if (numMachineSet > 0) {
          numNodes = 0;
          for(Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
              it.hasNext();) {
            DatanodeDescriptor dn = it.next();
            boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(block, dn);
            // blockCorrupt means every replica is corrupt, so include them all;
            // otherwise include only the replicas that are not corrupt.
            if (blockCorrupt || !replicaCorrupt)
              machineSet[numNodes++] = dn;
          }
        }

        // Make a copy of the block object before releasing the lock, so that
        // its state cannot change between now and when the object is
        // serialized to clients, which could otherwise cause inconsistency.
        // Further optimization could avoid some of the copying, but since
        // this is not a critical path we keep the safe approach here.
        //
        Block blockCopy  = null;
        if (block != null) {
          blockCopy = new Block(block);
        }
        LocatedBlock newBlock = new LocatedBlock(blockCopy, machineSet, 0,
            blockCorrupt);
        newBlocks.add(newBlock);
      }
    } finally {
      readUnlock();
    }

    return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
        newBlocks, false, DataTransferProtocol.DATA_TRANSFER_VERSION,
        getNamespaceId(), this.nameNode.getClientProtocolMethodsFingerprint());
  }
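
A factory, apparently from the INode file code, that picks the response type according to the metadata the caller can consume.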

 
  LocatedBlocks createLocatedBlocks(List<LocatedBlock> blocks,
      BlockMetaInfoType type, int namespaceid, int methodsFingerprint) {
    switch (type) {
    case VERSION_AND_NAMESPACEID:
      return new LocatedBlocksWithMetaInfo(
          computeContentSummary().getLength(), blocks,
          isUnderConstruction(), DataTransferProtocol.DATA_TRANSFER_VERSION,
          namespaceid, methodsFingerprint);
    case VERSION:
      return new VersionedLocatedBlocks(computeContentSummary().getLength(),
          blocks, isUnderConstruction(),
          DataTransferProtocol.DATA_TRANSFER_VERSION);
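
A test helper that fetches a file's block list with metadata and repackages the last block as a LocatedBlockWithMetaInfo, carrying over the protocol version, namespace id, and method fingerprint.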

  public static LocatedBlockWithMetaInfo getLastLocatedBlock(
      ClientProtocol namenode, String src
  ) throws IOException {
    //get block info for the last block
    LocatedBlocksWithMetaInfo locations = namenode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    DataNode.LOG.info("blocks.size()=" + blocks.size());
    assertTrue(blocks.size() > 0);

    LocatedBlock blk = blocks.get(blocks.size() - 1);
    return new LocatedBlockWithMetaInfo(blk.getBlock(), blk.getLocations(),
        blk.getStartOffset(),
        locations.getDataProtocolVersion(), locations.getNamespaceID(),
        locations.getMethodFingerPrint());
  }
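
A test setup that creates a source file, then reads its namespace id and first block's locations in preparation for a block copy call.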

    // Generate source file and get its locations.
    String filename = "/testCopyBlockAPI";
    DFSTestUtil.createFile(fs, new Path(filename), 1023 * 10, (short) 3,
        (long) 0);
    FileStatus srcFileStatus = fs.getFileStatus(new Path(filename));
    LocatedBlocksWithMetaInfo lbkSrcMetaInfo = cluster.getNameNode()
        .openAndFetchMetaInfo(filename, 0, Long.MAX_VALUE);
    int srcNamespaceId = lbkSrcMetaInfo.getNamespaceID();
    LocatedBlock lbkSrc = lbkSrcMetaInfo.getLocatedBlocks().get(0);
    DatanodeInfo[] srcLocs = lbkSrc.getLocations();

    // Create destination file and add a single block.
    String newFile = "/testCopyBlockAPI_new";
    String clientName = newFile;
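
A test verifier that compares block locations of a source and destination file across two NameNodes and, for same-host replicas, checks hard links using the namespace ids from both responses.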


  public boolean verifyBlockLocations(String src, String destination,
      NameNode srcNameNode, NameNode dstNameNode, boolean hardlink)
      throws IOException {
    LocatedBlocksWithMetaInfo srcLocatedBlocks =
      srcNameNode.openAndFetchMetaInfo(src, 0, Long.MAX_VALUE);
    List<LocatedBlock> srcblocks = srcLocatedBlocks.getLocatedBlocks();
    LocatedBlocksWithMetaInfo dstLocatedBlocks =
      dstNameNode.openAndFetchMetaInfo(destination, 0, Long.MAX_VALUE);
    List<LocatedBlock> dstblocks = dstLocatedBlocks.getLocatedBlocks();

    assertEquals(srcblocks.size(), dstblocks.size());

    Iterator<LocatedBlock> srcIt = srcblocks.iterator();
    Iterator<LocatedBlock> dstIt = dstblocks.iterator();
    while (srcIt.hasNext()) {
      LocatedBlock srcBlock = srcIt.next();
      LocatedBlock dstBlock = dstIt.next();
      List<DatanodeInfo> srcLocations = Arrays.asList(srcBlock.getLocations());
      List<DatanodeInfo> dstLocations = Arrays.asList(dstBlock.getLocations());
     
      System.out.println("Locations for src block : " + srcBlock.getBlock()
          + " file : " + src);
      for (DatanodeInfo info : srcLocations) {
        System.out.println("Datanode : " + info.toString() + " rack: " + info.getNetworkLocation());
      }

      System.out.println("Locations for dst block : " + dstBlock.getBlock()
          + " file : " + destination);
      for (DatanodeInfo info : dstLocations) {
        System.out.println("Datanode : " + info.toString() + " rack: " + info.getNetworkLocation());
      }

      assertEquals(srcLocations.size(), dstLocations.size());

      if (srcNameNode.getNameNodeAddress().equals(
          dstNameNode.getNameNodeAddress())) {
        // Same FS copy, verify blocks are machine local.
        assertTrue(srcLocations.containsAll(dstLocations));
        assertTrue(dstLocations.containsAll(srcLocations));
      } else {
        // Since all datanodes are on the same host in a unit test, the inter
        // filesystem copy can have blocks end up on any datanode.
        Iterator<DatanodeInfo> sit = srcLocations.iterator();
        while (sit.hasNext()) {
          DatanodeInfo srcInfo = sit.next();

          // Verify location.
          Iterator<DatanodeInfo> dit = dstLocations.iterator();
          while (dit.hasNext()) {
            DatanodeInfo dstInfo = dit.next();
            if (dstInfo.getHost().equals(srcInfo.getHost())) {
              verifyHardLinks(srcInfo, dstInfo,
                  srcLocatedBlocks.getNamespaceID(), srcBlock.getBlock(),
                  dstLocatedBlocks.getNamespaceID(), dstBlock.getBlock(),
                  hardlink);
            }
          }
        }
      }
