Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.LocatedBlockWithMetaInfo
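A LocatedBlockWithMetaInfo carries extra cluster metadata on top of the usual LocatedBlock fields: the namespace ID, the data transfer protocol version, and a fingerprint of the namenode's protocol methods. The sketch below is not taken from the examples; it only gathers in one place the accessors the fragments use (getNamespaceID, getDataProtocolVersion, getMethodFingerPrint), and assumes a ClientProtocol stub named namenode and a clientName are in scope, with the usual org.apache.hadoop.hdfs.protocol imports omitted.

// Minimal sketch (hypothetical handles, not from the examples below).
LocatedBlockWithMetaInfo loc =
    namenode.addBlockAndFetchMetaInfo("/example/file.txt", clientName, null, 0L);

int namespaceId = loc.getNamespaceID();             // namespace the new block belongs to
int methodFingerprint = loc.getMethodFingerPrint(); // fingerprint of the namenode's protocol methods

Block block = loc.getBlock();                       // inherited from LocatedBlock
DatanodeInfo[] locations = loc.getLocations();      // datanodes holding a replica

System.out.println("block=" + block + " namespaceId=" + namespaceId
    + " dataProtocolVersion=" + loc.getDataProtocolVersion()
    + " methodFingerprint=" + methodFingerprint
    + " replicas=" + locations.length);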


      // Test fragment: create a replicated file, fetch its last block (with
      // namespace metadata) from the namenode, and contact each replica's
      // datanode over InterDatanodeProtocol.
      DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
      assertTrue(dfs.dfs.exists(filestr));
      DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);

      //get block info for the last block
      LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.namenode, filestr);
      int namespaceId = locatedblock.getNamespaceID();
      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
      assertEquals(REPLICATION_NUM, datanodeinfos.length);

      //connect to data nodes
      InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
      DataNode[] datanodes = new DataNode[REPLICATION_NUM];
      for(int i = 0; i < REPLICATION_NUM; i++) {
        idps[i] = DataNode.createInterDataNodeProtocolProxy(
            datanodeinfos[i], conf, 0);
        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
        assertTrue(datanodes[i] != null);
      }
     
      //verify BlockMetaDataInfo
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      for(int i = 0; i < REPLICATION_NUM; i++) {
        checkMetaInfo(namespaceId, lastblock, idps[i]);
      }
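The checkMetaInfo helper called in the loop above is not part of the fragment. A hedged sketch of what it plausibly does follows, with the caveat that an InterDatanodeProtocol.getBlockMetaDataInfo overload taking a namespace ID is only an assumption inferred from the namespaceId argument above.

// Hedged sketch of the helper used above; getBlockMetaDataInfo(namespaceId, block)
// is an assumed signature, inferred from the namespaceId passed to checkMetaInfo.
static void checkMetaInfo(int namespaceId, Block b, InterDatanodeProtocol idp)
    throws IOException {
  BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(namespaceId, b);
  assertEquals(b.getBlockId(), metainfo.getBlockId());
  assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}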


              // FastCopy fragment (truncated at the top): pick the
              // addBlockAndFetchMetaInfo overload supported by the destination
              // namenode, refusing to copy from a non-federated to a federated cluster.
              throw new IOException(
                  "Fastcopy is not allowed from "
                      + "a non-federated HDFS cluster to a federated HDFS cluster!");
            }

            LocatedBlockWithMetaInfo dstLocatedBlockWithMetaInfo = dstNamenode
                .addBlockAndFetchMetaInfo(destination, clientName, null,
                    favoredNodes, startPos);
            destinationLocatedBlock = dstLocatedBlockWithMetaInfo;
          } else if (dstNamenodeProtocolProxy.isMethodSupported(
              "addBlockAndFetchMetaInfo", String.class, String.class,
              DatanodeInfo[].class, DatanodeInfo[].class)) {
            if (!supportFederation) {
              throw new IOException(
                  "Fastcopy is not allowed from "
                      + "a non-federeated HDFS cluster to a federated HDFS cluster!");
            }
            LocatedBlockWithMetaInfo dstLocatedBlockWithMetaInfo = dstNamenode
                .addBlockAndFetchMetaInfo(destination, clientName, null,
                    favoredNodes);
            destinationLocatedBlock = dstLocatedBlockWithMetaInfo;
          } else {
            if (supportFederation) {

    // Client-side append fragment: use appendAndFetchMetaInfo when the namenode
    // supports it and record the returned metadata; otherwise fall back to append().
    try {
      stat = getFileInfo(src);
      if (namenodeProtocolProxy != null
          && namenodeProtocolProxy.isMethodSupported(
              "appendAndFetchMetaInfo", String.class, String.class)) {
        LocatedBlockWithMetaInfo loc = namenode.appendAndFetchMetaInfo(src,
            clientName);
        lastBlock = loc;
        if (loc != null) {
          namespaceId = loc.getNamespaceID();
          updateDataTransferProtocolVersionIfNeeded(loc.getDataProtocolVersion());
          getNewNameNodeIfNeeded(loc.getMethodFingerPrint());
        }
      } else {
        lastBlock = namenode.append(src, clientName);
      }
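Both fragments above follow the same probe-then-call pattern: ask the protocol proxy whether the namenode actually implements the metadata-returning method, and fall back to the plain call if it does not. A condensed sketch of that pattern, assuming a ProtocolProxy-style handle named namenodeProxy plus namenode, src, clientName and an optional favoredNodes array in scope (all hypothetical names here):

// Condensed probe-then-call sketch (variable names hypothetical).
LocatedBlock loc;
if (namenodeProxy.isMethodSupported("addBlockAndFetchMetaInfo",
    String.class, String.class, DatanodeInfo[].class, DatanodeInfo[].class)) {
  // Newer namenode: the result is a LocatedBlockWithMetaInfo carrying the
  // namespace id, data transfer protocol version and method fingerprint.
  loc = namenode.addBlockAndFetchMetaInfo(src, clientName, null, favoredNodes);
} else {
  // Older namenode: plain LocatedBlock without the extra metadata.
  loc = namenode.addBlock(src, clientName);
}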

          // Output-stream fragment (truncated at the top): when the namenode
          // returns a LocatedBlockWithMetaInfo, record the namespace id and
          // method fingerprint before handing the block back.
            return dfsClient.namenode.addBlock(src, dfsClient.clientName);
          }
          dfsClient.updateDataTransferProtocolVersionIfNeeded(
              loc.getDataProtocolVersion());
          if (loc instanceof LocatedBlockWithMetaInfo) {
            LocatedBlockWithMetaInfo metaLoc = (LocatedBlockWithMetaInfo)loc;
            this.namespaceId = metaLoc.getNamespaceID();
            dfsClient.getNewNameNodeIfNeeded(metaLoc.getMethodFingerPrint());
          }
          return loc;
        } catch (RemoteException e) {
          IOException ue =
            e.unwrapRemoteException(FileNotFoundException.class,
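Because older namenodes hand back a plain LocatedBlock, the metadata is only read after an instanceof check, as in the fragment above. A hypothetical helper that distills the pattern:

// Hypothetical helper (not in the original code): return the namespace id when
// the namenode supplied one, otherwise a caller-provided default.
static int namespaceIdOf(LocatedBlock loc, int defaultNamespaceId) {
  if (loc instanceof LocatedBlockWithMetaInfo) {
    return ((LocatedBlockWithMetaInfo) loc).getNamespaceID();
  }
  return defaultNamespaceId;
}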

    // Test fragment (truncated at the top): the method fingerprint returned
    // inside the LocatedBlockWithMetaInfo matches the fingerprint computed
    // from the namenode's ClientProtocol signature.
    }
    long serverVersion = ClientProtocol.versionID;
    int serverFpFromNn = ProtocolSignature.getFingerprint(ProtocolSignature.getProtocolSignature(
        0, serverVersion, inter).getMethods());
   
    LocatedBlockWithMetaInfo loc = client.namenode.addBlockAndFetchMetaInfo("/testNameNodeFingerprintSent.txt",
        client.getClientName(), null, 0L);
   
    int serverFp = loc.getMethodFingerPrint();
    TestCase.assertEquals(serverFpFromNn, serverFp);   

    FileSystem fs = cluster.getFileSystem();
    Path f = new Path("/testNameNodeFingerprintSent1.txt");
    DataOutputStream a_out = fs.create(f);


      // Lease-recovery test fragment: empty the last block's checksum file on
      // the datanode, run recoverLease until it succeeds, and verify that the
      // block file and its meta file stay empty.

      // get data node
      DataNode datanode = cluster.getDataNode(nodes[0].getIpcPort());
      assertTrue(datanode != null);

      // locate the last block's files on disk and verify the block file is empty
      LocatedBlockWithMetaInfo locatedblock = TestInterDatanodeProtocol
          .getLastLocatedBlock(dfs.dfs.namenode, filestr);
      Block lastblock = locatedblock.getBlock();
      DataNode.LOG.info("newblocks=" + lastblock);
      BlockPathInfo blockPathInfo = datanode.getBlockPathInfo(lastblock);
      String blockPath = blockPathInfo.getBlockPath();
      String metaPath = blockPathInfo.getMetaPath();

      File f = new File(blockPath);
      File meta = new File(metaPath);
      assertEquals(0, f.length());
      // set the checksum file to 0
      meta.delete();
      DataOutputStream outs = new DataOutputStream(new FileOutputStream(
          metaPath, false));
      outs.close();

      // issue recovery and make sure it succeeds.
      int numTries = 500;
      for (int idxTry = 0; idxTry < numTries; idxTry++) {
        boolean success = dfs.recoverLease(filepath);
        if (success) {
          break;
        } else if (idxTry == numTries - 1) {
          TestCase.fail("Recovery lease failed");
        } else {
          Thread.sleep(10);
        }
      }

      // make sure the meta file is still empty
      locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
          dfs.dfs.namenode, filestr);
      Block newBlock = locatedblock.getBlock();
      blockPathInfo = datanode.getBlockPathInfo(newBlock);
      assertEquals(0, blockPathInfo.getNumBytes());
      metaPath = blockPathInfo.getMetaPath();
      meta = new File(metaPath);
      assertEquals(0, meta.length());


            // Fragment (truncated at the top): fall back to plain addBlock on
            // older namenodes; otherwise record the LocatedBlockWithMetaInfo
            // metadata before returning the block.
            } else {
              return namenode.addBlock(src, clientName);
            }
            updateDataTransferProtocolVersionIfNeeded(loc.getDataProtocolVersion());
            if (loc instanceof LocatedBlockWithMetaInfo) {
              LocatedBlockWithMetaInfo metaLoc = (LocatedBlockWithMetaInfo)loc;
              this.namespaceId = metaLoc.getNamespaceID();
              getNewNameNodeIfNeeded(metaLoc.getMethodFingerPrint());
            }
            return loc;
          } catch (RemoteException e) {
            IOException ue =
              e.unwrapRemoteException(FileNotFoundException.class,

