Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol
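
InterDatanodeProtocol is the RPC interface one datanode uses to talk to another, mainly during block and lease recovery. The examples below build a proxy with DataNode.createInterDataNodeProtocolProxy and then drive it through either the older updateBlock call or the newer initReplicaRecovery / updateReplicaUnderRecovery pair. A minimal sketch of the newer flow, assuming the API shown in the later examples (the helper recoverOnRemote and its parameters are placeholders, and imports are omitted to match the snippet style of this page):

  // Sketch only: recoverOnRemote and its parameters are placeholders; the
  // calls themselves mirror the snippets below.
  static void recoverOnRemote(DatanodeInfo target, Configuration conf,
      int socketTimeout, LocatedBlock locatedBlock) throws IOException {
    ExtendedBlock b = locatedBlock.getBlock();
    long recoveryId = b.getGenerationStamp() + 1;

    // Client-side proxy to the remote datanode's InterDatanodeProtocol.
    InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
        target, conf, socketTimeout);

    // Initialize recovery for the block on the remote datanode ...
    idp.initReplicaRecovery(
        new RecoveringBlock(b, locatedBlock.getLocations(), recoveryId));

    // ... then record the agreed-upon length under the new recovery
    // generation stamp (half the original length here, as in the tests below).
    idp.updateReplicaUnderRecovery(b, recoveryId, b.getNumBytes() / 2);
  }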


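      // Example: connect to a datanode in the test cluster, create an
      // InterDatanodeProtocol proxy to it, and exercise updateBlock (the older,
      // pre-federation API).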
      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      assertTrue(datanodeinfo.length > 0);

      //connect to a data node
      DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
      assertTrue(datanode != null);
      InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
          datanodeinfo[0], conf, datanode.socketTimeout, useDnHostname);
     
      //stop the block scanner so we can compare lastScanTime
      datanode.blockScannerThread.interrupt();

      //verify BlockMetaDataInfo
      Block b = locatedblock.getBlock();
      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
      checkMetaInfo(b, idp, datanode.blockScanner);

      //verify updateBlock
      Block newblock = new Block(
          b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
      idp.updateBlock(b, newblock, false);
      checkMetaInfo(newblock, idp, datanode.blockScanner);
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }


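    // Example: build an InterDatanodeProtocol proxy to a test server's address
    // with a 500 ms socket timeout and expect a SocketTimeoutException.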
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = new DatanodeID(
        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
    InterDatanodeProtocol proxy = null;

    try {
      proxy = DataNode.createInterDataNodeProtocolProxy(
          dInfo, conf, 500, false);
      fail ("Expected SocketTimeoutException exception, but did not get.");

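      // Example: during block recovery, contact each target datanode (reusing
      // the local DataNode when the target is this node) and collect its
      // BlockRecoveryInfo via startBlockRecovery.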
      int rwrCount = 0;
     
      List<BlockRecord> blockRecords = new ArrayList<BlockRecord>();
      for (DatanodeInfo id : targets) {
        try {
          InterDatanodeProtocol datanode = dnRegistration.equals(id) ? this
            : DataNode.createInterDataNodeProtocolProxy(
                id, getConf(), socketTimeout, connectToDnViaHostname);
          BlockRecoveryInfo info = datanode.startBlockRecovery(block);
          if (info == null) {
            LOG.info("No block metadata found for block " + block + " on datanode "
                + id);
            continue;
          }

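      // Example: a newer variant of the first test; it uses ExtendedBlock with
      // initReplicaRecovery and updateReplicaUnderRecovery instead of updateBlock.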
      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      assertTrue(datanodeinfo.length > 0);

      //connect to a data node
      DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
      assertTrue(datanode != null);
      InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
          datanodeinfo[0], conf, datanode.getDnConf().socketTimeout);
     
      //stop the block scanner so we can compare lastScanTime
      if (datanode.blockScanner != null) {
        datanode.blockScanner.shutdown();
      }

      //verify BlockMetaDataInfo
      ExtendedBlock b = locatedblock.getBlock();
      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
      checkMetaInfo(b, datanode);
      long recoveryId = b.getGenerationStamp() + 1;
      idp.initReplicaRecovery(
          new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

      //verify updateBlock
      ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
          b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
      idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
      checkMetaInfo(newblock, datanode);
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }

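    // Example: timeout test for the newer API; initReplicaRecovery on a proxy
    // created with a 500 ms socket timeout should raise a SocketTimeoutException.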
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = new DatanodeID(
        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
    DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
    InterDatanodeProtocol proxy = null;

    try {
      proxy = DataNode.createInterDataNodeProtocolProxy(
          dInfo, conf, 500);
      proxy.initReplicaRecovery(null);
      fail ("Expected SocketTimeoutException exception, but did not get.");
    } catch (SocketTimeoutException e) {
      DataNode.LOG.info("Got expected Exception: SocketTimeoutException" + e);
    } finally {
      if (proxy != null) {

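    // Example (mock-based): two FINALIZED replicas of the same length; both
    // datanodes should receive updateReplicaUnderRecovery with that length.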
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-2, ReplicaState.FINALIZED);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);   

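    // Example: during recovery the DataNode compares generation stamps across
    // the targets, using itself directly when possible and a proxy otherwise.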
    //check generation stamps
    for(DatanodeID id : datanodeids) {
      try {
        BPOfferService bpos = blockPoolManager.get(blockPoolId);
        DatanodeRegistration bpReg = bpos.bpRegistration;
        InterDatanodeProtocol datanode = bpReg.equals(id)?
            this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
                dnConf.socketTimeout);
        ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
        if (info != null &&
            info.getGenerationStamp() >= block.getGenerationStamp() &&

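    // Example (mock-based): a FINALIZED replica paired with an RBW replica of
    // the same length; both datanodes are updated to the finalized length.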
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RBW);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
   

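    // Example (mock-based): a FINALIZED replica paired with an RWR replica; only
    // the datanode holding the finalized replica is updated, the RWR copy never is.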
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.FINALIZED);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

    testSyncReplicas(replica1, replica2, dn1, dn2, REPLICA_LEN1);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, REPLICA_LEN1);
    verify(dn2, never()).updateReplicaUnderRecovery(
        block, RECOVERY_ID, REPLICA_LEN1);

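    // Example (mock-based): two RBW replicas of different lengths; both datanodes
    // are updated to the shorter of the two lengths.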
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RBW);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);

    long minLen = Math.min(REPLICA_LEN1, REPLICA_LEN2);
    testSyncReplicas(replica1, replica2, dn1, dn2, minLen);
    verify(dn1).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
    verify(dn2).updateReplicaUnderRecovery(block, RECOVERY_ID, minLen);
