Package org.apache.hadoop.hdfs.server.protocol

Examples of org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo
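
The excerpts below come from the HDFS tests and the DataNode/FSDataset block-recovery code that create and consume ReplicaRecoveryInfo objects. As a quick orientation, here is a minimal, self-contained sketch of the same API: it uses only the constructor and the getBlockId()/getNumBytes()/getGenerationStamp() accessors that appear in the excerpts, plus a getOriginalReplicaState() accessor; the class name, the literal values, and the import path of the ReplicaState enum (which has moved between Hadoop releases) are assumptions for illustration, not code taken from the excerpts.

  // Minimal sketch (see the assumptions noted above): build a
  // ReplicaRecoveryInfo and read back the fields the recovery code uses.
  import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; // package varies by release
  import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

  public class ReplicaRecoveryInfoSketch {          // hypothetical class name
    public static void main(String[] args) {
      final long blockId = 1L;                      // hypothetical block id
      final long bytesOnDisk = 1024L;               // replica length on disk
      final long genStamp = 2L;                     // generation stamp

      // blockId, on-disk length, generation stamp, and the replica's state
      // before recovery started (FINALIZED, RBW, RWR, ...).
      ReplicaRecoveryInfo info =
          new ReplicaRecoveryInfo(blockId, bytesOnDisk, genStamp, ReplicaState.RBW);

      // The Block-style accessors used throughout the excerpts below.
      System.out.println("id=" + info.getBlockId()
          + " len=" + info.getNumBytes()
          + " gs=" + info.getGenerationStamp()
          + " state=" + info.getOriginalReplicaState());
    }
  }

In the recoverBlock excerpts further down, the DataNode collects one such object per replica from each datanode via initReplicaRecovery and only adds a replica to the sync list when the returned info is non-null, its generation stamp is at least the block's, and its length is greater than zero.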


  @Test
  public void testRBW_RWRReplicas() throws IOException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RBW);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-2, ReplicaState.RWR);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    // ... (remainder of test omitted)


  @Test
  public void testRWRReplicas() throws IOException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaRecoveryInfo replica1 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN1, GEN_STAMP-1, ReplicaState.RWR);
    ReplicaRecoveryInfo replica2 = new ReplicaRecoveryInfo(BLOCK_ID,
        REPLICA_LEN2, GEN_STAMP-2, ReplicaState.RWR);

    InterDatanodeProtocol dn1 = mock(InterDatanodeProtocol.class);
    InterDatanodeProtocol dn2 = mock(InterDatanodeProtocol.class);
    // ... (remainder of test omitted)

  public void testZeroLenReplicas() throws IOException, InterruptedException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    DataNode spyDN = spy(dn);
    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
        block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).
        initReplicaRecovery(any(RecoveringBlock.class));
    Daemon d = spyDN.recoverBlocks(initRecoveringBlocks());
    d.join();
    verify(dn.namenode).commitBlockSynchronization(
        // ... (verification arguments and rest of test omitted)

  private List<BlockRecord> initBlockRecords(DataNode spyDN) {
    List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
    BlockRecord blockRecord = new BlockRecord(
        new DatanodeID(dn.dnRegistration), spyDN,
        new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
            block.getGenerationStamp(), ReplicaState.FINALIZED));
    blocks.add(blockRecord);
    return blocks;
  }

      //normal case
      final Block b = blocks[0];
      final ReplicaInfo originalInfo = map.get(b);

      final long recoveryid = gs + 1;
      final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid);
      assertEquals(originalInfo, recoveryInfo);

      final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(b);
      Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
      Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());

      //recover one more time
      final long recoveryid2 = gs + 2;
      final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid2);
      assertEquals(originalInfo, recoveryInfo2);

      final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(b);
      Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
      Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
     
      //case RecoveryInProgressException
      try {
        FSDataset.initReplicaRecovery(map, b, recoveryid);
        Assert.fail();
      }
      catch(RecoveryInProgressException ripe) {
        System.out.println("GOOD: getting " + ripe);
      }
    }

    { // BlockRecoveryFI_01: replica not found
      final long recoveryid = gs + 1;
      final Block b = new Block(firstblockid - 1, length, gs);
      ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(map, b, recoveryid);
      Assert.assertNull("Data-node should not have this replica.", r);
    }
   
    { // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs 
      final long recoveryid = gs - 1;
      // ... (remainder of test omitted)

      //initReplicaRecovery
      final Block b = locatedblock.getBlock();
      final long recoveryid = b.getGenerationStamp() + 1;
      final long newlength = b.getNumBytes() - 1;
      final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
          new RecoveringBlock(b, null, recoveryid));

      //check replica
      final ReplicaInfo replica = fsdataset.fetchReplicaInfo(b.getBlockId());
      Assert.assertEquals(ReplicaState.RUR, replica.getState());

      //check meta data before update
      FSDataset.checkReplicaFiles(replica);

      //case "THIS IS NOT SUPPOSED TO HAPPEN"
      //with (block length) != (stored replica's on disk length).
      {
        //create a block with same id and gs but different length.
        final Block tmp = new Block(rri.getBlockId(), rri.getNumBytes() - 1,
            rri.getGenerationStamp());
        try {
          //update should fail
          fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, newlength);
          Assert.fail();
        } catch(IOException ioe) {
          // ... (remainder omitted)

    BInfo binfo = blockMap.get(b);
    if (binfo == null) {
      throw new IOException("No such Block " + b);
    }

    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
        binfo.getGenerationStamp(),
        binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
  }

        BPOfferService bpos = blockPoolManager.get(blookPoolId);
        DatanodeRegistration bpReg = bpos.bpRegistration;
        InterDatanodeProtocol datanode = bpReg.equals(id)?
            this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
                socketTimeout);
        ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
        if (info != null &&
            info.getGenerationStamp() >= block.getGenerationStamp() &&
            info.getNumBytes() > 0) {
          syncList.add(new BlockRecord(id, datanode, info));
        }
      } catch (RecoveryInProgressException ripE) {
        InterDatanodeProtocol.LOG.warn(
            "Recovery for replica " + block + " on data-node " + id
            // ... (rest of the log message and loop omitted)

    for(DatanodeID id : datanodeids) {
      try {
        InterDatanodeProtocol datanode = dnRegistration.equals(id)?
            this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
                socketTimeout);
        ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
        if (info != null &&
            info.getGenerationStamp() >= block.getGenerationStamp() &&
            info.getNumBytes() > 0) {
          syncList.add(new BlockRecord(id, datanode, info));
        }
      } catch (RecoveryInProgressException ripE) {
        InterDatanodeProtocol.LOG.warn(
            "Recovery for replica " + block + " on data-node " + id
            // ... (rest of the log message and loop omitted)

        + "\n  recoveryId=" + recoveryId
        + "\n  original=" + original;
  }

  ReplicaRecoveryInfo createInfo() {
    return new ReplicaRecoveryInfo(original.getBlockId(),
        original.getBytesOnDisk(), original.getGenerationStamp(),
        getOrignalReplicaState());
  }
