Class org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand

Examples of org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock
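RecoveringBlock pairs an ExtendedBlock with the DatanodeInfo locations expected to take part in block recovery and the new generation stamp (the recovery id) the surviving replicas should adopt. A minimal construction sketch, assuming hypothetical pool id, block id, length, and generation stamp values; the three-argument constructor is the one used in every example below:

  // Hypothetical values for illustration; the locations would normally come
  // from a LocatedBlock or from the block's expected locations.
  ExtendedBlock blk = new ExtendedBlock("bp-example", 1L, 1024L, 100L);
  long recoveryId = blk.getGenerationStamp() + 1;  // new gen stamp to apply
  DatanodeInfo[] locs = new DatanodeInfo[0];       // placeholder: no locations
  RecoveringBlock rb = new RecoveringBlock(blk, locs, recoveryId);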


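PBHelper converts the wire (protobuf) form back into a RecoveringBlock, rebuilding the block, its locations, and the new generation stamp: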

  public static RecoveringBlock convert(RecoveringBlockProto b) {
    ExtendedBlock block = convert(b.getBlock().getB());
    DatanodeInfo[] locs = convert(b.getBlock().getLocsList());
    return new RecoveringBlock(block, locs, b.getNewGenStamp());
  }
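On the NameNode, blocks returned by getLeaseRecoveryCommand are wrapped into a BlockRecoveryCommand, one RecoveringBlock per block under construction: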


            .getLeaseRecoveryCommand(Integer.MAX_VALUE);
        if (blocks != null) {
          BlockRecoveryCommand brCommand = new BlockRecoveryCommand(
              blocks.length);
          for (BlockInfoUnderConstruction b : blocks) {
            brCommand.add(new RecoveringBlock(
                new ExtendedBlock(blockPoolId, b), b.getExpectedLocations(), b
                    .getBlockRecoveryId()));
          }
          return new DatanodeCommand[] { brCommand };
        }
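A test that drives initReplicaRecovery and updateReplicaUnderRecovery over InterDatanodeProtocol, then checks that initiating recovery for a block from an unknown pool returns null: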

      ExtendedBlock b = locatedblock.getBlock();
      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
      checkMetaInfo(b, datanode);
      long recoveryId = b.getGenerationStamp() + 1;
      idp.initReplicaRecovery(
          new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));

      //verify updateBlock
      ExtendedBlock newblock = new ExtendedBlock(b.getBlockPoolId(),
          b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
      idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
      checkMetaInfo(newblock, datanode);
     
      // Verify correct null response trying to init recovery for a missing block
      ExtendedBlock badBlock = new ExtendedBlock("fake-pool",
          b.getBlockId(), 0, 0);
      assertNull(idp.initReplicaRecovery(
          new RecoveringBlock(badBlock,
              locatedblock.getLocations(), recoveryId)));
    }
    finally {
      if (cluster != null) {cluster.shutdown();}
    }
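Initiating recovery directly against the datanode's FsDatasetSpi should leave the replica in the RUR (replica under recovery) state: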

      final ExtendedBlock b = locatedblock.getBlock();
      final long recoveryid = b.getGenerationStamp() + 1;
      final long newlength = b.getNumBytes() - 1;
      final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
      final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
          new RecoveringBlock(b, null, recoveryid));

      //check replica
      final ReplicaInfo replica = FsDatasetTestUtil.fetchReplicaInfo(
          fsdataset, bpid, b.getBlockId());
      Assert.assertEquals(ReplicaState.RUR, replica.getState());
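An inter-datanode proxy created with a short timeout is expected to fail with a SocketTimeoutException: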

    InterDatanodeProtocol proxy = null;

    try {
      proxy = DataNode.createInterDataNodeProtocolProxy(
          dInfo, conf, 500, false);
      proxy.initReplicaRecovery(new RecoveringBlock(
          new ExtendedBlock("bpid", 1), null, 100));
      fail("Expected a SocketTimeoutException, but did not get one.");
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy);
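When the NameNode builds the command, stale replica locations are skipped, unless so many are stale that all expected locations must participate: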

            if (recoveryLocations.size() > 1) {
              if (recoveryLocations.size() != expectedLocations.length) {
                LOG.info("Skipped stale nodes for recovery: " +
                    (expectedLocations.length - recoveryLocations.size()));
              }
              brCommand.add(new RecoveringBlock(
                  new ExtendedBlock(blockPoolId, b),
                  recoveryLocations.toArray(new DatanodeDescriptor[recoveryLocations.size()]),
                  b.getBlockRecoveryId()));
            } else {
              // If too many replicas are stale, then choose all replicas to participate
              // in block recovery.
              brCommand.add(new RecoveringBlock(
                  new ExtendedBlock(blockPoolId, b),
                  expectedLocations,
                  b.getBlockRecoveryId()));
            }
          }

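A protobuf round-trip test: convert a RecoveringBlock to its proto form and back, then compare the block and its locations: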

  @Test
  public void testConvertRecoveringBlock() {
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
    RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
    RecoveringBlockProto bProto = PBHelper.convert(b);
    RecoveringBlock b1 = PBHelper.convert(bProto);
    assertEquals(b.getBlock(), b1.getBlock());
    DatanodeInfo[] dnInfo1 = b1.getLocations();
    assertEquals(dnInfo.length, dnInfo1.length);
    for (int i = 0; i < dnInfo.length; i++) {
      compare(dnInfo[i], dnInfo1[i]);
    }
  }
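A similar conversion test for a whole BlockRecoveryCommand carrying two RecoveringBlocks: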

  @Test
  public void testConvertBlockRecoveryCommand() {
    DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };

    List<RecoveringBlock> blks = ImmutableList.of(
      new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
      new RecoveringBlock(getExtendedBlock(2), dnInfo, 3)
    );
   
    BlockRecoveryCommand cmd = new BlockRecoveryCommand(blks);
    BlockRecoveryCommandProto proto = PBHelper.convert(cmd);
    assertEquals(1, proto.getBlocks(0).getBlock().getB().getBlockId());
