Examples of restartDataNode()
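
The snippets below are excerpts from Hadoop's own HDFS tests. For orientation, here is a minimal, self-contained sketch of the common pattern they share: stop a datanode with stopDataNode(), do some work while it is down, then bring it back with restartDataNode() and wait for the cluster to settle. This is an illustrative sketch, not one of the excerpted tests; it assumes a Hadoop 2.x test classpath and is placed in the org.apache.hadoop.hdfs package, as the original tests are, so that members such as MiniDFSCluster.DataNodeProperties are accessible even where they are package-private.

package org.apache.hadoop.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RestartDataNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(2).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Write a small file at replication 2 and wait for both replicas.
      Path file = new Path("/restartDataNodeSketch.dat");
      DFSTestUtil.createFile(fs, file, 1024L, (short) 2, 0L);
      DFSTestUtil.waitReplication(fs, file, (short) 2);

      // Stop the first datanode, keeping a handle to its storage and ports.
      MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);

      // ... exercise the cluster while the datanode is down ...

      // Bring the same datanode back with its original storage directories,
      // then wait for the cluster to become healthy and for both replicas
      // to be reported again.
      cluster.restartDataNode(dnProps);
      cluster.waitActive();
      DFSTestUtil.waitReplication(fs, file, (short) 2);
    } finally {
      cluster.shutdown();
    }
  }
}

In the excerpts, restartDataNode() is called either with the DataNodeProperties returned by stopDataNode(), which restarts that specific datanode with its original storage, or with an integer index, which stops and restarts the datanode at that position in the cluster. Recent MiniDFSCluster versions also provide an overload that additionally keeps the datanode's original port; check the MiniDFSCluster source for your Hadoop release before relying on it.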


Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

          Thread.sleep(1000);
        } catch (InterruptedException ignored) {}
      }
     
      // restart the datanode so the corrupt replica will be detected
      cluster.restartDataNode(dnProps);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
     
      String blockPoolId = cluster.getNamesystem().getBlockPoolId();
      final DatanodeID corruptDataNode =
        DataNodeTestUtils.getDNRegistrationForBP(

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

      int dnToCorrupt = DFSTestUtil.firstDnWithBlock(cluster, b);
      assertTrue(MiniDFSCluster.corruptReplica(dnToCorrupt, b));

      // Restart the datanode so blocks are re-scanned, and the corrupt
      // block is detected.
      cluster.restartDataNode(dnToCorrupt);

      // Wait for the namenode to notice the corrupt replica
      DFSTestUtil.waitCorruptReplicas(fs, ns, filePath, b, 1);

      // The rack policy is still respected

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

      }
      // the block will be replicated
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      NumberReplicas num = null;
      do {

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

      do {
        num = namesystem.countNodes(block);
      } while (num.liveReplicas() != REPLICATION_FACTOR);
     
      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected
      do {
        num = namesystem.countNodes(block);

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

     
      // the block will be replicated
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();
     
      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() == 0) {

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

      while (countNodes(block.getLocalBlock(), namesystem).liveReplicas() != REPLICATION_FACTOR) {
        checkTimeout("live replica count not correct", 1000);
      }

      // restart the first datanode
      cluster.restartDataNode(dnprop);
      cluster.waitActive();

      // check if excessive replica is detected (transient)
      initializeTimeout(TIMEOUT);
      while (countNodes(block.getLocalBlock(), namesystem).excessReplicas() != 2) {

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

        LOG.info("=========================== restarting cluster");
        DataNodeProperties otherNode = cluster.stopDataNode(0);
        cluster.restartNameNode();
       
        // Restart the datanode with the corrupt replica first.
        cluster.restartDataNode(oldGenstampNode);
        cluster.waitActive();

        // Then the other node
        cluster.restartDataNode(otherNode);
        cluster.waitActive();

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

        // Restart the datanode with the corrupt replica first.
        cluster.restartDataNode(oldGenstampNode);
        cluster.waitActive();

        // Then the other node
        cluster.restartDataNode(otherNode);
        cluster.waitActive();
       
        // Compute and send invalidations, waiting until they're fully processed.
        cluster.getNameNode().getNamesystem().getBlockManager()
          .computeInvalidateWork(2);

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

      StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
      LOG.info("datanode " + validIdx + " came up with network location " +
        info[0].getNetworkLocation());

      // Restart the DN with the invalid topology and wait for it to register.
      cluster.restartDataNode(invalidIdx);
      Thread.sleep(5000);
      while (true) {
        info = nn.getDatanodeReport(DatanodeReportType.LIVE);
        if (info.length == 2) {
          break;

Examples of org.apache.hadoop.hdfs.MiniDFSCluster.restartDataNode()

          Thread.sleep(1000);
        } catch (InterruptedException ignored) {}
      }
     
      // restart the datanode so the corrupt replica will be detected
      cluster.restartDataNode(dnProps);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
     
      final DatanodeID corruptDataNode =
        cluster.getDataNodes().get(2).dnRegistration;
      final FSNamesystem namesystem = FSNamesystem.getFSNamesystem();