Package org.apache.hadoop.test.MultithreadedTestUtil

Examples of org.apache.hadoop.test.MultithreadedTestUtil.TestContext
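
TestContext coordinates a group of test threads and propagates their failures back to the main test thread: addThread() registers a TestingThread (most often a RepeatingTestThread, whose doAnAction() runs in a loop until the context is stopped), startThreads() starts every registered thread, waitFor(millis) blocks for the given number of milliseconds but fails fast if any thread throws, and stop() tells the threads to finish, joins them, and rethrows the first recorded exception. A minimal lifecycle sketch (the four threads, the counter, and the 5-second runtime are illustrative, not taken from the excerpts below):

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;

public class TestContextSketch {
  public static void main(String[] args) throws Exception {
    final AtomicLong count = new AtomicLong();
    TestContext ctx = new TestContext();
    for (int i = 0; i < 4; i++) {
      ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          count.incrementAndGet();  // the action under test goes here
        }
      });
    }
    ctx.startThreads();  // start all registered threads
    ctx.waitFor(5000);   // run for ~5s, rethrowing any thread failure early
    ctx.stop();          // ask threads to finish, join them, recheck errors
    System.out.println("iterations: " + count.get());
  }
}

The examples below are excerpts from Hadoop's own tests and benchmarks.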


     
      FileSystem fs = harness.getFailoverFs();
      DistributedFileSystem fsAsOtherUser = createFsAsOtherUser(
          cluster, harness.conf);
     
      TestContext testers = new TestContext();
      for (int i = 0; i < STRESS_NUM_THREADS; i++) {
        Path p = new Path("/test-" + i);
        testers.addThread(new PipelineTestThread(
            testers, fs, fsAsOtherUser, p));
      }
     
      // Start a separate thread which will make sure that replication
      // happens quickly by triggering deletion reports and replication
      // work calculation frequently.
      harness.addReplicationTriggerThread(500);
      harness.addFailoverThread(5000);
      harness.startThreads();
      testers.startThreads();
     
      testers.waitFor(STRESS_RUNTIME);
      testers.stop();
      harness.stopThreads();
    } finally {
      System.err.println("===========================\n\n\n\n");
      harness.shutdown();
    }
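The PipelineTestThread registered above is defined elsewhere in the test and not shown in this excerpt. A hypothetical stand-in (the class name, the 1 KB write, and the close-and-ignore handling are illustrative assumptions) would repeatedly write and flush a file, then have the second user force lease recovery, exercising pipeline recovery while the harness threads trigger replication work and failovers:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;

// Hypothetical stand-in for PipelineTestThread (illustrative only).
class PipelineStressThread extends MultithreadedTestUtil.RepeatingTestThread {
  private final FileSystem fs;
  private final DistributedFileSystem fsOtherUser;
  private final Path path;

  PipelineStressThread(TestContext ctx, FileSystem fs,
      DistributedFileSystem fsOtherUser, Path path) {
    super(ctx);
    this.fs = fs;
    this.fsOtherUser = fsOtherUser;
    this.path = path;
  }

  @Override
  public void doAnAction() throws Exception {
    FSDataOutputStream stm = fs.create(path, true);
    stm.write(new byte[1024]);
    stm.hflush();                    // push the data into the write pipeline
    fsOtherUser.recoverLease(path);  // second user forcibly recovers the lease
    try {
      stm.close();                   // may fail: the lease was just revoked
    } catch (IOException ignored) {
      // occasional failure here is expected once the lease is gone
    }
  }
}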


        .nnTopology(topology)
        .numDataNodes(0)
        .build();
    cluster.waitActive();

    ctx = new TestContext();
    ctx.addThread(thr1 = new ZKFCThread(ctx, 0));
    assertEquals(0, thr1.zkfc.run(new String[]{"-formatZK"}));

    thr1.start();
    waitForHAState(0, HAServiceState.ACTIVE);
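Because TestingThread extends Thread, a registered thread can also be started individually, as thr1.start() does above, rather than through startThreads(); the context still records any exception the thread throws, so a later waitFor() or stop() will surface it.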

    RPC.setProtocolEngine(conf, TestRpcService.class, opts.rpcEngine);

    Server server = startServer(opts);
    try {
     
      TestContext ctx = setupClientTestContext(opts);
      if (ctx != null) {
        long totalCalls = 0;
        ctx.startThreads();
        long veryStart = System.nanoTime();

        // Loop printing results every second until the specified
        // time has elapsed
        for (int i = 0; i < opts.secondsToRun; i++) {
          long st = System.nanoTime();
          ctx.waitFor(1000);
          long et = System.nanoTime();
          long ct = callCount.getAndSet(0);
          totalCalls += ct;
          double callsPerSec = (ct * 1e9) / (et - st);  // float math avoids truncation
          System.out.println("Calls per second: " + callsPerSec);
        }
       
        // Print results

        if (totalCalls > 0) {
          long veryEnd = System.nanoTime();
          double callsPerSec =
              (totalCalls * 1e9) / (veryEnd - veryStart);
          long cpuNanosClient = getTotalCpuTime(ctx.getTestThreads());
          long cpuNanosServer = -1;
          if (server != null) {
            cpuNanosServer = getTotalCpuTime(server.getHandlers());
          }
          System.out.println("====== Results ======");
          System.out.println("Options:\n" + opts);
          System.out.println("Total calls per second: " + callsPerSec);
          System.out.println("CPU time per call on client: " +
              (cpuNanosClient / totalCalls) + " ns");
          if (server != null) {
            System.out.println("CPU time per call on server: " +
                (cpuNanosServer / totalCalls) + " ns");
          }
        } else {
          System.out.println("No calls!");
        }

        ctx.stop();
      } else {
        while (true) {
          Thread.sleep(10000);
        }
      }
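getTotalCpuTime() is not shown in the excerpt. A sketch under the assumption that it sums per-thread CPU time through ThreadMXBean (the signature is illustrative; both ctx.getTestThreads() and server.getHandlers() hand it Thread objects to walk):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;

// Sketch: sum CPU nanoseconds across a group of threads (assumed approach).
static long getTotalCpuTime(Iterable<? extends Thread> threads) {
  ThreadMXBean mxBean = ManagementFactory.getThreadMXBean();
  long total = 0;
  for (Thread t : threads) {
    long cpuNanos = mxBean.getThreadCpuTime(t.getId()); // -1 if unsupported
    if (cpuNanos > 0) {
      total += cpuNanos;
    }
  }
  return total;
}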

      msgBuilder.append('x');
    }
    final String echoMessage = msgBuilder.toString();

    // Create the clients in a test context
    TestContext ctx = new TestContext();
    for (int i = 0; i < opts.clientThreads; i++) {
      final RpcServiceWrapper proxy = proxies[i % numProxies];
     
      ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          proxy.doEcho(echoMessage);
          callCount.incrementAndGet();
        }
      });
    }
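RepeatingTestThread invokes doAnAction() in a loop until the context is stopped, so each client thread keeps issuing doEcho() calls for the entire run while callCount accumulates the throughput sampled by the timing loop in the previous excerpt.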

    try {
      cluster.waitActive();
      cluster.transitionToActive(0);
     
      FileSystem fs = harness.getFailoverFs();
      TestContext togglers = new TestContext();
      for (int i = 0; i < NUM_THREADS; i++) {
        Path p = new Path("/test-" + i);
        DFSTestUtil.createFile(fs, p, BLOCK_SIZE*10, (short)3, (long)i);
        togglers.addThread(new ReplicationToggler(togglers, fs, p));
      }
     
      // Start a separate thread which will make sure that replication
      // happens quickly by triggering deletion reports and replication
      // work calculation frequently.
      harness.addReplicationTriggerThread(500);
      harness.addFailoverThread(5000);
      harness.startThreads();
      togglers.startThreads();
     
      togglers.waitFor(RUNTIME);
      togglers.stop();
      harness.stopThreads();

      // Check that the files can be read without throwing
      for (int i = 0; i < NUM_THREADS; i++) {
        Path p = new Path("/test-" + i);
        DFSTestUtil.readFile(fs, p);  // throws if the file cannot be read
      }
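ReplicationToggler is defined elsewhere in the test. A hypothetical stand-in (simplified: the real toggler would also need to wait for each new replica count to take effect before toggling again) flips the file's replication factor down and back up, generating deletion and re-replication work for the NameNode while failovers occur:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;

// Hypothetical stand-in for ReplicationToggler (illustrative only).
class ReplicationTogglerSketch extends MultithreadedTestUtil.RepeatingTestThread {
  private final FileSystem fs;
  private final Path path;

  ReplicationTogglerSketch(TestContext ctx, FileSystem fs, Path path) {
    super(ctx);
    this.fs = fs;
    this.path = path;
  }

  @Override
  public void doAnAction() throws Exception {
    fs.setReplication(path, (short) 1);  // excess replicas become deletion work
    fs.setReplication(path, (short) 3);  // under-replication triggers copies
  }
}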

        .when(spyLock).writeLock();
     
      final FileSystem fs = HATestUtil.configureFailoverFs(
          cluster, conf);
     
      TestContext ctx = new TestContext();
      for (int i = 0; i < 50; i++) {
        final int finalI = i;
        ctx.addThread(new RepeatingTestThread(ctx) {
          @Override
          public void doAnAction() throws Exception {
            Path p = new Path("/test-" + finalI);
            fs.mkdirs(p);
            fs.delete(p, true);
          }
        });
      }
     
      ctx.addThread(new RepeatingTestThread(ctx) {
        @Override
        public void doAnAction() throws Exception {
          cluster.transitionToStandby(0);
          Thread.sleep(50);
          cluster.transitionToActive(0);
        }
      });
      ctx.startThreads();
      ctx.waitFor(20000);
      ctx.stop();
    } finally {
      cluster.shutdown();
    }
  }
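The pattern here pairs fifty worker threads doing mkdirs()/delete() against one RepeatingTestThread that flips NameNode 0 to standby and back to active with a 50 ms pause in between; ctx.waitFor(20000) keeps the churn running for 20 seconds and rethrows immediately if any thread fails.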
