Class org.apache.hadoop.test.GenericTestUtils (package org.apache.hadoop.test)

Examples of org.apache.hadoop.test.GenericTestUtils.LogCapturer
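
LogCapturer attaches an in-memory appender to an existing log so a test can assert on what was written, then detach it again. Below is a minimal sketch of that capture/assert/release cycle, assuming the Hadoop test jar, JUnit 4 and Hadoop's usual log4j-backed commons-logging setup; the test class and the message it logs are invented for illustration and do not appear in the real tests further down.

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
  import org.junit.Test;
  import static org.junit.Assert.assertTrue;

  public class LogCapturerSketchTest {
    // Any log4j-backed commons-logging Log can be captured; normally this
    // would be the log of the class under test (e.g. Storage or DataNode).
    private static final Log LOG = LogFactory.getLog(LogCapturerSketchTest.class);

    @Test
    public void capturesMessagesWrittenWhileAttached() {
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LOG);
      try {
        // Exercise the code under test; here we log directly for brevity.
        LOG.warn("something noteworthy happened");
      } finally {
        // Detach the capturing appender so later tests are unaffected.
        logs.stopCapturing();
      }
      // Everything logged while capturing is available as a single string.
      assertTrue(logs.getOutput().contains("something noteworthy"));
    }
  }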


      for (StorageDirectory sd : storage.dirIterable(null)) {
        assertLockFails(sd);
        savedSd = sd;
      }
     
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
      try {
        // try to lock the storage that's already locked
        savedSd.lock();
        fail("Namenode should not be able to lock a storage that is already locked");
      } catch (IOException ioe) {
        String jvmName = ManagementFactory.getRuntimeMXBean().getName();
        assertTrue("Error message does not include JVM name '" + jvmName
            + "'", logs.getOutput().contains(jvmName));
      }
    } finally {
      cluster.shutdown();
    }
  }
View Full Code Here


      for (StorageDirectory sd : storage.dirIterable(null)) {
        assertLockFails(sd);
        savedSd = sd;
      }
     
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
      try {
        // try to lock the storage that's already locked
        savedSd.lock();
        fail("Namenode should not be able to lock a storage that is already locked");
      } catch (IOException ioe) {
        // cannot read lock file on Windows, so message cannot get JVM name
        String lockingJvmName = Path.WINDOWS ? "" :
          " " + ManagementFactory.getRuntimeMXBean().getName();
        String expectedLogMessage = "It appears that another namenode"
          + lockingJvmName + " has already locked the storage directory";
        assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
      }
    } finally {
      cleanup(cluster);
      cluster = null;
    }
View Full Code Here

    // Delete the segment.
    assertTrue(editsSegment.delete());
   
    // Trying to bootstrap standby should now fail since the edit
    // logs aren't available in the shared dir.
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(BootstrapStandby.class));
    try {
      int rc = BootstrapStandby.run(
          new String[]{"-force"},
          cluster.getConfiguration(1));
      assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
    } finally {
      logs.stopCapturing();
    }
    GenericTestUtils.assertMatches(logs.getOutput(),
        "FATAL.*Unable to read transaction ids 1-3 from the configured shared");
  }
View Full Code Here
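
The BootstrapStandby example above checks the captured output with GenericTestUtils.assertMatches, which takes a regular expression and passes if the pattern is found anywhere in the output, rather than a literal substring as contains() does. A minimal sketch under the same assumptions as the first sketch; the logger and the message are again invented:

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
  import org.junit.Test;

  public class AssertMatchesSketchTest {
    private static final Log LOG = LogFactory.getLog(AssertMatchesSketchTest.class);

    @Test
    public void matchesCapturedOutputAgainstARegex() {
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LOG);
      try {
        LOG.error("Unable to read transaction ids 1-3 from the shared edits dir");
      } finally {
        logs.stopCapturing();
      }
      // A partial, unanchored pattern is sufficient, as in the FATAL.* check above.
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Unable to read transaction ids \\d+-\\d+");
    }
  }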


      DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
      DFSClient spyClient = Mockito.spy(client);
      Mockito.doReturn(false).when(spyClient).shouldEncryptData();
      DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
     
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
          LogFactory.getLog(DataNode.class));
      try {
        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
        if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
          fail("Should not have been able to read without encryption enabled.");
        }
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Could not obtain block:",
            ioe);
      } finally {
        logs.stopCapturing();
      }
      fs.close();
     
      if (resolverClazz != null && !resolverClazz.endsWith("TestTrustedChannelResolver")){
        GenericTestUtils.assertMatches(logs.getOutput(),
        "Failed to read expected encryption handshake from client at");
      }
    } finally {
      if (cluster != null) {
        cluster.shutdown();
View Full Code Here

      DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
      DFSClient spyClient = Mockito.spy(client);
      Mockito.doReturn(false).when(spyClient).shouldEncryptData();
      DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
     
      LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
          LogFactory.getLog(DataNode.class));
      try {
        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
        fail("Should not have been able to read without encryption enabled.");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Could not obtain block:",
            ioe);
      } finally {
        logs.stopCapturing();
      }
      fs.close();
     
      GenericTestUtils.assertMatches(logs.getOutput(),
          "Failed to read expected encryption handshake from client at");
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
View Full Code Here
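
Both encrypted-transfer examples above also use GenericTestUtils.assertExceptionContains, which fails the test when the thrown exception's message does not contain the expected fragment. A minimal, self-contained sketch; the exception text here is invented:

  import java.io.IOException;
  import org.apache.hadoop.test.GenericTestUtils;
  import org.junit.Test;

  public class AssertExceptionContainsSketchTest {
    @Test
    public void checksTheExceptionMessage() {
      try {
        // Stand-in for the read that is expected to fail in the real test.
        throw new IOException("Could not obtain block: blk_123 from any node");
      } catch (IOException ioe) {
        // Passes because the message contains the expected fragment; with any
        // other message the assertion would fail the test.
        GenericTestUtils.assertExceptionContains("Could not obtain block:", ioe);
      }
    }
  }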
