Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.Server
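The snippets below are drawn from HBase's own test suites and show the usual ways tests get hold of a Server instance: hand-rolled fakes such as MockServer and DummyServer, Mockito mocks and spies, and handler subclasses that inject failures around a real Server.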


 
  @Test
  public void testFailedOpenRegion() throws Exception {
    Server server = new MockServer(HTU);
    RegionServerServices rsServices = new MockRegionServerServices();

    // Create the znode in OFFLINE state, which is what the handler expects
    ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
    ZKAssign.transitionNodeOpening(server.getZooKeeper(), TEST_HRI, server.getServerName());

    // Create the handler
    OpenRegionHandler handler =
      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
        @Override
        HRegion openRegion() {
          // Fake failure of opening a region due to an IOE, which is caught
          return null;
        }
    };
    handler.process();

    // Handler should have transitioned it to FAILED_OPEN
    RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName());
    assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, data.getEventType());
  }
View Full Code Here
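The MockServer and MockRegionServerServices classes above are hand-rolled fakes from the HBase test tree. A rough Mockito-based equivalent, stubbing only the three Server methods these handler tests actually call (a sketch under that assumption, not the real MockServer):

  import static org.mockito.Mockito.mock;
  import static org.mockito.Mockito.when;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.Server;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

  // Minimal Server stand-in: unstubbed methods return Mockito defaults.
  static Server stubServer(Configuration conf, ZooKeeperWatcher zkw) {
    Server server = mock(Server.class);
    when(server.getConfiguration()).thenReturn(conf);
    when(server.getZooKeeper()).thenReturn(zkw);
    when(server.getServerName()).thenReturn(
        new ServerName("example.org", 1234, System.currentTimeMillis()));
    return server;
  }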


 
  @Test
  public void testFailedUpdateMeta() throws Exception {
    Server server = new MockServer(HTU);
    RegionServerServices rsServices = new MockRegionServerServices();

    // Create the znode in OFFLINE state, which is what the handler expects
    ZKAssign.createNodeOffline(server.getZooKeeper(), TEST_HRI, server.getServerName());
    ZKAssign.transitionNodeOpening(server.getZooKeeper(), TEST_HRI, server.getServerName());
    // Create the handler
    OpenRegionHandler handler =
      new OpenRegionHandler(server, rsServices, TEST_HRI, TEST_HTD) {
        @Override
        boolean updateMeta(final HRegion r) {
          // Fake failure of updating META
          return false;
        }
    };
    handler.process();

    // Handler should have transitioned it to FAILED_OPEN
    RegionTransitionData data =
      ZKAssign.getData(server.getZooKeeper(), TEST_HRI.getEncodedName());
    assertEquals(EventType.RS_ZK_REGION_FAILED_OPEN, data.getEventType());
  }
View Full Code Here
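Both tests use the same template: OpenRegionHandler exposes openRegion() and updateMeta() as overridable hooks, so an anonymous subclass can force one step to fail while the real ZooKeeper transitions still run against the Server's watcher. The assertion then only has to check the externally visible outcome, the RS_ZK_REGION_FAILED_OPEN event.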

    // Spy on the transaction and make opening a daughter region fail.
    SplitTransaction spiedUponSt = spy(st);
    Mockito.doThrow(new MockedFailedDaughterOpen())
        .when(spiedUponSt).openDaughterRegion((Server) Mockito.anyObject(),
            (HRegion) Mockito.anyObject());

    // Run execute() and look at what it returns.
    boolean expectedException = false;
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    try {
      spiedUponSt.execute(mockServer, null);
    } catch (IOException e) {
      if (e.getCause() != null &&
          e.getCause() instanceof MockedFailedDaughterOpen) {
        expectedException = true;
      }
    }
    assertTrue(expectedException);
View Full Code Here

    // Start transaction.
    SplitTransaction st = prepareGOOD_SPLIT_ROW();

    // Run execute() and look at what it returns.
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
    // Do some assertions about execution.
    assertTrue(this.fs.exists(st.getSplitDir()));
    // Assert the parent region is closed.
    assertTrue(this.parent.isClosed());
View Full Code Here
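On the success path execute(...) returns both daughters; a hedged continuation of the assertions one might add (PairOfSameType exposes getFirst()/getSecond(); the key-range checks are an illustrative assumption, not the test's own code):

  // The daughters should jointly cover the parent's key range.
  HRegion daughterA = daughters.getFirst();
  HRegion daughterB = daughters.getSecond();
  assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(),
      daughterA.getRegionInfo().getStartKey()));
  assertTrue(Bytes.equals(daughterB.getRegionInfo().getEndKey(),
      parent.getRegionInfo().getEndKey()));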

    SplitTransaction spiedUponSt = spy(st);
    when(spiedUponSt.createDaughterRegion(spiedUponSt.getSecondDaughter(), null)).
      thenThrow(new MockedFailedDaughterCreation());
    // Run execute() and look at what it returns.
    boolean expectedException = false;
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    try {
      spiedUponSt.execute(mockServer, null);
    } catch (MockedFailedDaughterCreation e) {
      expectedException = true;
    }
View Full Code Here
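Note the two stubbing styles used on the spy. The when(...).thenThrow(...) form above invokes the real createDaughterRegion once while the stub is recorded; the doThrow(...).when(...) form (as in the openDaughterRegion example earlier) records the stub without touching the real method, which is the safer choice when the spied method has side effects. A sketch of the equivalent stub:

  import static org.mockito.Mockito.doThrow;

  // Equivalent stub that never executes the real createDaughterRegion.
  doThrow(new MockedFailedDaughterCreation())
      .when(spiedUponSt)
      .createDaughterRegion(spiedUponSt.getSecondDaughter(), null);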

 
  @Test
  public void testNodeFailoverWorkerCopyQueuesFromRSUsingMulti() throws Exception {
    LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
    final Server server = new DummyServer("hostname0.example.org");
    AtomicBoolean replicating = new AtomicBoolean(true);
    ReplicationZookeeper rz = new ReplicationZookeeper(server, replicating);
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
      rz.addLogToList(file, "1");
    }
    // create 3 DummyServers
    Server s1 = new DummyServer("dummyserver1.example.org");
    Server s2 = new DummyServer("dummyserver2.example.org");
    Server s3 = new DummyServer("dummyserver3.example.org");

    // create 3 DummyNodeFailoverWorkers
    DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker(
        server.getServerName().getServerName(), s1);
    DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker(
View Full Code Here
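With HConstants.ZOOKEEPER_USEMULTI enabled, copyQueuesFromRSUsingMulti claims the dead server's replication queue znodes in a single atomic ZooKeeper multi operation, so two failover workers racing for the same dead server cannot both adopt a queue.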

  @Test
  public void testNodeFailoverDeadServerParsing() throws Exception {
    LOG.debug("testNodeFailoverDeadServerParsing");
    conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
    final Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com");
    AtomicBoolean replicating = new AtomicBoolean(true);
    ReplicationZookeeper rz = new ReplicationZookeeper(server, replicating);
    // populate some znodes in the peer znode
    files.add("log1");
    files.add("log2");
    for (String file : files) {
      rz.addLogToList(file, "1");
    }
    // create 3 DummyServers
    Server s1 = new DummyServer("ip-10-8-101-114.ec2.internal");
    Server s2 = new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com");
    Server s3 = new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com");

    // simulate three servers failing sequentially
    ReplicationZookeeper rz1 = new ReplicationZookeeper(s1, new AtomicBoolean(true));
    SortedMap<String, SortedSet<String>> testMap =
        rz1.copyQueuesFromRSUsingMulti(server.getServerName().getServerName());
View Full Code Here
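DummyServer is another hand-rolled fake from the replication tests. A rough reconstruction of the shape these snippets assume (the exact Server method set varies across HBase versions; conf and zkw stand in for the test class's shared fixtures):

  // Pretends to be a distinct region server identified by hostname.
  static class DummyServer implements Server {
    static Configuration conf;      // shared test configuration
    static ZooKeeperWatcher zkw;    // shared test ZooKeeper watcher
    private final String hostname;

    DummyServer(String hostname) { this.hostname = hostname; }

    @Override public Configuration getConfiguration() { return conf; }
    @Override public ZooKeeperWatcher getZooKeeper() { return zkw; }
    @Override public CatalogTracker getCatalogTracker() { return null; }
    @Override public ServerName getServerName() {
      return new ServerName(hostname, 1234, 1L);
    }
    @Override public void abort(String why, Throwable e) { }
    @Override public boolean isAborted() { return false; }
    @Override public void stop(String why) { }
    @Override public boolean isStopped() { return false; }
  }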

    // set TTL
    long ttl = 2000;
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
    Server server = new DummyServer();
    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
    FileSystem fs = FileSystem.get(conf);
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
View Full Code Here
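TimeToLiveHFileCleaner keeps an archived HFile until its age exceeds the configured TTL. A minimal sketch of that decision, assuming age is measured from the file's modification time (the real plugin reads the TTL from TimeToLiveHFileCleaner.TTL_CONF_KEY):

  // An archived file becomes deletable once it has outlived the TTL.
  static boolean oldEnoughToDelete(FileSystem fs, Path file, long ttlMillis)
      throws IOException {
    long modTime = fs.getFileStatus(file).getModificationTime();
    return System.currentTimeMillis() - modTime > ttlMillis;
  }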

  @Test
  public void testRemovesEmptyDirectories() throws Exception {
    Configuration conf = UTIL.getConfiguration();
    // no cleaner policies = delete all files
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
    Server server = new DummyServer();
    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

    // setup the cleaner
    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
View Full Code Here
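With the plugin list set to the empty string, no cleaner delegate can veto a deletion, so every archived file is removable and the chore also prunes the directories it empties along the way, which is what this test asserts.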

  @Test
  public void testCleanParent() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    try {
      MasterServices services = new MockMasterServices(server);
      CatalogJanitor janitor = new CatalogJanitor(server, services);
      // Create regions.
      HTableDescriptor htd = new HTableDescriptor("table");
      htd.addFamily(new HColumnDescriptor("f"));
      HRegionInfo parent =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("eee"));
      HRegionInfo splita =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("ccc"));
      HRegionInfo splitb =
        new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
            Bytes.toBytes("eee"));
      // Test that when both daughter regions are in place, we do not
      // remove the parent.
      List<KeyValue> kvs = new ArrayList<KeyValue>();
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
      Result r = new Result(kvs);
      // Add a reference under splitA directory so we don't clear out the parent.
      Path rootdir = services.getMasterFileSystem().getRootDir();
      Path tabledir =
        HTableDescriptor.getTableDir(rootdir, htd.getName());
      Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
          htd.getColumnFamilies()[0].getName());
      Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top);
      long now = System.currentTimeMillis();
      // Reference file names follow the format parsed by StoreFile#REF_NAME_PARSER
      Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
      FileSystem fs = services.getMasterFileSystem().getFileSystem();
      Path path = ref.write(fs, p);
      assertTrue(fs.exists(path));
      assertFalse(janitor.cleanParent(parent, r));
      // Remove the reference file and try again.
      assertTrue(fs.delete(p, true));
      assertTrue(janitor.cleanParent(parent, r));
    } finally {
      server.stop("shutdown");
    }
  }
View Full Code Here
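The mechanic under test: CatalogJanitor will not clean a split parent while a daughter still holds a Reference file pointing back into it, and the pointer is encoded in the file name itself. Hypothetical helpers illustrating the "<hfileName>.<parentEncodedName>" convention that StoreFile#REF_NAME_PARSER checks:

  // e.g. "1299771110000.aabbccdd1122..." names a reference whose
  // parent region has encoded name "aabbccdd1122...".
  static String referenceName(long hfileId, String parentEncodedName) {
    return Long.toString(hfileId) + "." + parentEncodedName;
  }
  static String referencedParent(String refName) {
    return refName.substring(refName.indexOf('.') + 1);
  }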
