Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.Server
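
In the HBase versions these snippets come from, org.apache.hadoop.hbase.Server is the interface shared by HMaster and HRegionServer: it exposes the process's Configuration, ZooKeeper watcher, catalog tracker, and ServerName, and extends Abortable and Stoppable. The examples below never start a real cluster member; they either Mockito-mock the interface or substitute a lightweight stub (MockServer, DummyServer). A minimal sketch of the mock variant, assuming an HBaseTestingUtility instance named TEST_UTIL as in the snippets that follow:

    Server mockServer = Mockito.mock(Server.class);
    Mockito.when(mockServer.getConfiguration())
        .thenReturn(TEST_UTIL.getConfiguration());
    // Code under test that only reads configuration from its Server
    // can now be exercised without a running master or region server.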


    // Spy the transaction and stub the daughter-region open so it fails
    // with a recognizable, test-only exception.
    SplitTransaction spiedUponSt = spy(st);
    Mockito.doThrow(new MockedFailedDaughterOpen())
        .when(spiedUponSt)
        .openDaughterRegion((Server) Mockito.anyObject(),
            (HRegion) Mockito.anyObject());

    // Run the execute.  Look at what it returns.
    boolean expectedException = false;
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    try {
      spiedUponSt.execute(mockServer, null);
    } catch (IOException e) {
      // The injected failure surfaces as the cause of an IOException.
      if (e.getCause() != null &&
          e.getCause() instanceof MockedFailedDaughterOpen) {
        expectedException = true;
      }
    }


    // Start transaction.
    SplitTransaction st = prepareGOOD_SPLIT_ROW();

    // Run the execute.  Look at what it returns.
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
    // Do some assertions about execution.
    assertTrue(this.fs.exists(st.getSplitDir()));
    // Assert the parent region is closed.
    assertTrue(this.parent.isClosed());

    // Spy the real transaction and stub createDaughterRegion so creation
    // of the second daughter fails.
    SplitTransaction spiedUponSt = spy(st);
    when(spiedUponSt.createDaughterRegion(spiedUponSt.getSecondDaughter(), null)).
      thenThrow(new MockedFailedDaughterCreation());
    // Run the execute.  Look at what it returns.
    boolean expectedException = false;
    Server mockServer = Mockito.mock(Server.class);
    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
    try {
      spiedUponSt.execute(mockServer, null);
    } catch (MockedFailedDaughterCreation e) {
      expectedException = true;
    }
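A note on the spy-based stubbing above: with a Mockito spy, when(spy.method(...)) invokes the real method while the stub is being recorded, so Mockito recommends the doThrow/doReturn form for spies (it is also the only form that works for void methods, which is presumably why the first snippet stubs openDaughterRegion that way). A sketch of the same stubbing in that style, assuming the usual static imports of spy and doThrow from org.mockito.Mockito:

    SplitTransaction spiedUponSt = spy(st);
    // Record the expectation without calling the real createDaughterRegion.
    doThrow(new MockedFailedDaughterCreation())
        .when(spiedUponSt)
        .createDaughterRegion(spiedUponSt.getSecondDaughter(), null);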

    // Mark server A online and server B offline, then build an HMaster
    // (used here through its Server interface) and swap its ServerManager
    // for the mocked one.
    serverAndLoad.put(SERVERNAME_A, null);
    Mockito.when(this.serverManager.getOnlineServers()).thenReturn(serverAndLoad);
    Mockito.when(this.serverManager.isServerOnline(SERVERNAME_B)).thenReturn(false);
    Mockito.when(this.serverManager.isServerOnline(SERVERNAME_A)).thenReturn(true);
    HTU.getConfiguration().setInt(HConstants.MASTER_PORT, 0);
    Server server = new HMaster(HTU.getConfiguration());
    Whitebox.setInternalState(server, "serverManager", this.serverManager);
    assignmentCount = 0;
    AssignmentManagerWithExtrasForTesting am = setUpMockedAssignmentManager(server,
        this.serverManager);
    am.regionOnline(new HRegionInfo("t1".getBytes(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW), SERVERNAME_A);

  @Test
  public void testCleanParent() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    try {
      MasterServices services = new MockMasterServices(server);
      CatalogJanitor janitor = new CatalogJanitor(server, services);
      // Create regions.
      HTableDescriptor htd = new HTableDescriptor("table");
      htd.addFamily(new HColumnDescriptor("f"));
      HRegionInfo parent =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("eee"));
      HRegionInfo splita =
        new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
            Bytes.toBytes("ccc"));
      HRegionInfo splitb =
        new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
            Bytes.toBytes("eee"));
      // Test that when both daughter regions are in place, we do not
      // remove the parent.
      List<KeyValue> kvs = new ArrayList<KeyValue>();
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
      kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
          HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
      Result r = new Result(kvs);
      // Add a reference under splitA directory so we don't clear out the parent.
      Path rootdir = services.getMasterFileSystem().getRootDir();
      Path tabledir =
        HTableDescriptor.getTableDir(rootdir, htd.getName());
      Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
          htd.getColumnFamilies()[0].getName());
      Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top);
      long now = System.currentTimeMillis();
      // Reference name has this format: StoreFile#REF_NAME_PARSER
      Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
      FileSystem fs = services.getMasterFileSystem().getFileSystem();
      Path path = ref.write(fs, p);
      assertTrue(fs.exists(path));
      assertFalse(janitor.cleanParent(parent, r));
      // Remove the reference file and try again.
      assertTrue(fs.delete(p, true));
      assertTrue(janitor.cleanParent(parent, r));
    } finally {
      server.stop("shutdown");
    }
  }

  private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
  final String rootDir, final byte[] lastEndKey)
  throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, rootDir);
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

  @Test
  public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);

    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

  @Test
  public void testArchiveOldRegion() throws Exception {
    String table = "table";
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);

    // create the janitor
    CatalogJanitor janitor = new CatalogJanitor(server, services);

    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(table);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
    Result r = new Result(kvs);

    FileSystem fs = FileSystem.get(htu.getConfiguration());
    Path rootdir = services.getMasterFileSystem().getRootDir();
    // Have to set the root directory since HFileDisposer uses it to figure out
    // how to get to the archive directory. Otherwise it just seems to pick the
    // first root directory it can find (so the single test passes, but when
    // the full suite is run, things break).
    FSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
    Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
      htd.getColumnFamilies()[0].getName());

    // delete the file and ensure that the files have been archived
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());

    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, services, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);
    for (FileStatus file : storeFiles) {
      System.out.println("Have store file:" + file.getPath());
    }

    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, r));

    // and now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

    // cleanup
    services.stop("Test finished");
    server.stop("shutdown");
    janitor.join();
  }

  @Test
  public void testDuplicateHFileResolution() throws Exception {
    String table = "table";
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);

    // create the janitor
    CatalogJanitor janitor = new CatalogJanitor(server, services);

    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(table);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
    HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
    HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, we do not
    // remove the parent.
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
    Result r = new Result(kvs);

    FileSystem fs = FileSystem.get(htu.getConfiguration());

    Path rootdir = services.getMasterFileSystem().getRootDir();
    // Have to set the root directory since HFileDisposer uses it to figure out
    // how to get to the archive directory. Otherwise it just seems to pick the
    // first root directory it can find (so the single test passes, but when
    // the full suite is run, things break).
    FSUtils.setRootDir(fs.getConf(), rootdir);
    Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
    Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
      htd.getColumnFamilies()[0].getName());
    System.out.println("Old root:" + rootdir);
    System.out.println("Old table:" + tabledir);
    System.out.println("Old store:" + storedir);

    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
      tabledir, htd.getColumnFamilies()[0].getName());
    System.out.println("Old archive:" + storeArchive);

    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, services, storedir);
    // get the current store files for comparison
    FileStatus[] storeFiles = fs.listStatus(storedir);

    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, r));

    // and now check to make sure that the files have actually been archived
    FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

    // now add store files with the same names as before to check backup
    // enable archiving, make sure that files get archived
    addMockStoreFiles(2, services, storedir);

    // do the cleaning of the parent
    assertTrue(janitor.cleanParent(parent, r));

    // and now check to make sure that the files have actually been archived
    archivedStoreFiles = fs.listStatus(storeArchive);
    assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);

    // cleanup
    services.stop("Test finished");
    server.stop("shutdown");
    janitor.join();
  }
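Note the extra boolean passed to the final assertArchiveEqualToOriginal call above: after archiving files with the same names twice, the archive presumably holds timestamped backup copies alongside the originals, and the flag appears to tell the helper to tolerate those duplicates.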

    // set TTL
    long ttl = 2000;
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
    Server server = new DummyServer();
    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
    FileSystem fs = FileSystem.get(conf);
    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);

    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
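The DummyServer used above is not shown on this page. When Mockito is not in play, tests typically hand-roll a stub like the following. This is a hedged sketch against the 0.9x-era Server interface; the exact method set varies between HBase versions, and the host name is a made-up placeholder:

  static class DummyServer implements Server {
    private volatile boolean stopped = false;

    @Override
    public Configuration getConfiguration() {
      return UTIL.getConfiguration();
    }

    @Override
    public ZooKeeperWatcher getZooKeeper() {
      return null; // stubbed out in this sketch
    }

    @Override
    public CatalogTracker getCatalogTracker() {
      return null; // stubbed out in this sketch
    }

    @Override
    public ServerName getServerName() {
      return new ServerName("dummyserver.example.org", 1234, -1L);
    }

    @Override
    public void abort(String why, Throwable e) {
      stopped = true;
    }

    @Override
    public boolean isAborted() {
      return stopped;
    }

    @Override
    public void stop(String why) {
      stopped = true;
    }

    @Override
    public boolean isStopped() {
      return stopped;
    }
  }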
