Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegion$MutationBatch


      // Turn off info-family (block cache) caching while bootstrapping the ROOT and
      // META regions; it is turned back on once both regions have been created.
      HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
      setInfoFamilyCachingForRoot(false);
      HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
      setInfoFamilyCachingForMeta(false);
      HRegion root = HRegion.createHRegion(rootHRI, rd, c,
          HTableDescriptor.ROOT_TABLEDESC);
      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
          HTableDescriptor.META_TABLEDESC);
      setInfoFamilyCachingForRoot(true);
      setInfoFamilyCachingForMeta(true);
      // Add first region from the META table to the ROOT region.
      HRegion.addRegionToMETA(root, meta);
      root.close();
      root.getLog().closeAndDelete();
      meta.close();
      meta.getLog().closeAndDelete();
    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.error("bootstrap", e);
      throw e;
    }
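The snippet above is taken from a master bootstrap routine. The pattern it relies on, and that recurs throughout this page, is: create the region, use it, then close both the region and its HLog so no sync thread is left running. Below is a minimal sketch of that pattern for an ordinary table; the table name, family name, and the rootDir/conf variables are illustrative assumptions, not part of the original code.

      // Minimal create/use/close cycle for an HRegion (sketch only; names are hypothetical).
      HTableDescriptor htd = new HTableDescriptor("exampleTable");
      htd.addFamily(new HColumnDescriptor("f"));
      HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
      // rootDir and conf are assumed to be a test directory Path and a Configuration.
      HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd);
      try {
        // ... read from or write to the region here ...
      } finally {
        region.close();                    // flush and close the stores
        region.getLog().closeAndDelete();  // stop the HLog sync thread and remove the log
      }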


  public void testColumnCountGetFilter() throws IOException {
    String family = "Family";
    HTableDescriptor htd = new HTableDescriptor("testColumnCountGetFilter");
    htd.addFamily(new HColumnDescriptor(family));
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
      getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    try {
      String valueString = "ValueString";
      String row = "row-1";
      List<String> columns = generateRandomWords(10000, "column");
      Put p = new Put(Bytes.toBytes(row));
      p.setWriteToWAL(false);
      for (String column : columns) {
        KeyValue kv = KeyValueTestUtil.create(row, family, column, 0, valueString);
        p.add(kv);
      }
      region.put(p);

      Get get = new Get(Bytes.toBytes(row));
      Filter filter = new ColumnCountGetFilter(100);
      get.setFilter(filter);
      Scan scan = new Scan(get);
      InternalScanner scanner = region.getScanner(scan);
      List<KeyValue> results = new ArrayList<KeyValue>();
      scanner.next(results);
      assertEquals(100, results.size());
    } finally {
      region.close();
      region.getLog().closeAndDelete();
    }

  }

  public void testColumnCountGetFilterWithFilterList() throws IOException {
    String family = "Family";
    HTableDescriptor htd = new HTableDescriptor("testColumnCountGetFilterWithFilterList");
    htd.addFamily(new HColumnDescriptor(family));
    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
      getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
    try {
      String valueString = "ValueString";
      String row = "row-1";
      List<String> columns = generateRandomWords(10000, "column");
      Put p = new Put(Bytes.toBytes(row));
      p.setWriteToWAL(false);
      for (String column : columns) {
        KeyValue kv = KeyValueTestUtil.create(row, family, column, 0, valueString);
        p.add(kv);
      }
      region.put(p);

      Get get = new Get(Bytes.toBytes(row));
      FilterList filterLst = new FilterList();
      filterLst.addFilter(new ColumnCountGetFilter(100));
      get.setFilter(filterLst);
      Scan scan = new Scan(get);
      InternalScanner scanner = region.getScanner(scan);
      List<KeyValue> results = new ArrayList<KeyValue>();
      scanner.next(results);
      assertEquals(100, results.size());
    } finally {
      region.close();
      region.getLog().closeAndDelete();
    }

  }
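The two tests above differ only in whether the ColumnCountGetFilter is passed to the Get directly or wrapped in a FilterList. As a sketch, their shared body could be factored into one helper; the method name and signature below are illustrative and assume the surrounding test class (with generateRandomWords and the JUnit asserts) is available.

  // Hypothetical shared helper for the two tests above: write 10,000 random columns
  // to a single row, scan the row back through the given filter, and check that only
  // the first 100 KeyValues come back.
  private void runColumnCountGetFilterTest(HRegion region, String family, Filter filter)
      throws IOException {
    String row = "row-1";
    Put p = new Put(Bytes.toBytes(row));
    p.setWriteToWAL(false);
    for (String column : generateRandomWords(10000, "column")) {
      p.add(KeyValueTestUtil.create(row, family, column, 0, "ValueString"));
    }
    region.put(p);

    Get get = new Get(Bytes.toBytes(row));
    get.setFilter(filter);
    InternalScanner scanner = region.getScanner(new Scan(get));
    List<KeyValue> results = new ArrayList<KeyValue>();
    scanner.next(results);
    assertEquals(100, results.size());
  }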

  // Signature inferred from the parameters used below; this helper builds a
  // standalone test region for the given table name and column family.
  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(hcd);
    HRegionInfo info =
        new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
    HRegion region =
        HRegion.createHRegion(info, getDataTestDir(), getConfiguration(), htd);
    return region;
  }

    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));
    conf = new Configuration();
    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));

    // mock an HRegion so the region-based overloads below resolve the same archive path
    HRegion mockRegion = Mockito.mock(HRegion.class);
    Mockito.when(mockRegion.getRegionInfo()).thenReturn(region);
    Mockito.when(mockRegion.getTableDir()).thenReturn(tabledir);

    assertNotNull(HFileArchiveUtil.getStoreArchivePath(null, mockRegion, family));
    conf = new Configuration();
    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, mockRegion, family));
  }
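The Mockito stubbing above can be packaged as a small helper when several tests need a fake region that only answers getRegionInfo() and getTableDir(); the helper name below is illustrative, not part of the original test.

  // Hypothetical helper mirroring the stubbing above: a mocked HRegion that returns
  // the supplied HRegionInfo and table directory.
  private static HRegion mockRegion(HRegionInfo info, Path tableDir) {
    HRegion mock = Mockito.mock(HRegion.class);
    Mockito.when(mock.getRegionInfo()).thenReturn(info);
    Mockito.when(mock.getTableDir()).thenReturn(tableDir);
    return mock;
  }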

    // Find regionserver carrying meta.
    List<RegionServerThread> regionServerThreads =
      cluster.getRegionServerThreads();
    int count = -1;
    HRegion metaRegion = null;
    for (RegionServerThread regionServerThread : regionServerThreads) {
      HRegionServer regionServer = regionServerThread.getRegionServer();
      metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
      count++;
      regionServer.abort("");

  }

  HRegion createRegion(final HRegionInfo  hri, final Path rootdir, final Configuration c,
      final HTableDescriptor htd)
  throws IOException {
    HRegion r = HRegion.createHRegion(hri, rootdir, c, htd);
    // The createHRegion call above also creates an hlog file, and every hlog
    // created starts a background thread that syncs it.  We need to close out
    // this log, otherwise that thread keeps trying to sync a file system that
    // has already been torn down (the DFS) by the end of the test.
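The method above is cut off right after the comment, before the log is actually closed. A hedged sketch of a plausible ending, using the close pattern seen in the other examples on this page (the original tail may differ), is:

    // Sketch of a plausible ending (assumption, not the original source): close out
    // the hlog so its sync thread stops, then hand the region back to the caller.
    r.getLog().closeAndDelete();
    return r;
  }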

    // get the current store files for the region
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // and load the table
    UTIL.loadRegion(region, TEST_FAM);

    // shut down the table so we can manipulate the files
    admin.disableTable(STRING_TABLE_NAME);

    FileSystem fs = UTIL.getTestFileSystem();

    // now archive the region's files out from under the disabled table
    Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());

    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // check for the existence of the archive directory and some files in it
    Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
    assertTrue(fs.exists(archiveDir));

  public void testDeleteRegionWithNoStoreFiles() throws Exception {
    // get the current store files for the region
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    FileSystem fs = region.getFilesystem();

    // make sure there are some files in the regiondir
    Path rootDir = FSUtils.getRootDir(fs.getConf());
    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
    FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
    Assert.assertNotNull("No files in the region directory", regionFiles);
    if (LOG.isDebugEnabled()) {
      List<Path> files = new ArrayList<Path>();
      for (FileStatus file : regionFiles) {
        files.add(file.getPath());
      }
      LOG.debug("Current files:" + files);
    }
    // delete the visible folders so we just have hidden files/folders
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        return dirFilter.accept(file) && !file.getName().startsWith(".");
      }
    };
    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
    for (FileStatus store : storeDirs) {
      LOG.debug("Deleting store for test");
      fs.delete(store.getPath(), true);
    }

    // then archive the region
    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // and check to make sure the region directory got deleted
    assertFalse("Region directory (" + regionDir + ") still exists.", fs.exists(regionDir));
  }

  @Test
  public void testArchiveOnTableDelete() throws Exception {
    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    // get the parent RS and monitor
    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
    FileSystem fs = hrs.getFileSystem();

    // put some data on the region
    LOG.debug("-------Loading table");
    UTIL.loadRegion(region, TEST_FAM);

    // get the hfiles in the region
    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
    assertEquals("More that 1 region for test table.", 1, regions.size());

    region = regions.get(0);
    // wait for all the compactions to complete
    region.waitForFlushesAndCompactions();

    // disable table to prevent new updates
    UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
    LOG.debug("Disabled table");

    // remove all the files from the archive to get a fair comparison
    clearArchiveDirectory();

    // then get the current store files
    Path regionDir = region.getRegionDir();
    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);

    // then delete the table so the hfiles get archived
    UTIL.deleteTable(TABLE_NAME);
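The getRegionStoreFiles helper used above is not shown on this page. A sketch of what such a helper can look like, built only from the FileSystem/FSUtils calls that appear in the other snippets (the real implementation may differ), follows.

  // Hypothetical sketch of a getRegionStoreFiles-style helper: collect the file names
  // of every store file under a region directory (one subdirectory per column family).
  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir)
      throws IOException {
    List<String> storeFiles = new ArrayList<String>();
    FileStatus[] families = FSUtils.listStatus(fs, regionDir, new FSUtils.DirFilter(fs));
    if (families == null) return storeFiles;
    for (FileStatus family : families) {
      FileStatus[] files = FSUtils.listStatus(fs, family.getPath(), null);
      if (files == null) continue;
      for (FileStatus file : files) {
        storeFiles.add(file.getPath().getName());
      }
    }
    return storeFiles;
  }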
