Package org.apache.hadoop.hbase

Examples of org.apache.hadoop.hbase.HBaseTestingUtility$SeenRowTracker
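None of the snippets below exercises SeenRowTracker directly. For orientation, here is a minimal stand-alone sketch of what a seen-row tracker does (a hypothetical analogue written for this page, not the actual inner class, whose API may differ): record every row a scan returns, then validate the rows against an expected range.

    import java.util.Set;
    import java.util.TreeSet;

    import org.apache.hadoop.hbase.util.Bytes;

    /** Hypothetical stand-alone analogue of HBaseTestingUtility.SeenRowTracker. */
    class SimpleSeenRowTracker {
      private final Set<byte[]> seen = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
      private final byte[] startRow;
      private final byte[] stopRow;

      SimpleSeenRowTracker(byte[] startRow, byte[] stopRow) {
        this.startRow = startRow;
        this.stopRow = stopRow;
      }

      /** Record a row returned by the scan under test. */
      void addRow(byte[] row) {
        seen.add(row);
      }

      /** Assert every recorded row falls within [startRow, stopRow). */
      void validate() {
        for (byte[] row : seen) {
          if (Bytes.compareTo(row, startRow) < 0 || Bytes.compareTo(row, stopRow) >= 0) {
            throw new AssertionError("Row out of range: " + Bytes.toStringBinary(row));
          }
        }
      }
    }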


    conf1.setLong("hbase.master.logcleaner.ttl", 10);
    conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    conf1.setBoolean("dfs.support.append", true);
    conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);

    utility1 = new HBaseTestingUtility(conf1);
    utility1.startMiniZKCluster();
    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
    // Have to re-get conf1 in case the zk cluster location differs
    // from the default.
    conf1 = utility1.getConfiguration();
    zkw1 = new ZooKeeperWatcher(conf1, "cluster1", null, true);
    admin = new ReplicationAdmin(conf1);
    LOG.info("Setup first Zk");

    // Base conf2 on conf1 so it gets the right zk cluster.
    conf2 = HBaseConfiguration.create(conf1);
    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
    conf2.setInt("hbase.client.retries.number", 6);
    conf2.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    conf2.setBoolean("dfs.support.append", true);

    utility2 = new HBaseTestingUtility(conf2);
    utility2.setZkCluster(miniZK);
    zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);

    admin.addPeer("2", utility2.getClusterKey());
    setIsReplication(true);
View Full Code Here
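Once both clusters are wired up as above, a replication test typically writes to the source cluster and polls the sink until the edit arrives. A minimal sketch that continues from the setup (assuming a table named "test" with family "f" already exists on both clusters):

    // Write a row on cluster 1, then poll cluster 2 until it replicates.
    HTable source = new HTable(conf1, "test");
    HTable sink = new HTable(conf2, "test");
    byte[] row = Bytes.toBytes("row1");
    Put put = new Put(row);
    put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    source.put(put);

    Result result = null;
    for (int i = 0; i < 10 && (result == null || result.isEmpty()); i++) {
      Thread.sleep(500); // give replication a moment to ship the edit
      result = sink.get(new Get(row));
    }
    assertFalse("edit never replicated", result.isEmpty());
    source.close();
    sink.close();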


      this.region = null;
    }
  }

  @Test public void testgetHDFSBlocksDistribution() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    final int DEFAULT_BLOCK_SIZE = 1024;
    htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    htu.getConfiguration().setInt("dfs.replication", 2);


    // set up a cluster with 3 nodes
    MiniHBaseCluster cluster = null;
    String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
    int regionServersCount = 3;

    try {
      cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
      byte [][] families = {fam1, fam2};
      HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

      //Setting up region
      byte[] row = Bytes.toBytes("row1");
      byte[] col = Bytes.toBytes("col1");

      Put put = new Put(row);
      put.add(fam1, col, 1, Bytes.toBytes("test1"));
      put.add(fam2, col, 1, Bytes.toBytes("test2"));
      ht.put(put);

      HRegion firstRegion = htu.getHBaseCluster().
          getRegions(Bytes.toBytes(this.getName())).get(0);
      firstRegion.flushcache();
      HDFSBlocksDistribution blocksDistribution1 =
          firstRegion.getHDFSBlocksDistribution();

      // Given the replication factor of 2 (set above) and 2 HFiles,
      // we will have a total of 4 block replicas on 3 datanodes; thus there
      // must be at least one host that has replicas of both HFiles. That
      // host's weight will equal the unique blocks' total weight.
      long uniqueBlocksWeight1 =
          blocksDistribution1.getUniqueBlocksTotalWeight();

      String topHost = blocksDistribution1.getTopHosts().get(0);
      long topHostWeight = blocksDistribution1.getWeight(topHost);
      assertEquals(uniqueBlocksWeight1, topHostWeight);

      // Use the static method to compute the value; it should be the same.
      // The static method is used by the load balancer and other components.
      HDFSBlocksDistribution blocksDistribution2 =
        HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
        firstRegion.getTableDesc(),
        firstRegion.getRegionInfo().getEncodedName());
      long uniqueBlocksWeight2 =
        blocksDistribution2.getUniqueBlocksTotalWeight();

      assertEquals(uniqueBlocksWeight1, uniqueBlocksWeight2);

      ht.close();
    } finally {
      if (cluster != null) {
        htu.shutdownMiniCluster();
      }
    }
  }
View Full Code Here
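The weight reasoning in the comment above can be reproduced with HDFSBlocksDistribution directly. A minimal sketch (block weights and host names are made up): two blocks, each replicated to 2 of 3 hosts, give 4 replicas in total, and a host holding a replica of every block has weight equal to the unique-blocks total.

    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    // HFile 1's block lives on host1 and host2; HFile 2's on host1 and host3.
    dist.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 1024);
    dist.addHostsAndBlockWeight(new String[] { "host1", "host3" }, 1024);

    // The unique weight counts each block once: 2 * 1024.
    assertEquals(2048, dist.getUniqueBlocksTotalWeight());
    // host1 holds a replica of both blocks, so it is the top host and its
    // weight equals the unique-blocks total weight.
    assertEquals("host1", dist.getTopHosts().get(0));
    assertEquals(2048, dist.getWeight("host1"));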

    assertTrue(hri.equals(HRegionInfo.FIRST_META_REGIONINFO));
  }

  @Test
  public void testCleanParent() throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testCleanParent");
    Server server = new MockServer(htu);
    try {
      MasterServices services = new MockMasterServices(server);
      CatalogJanitor janitor = new CatalogJanitor(server, services);
View Full Code Here
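The setRootDirAndCleanIt helper is not shown in the excerpt. A plausible sketch (hypothetical; the real helper may differ) points hbase.rootdir at a per-test subdirectory and wipes anything a previous run left behind:

    // Hypothetical reconstruction of the helper used above.
    private Path setRootDirAndCleanIt(final HBaseTestingUtility htu, final String subdir)
    throws IOException {
      Path testdir = htu.getDataTestDir(subdir);
      FileSystem fs = FileSystem.get(htu.getConfiguration());
      if (fs.exists(testdir)) {
        assertTrue(fs.delete(testdir, true)); // clear leftovers from earlier runs
      }
      htu.getConfiguration().set(HConstants.HBASE_DIR, testdir.toString());
      return testdir;
    }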

   * @throws InterruptedException
   */
  private void parentWithSpecifiedEndKeyCleanedEvenIfDaughterGoneFirst(
  final String rootDir, final byte[] lastEndKey)
  throws IOException, InterruptedException {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, rootDir);
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      lastEndKey);
    // Sleep a second, else the encoded names of these regions come out
    // the same for all regions with the same start key made in the same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("ccc"));
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"));
    HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"));

    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
      lastEndKey);
    Thread.sleep(1001);
    // Make daughters of daughter b; splitba and splitbb.
    HRegionInfo splitba = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
      Bytes.toBytes("ddd"));
    HRegionInfo splitbb = new HRegionInfo(htd.getName(), Bytes.toBytes("ddd"),
      lastEndKey);

    // First test that our Comparator works right up in CatalogJanitor.
    // Just for kicks.
    SortedMap<HRegionInfo, Result> regions =
      new TreeMap<HRegionInfo, Result>(new CatalogJanitor.SplitParentFirstComparator());
    // Now make sure that this region map sorts as we expect it to.
    regions.put(parent, createResult(parent, splita, splitb));
    regions.put(splitb, createResult(splitb, splitba, splitbb));
    regions.put(splita, createResult(splita, splitaa, splitab));
    // Assert it's properly sorted.
    int index = 0;
    for (Map.Entry<HRegionInfo, Result> e: regions.entrySet()) {
      if (index == 0) {
        assertTrue(e.getKey().getEncodedName().equals(parent.getEncodedName()));
      } else if (index == 1) {
        assertTrue(e.getKey().getEncodedName().equals(splita.getEncodedName()));
      } else if (index == 2) {
        assertTrue(e.getKey().getEncodedName().equals(splitb.getEncodedName()));
      }
      index++;
    }

    // Now play around with the cleanParent function.  Create a ref from splita
    // up to the parent.
    Path splitaRef =
      createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);
    // Make sure actual super parent sticks around because splita has a ref.
    assertFalse(janitor.cleanParent(parent, regions.get(parent)));

    // splitba and splitbb do not have dirs in the fs. That means that if
    // we test splitb, it should get cleaned up.
    assertTrue(janitor.cleanParent(splitb, regions.get(splitb)));

    // Now remove the ref from splita to the parent... so the parent can be
    // let go and the daughter splita can be split (it can't split while it
    // still has references). BUT make the timing such that the daughter gets
    // cleaned up before we get a chance to let go of the parent.
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));
    // Create the refs from daughters of splita.
    Path splitaaRef =
      createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false);
    Path splitabRef =
View Full Code Here
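The createReferences calls above hinge on how splits work: a daughter region initially holds half-file references named "<hfileName>.<parentEncodedName>" instead of real HFiles, and CatalogJanitor refuses to delete a parent while any such reference exists. A hedged sketch of spotting leftover references by file name (a hypothetical helper; the real check goes through the store-file machinery):

    // Hypothetical helper: does any file under this family dir still look
    // like a reference to the given parent region?
    static boolean hasReferencesTo(FileSystem fs, Path familyDir,
        String parentEncodedName) throws IOException {
      if (!fs.exists(familyDir)) {
        return false;
      }
      for (FileStatus status : fs.listStatus(familyDir)) {
        // Reference files are named "<hfileName>.<parentEncodedRegionName>".
        if (status.getPath().getName().endsWith("." + parentEncodedName)) {
          return true;
        }
      }
      return false;
    }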

   * parents are still referencing them. This ensures that grandfather regions
   * do not point to deleted parent regions.
   */
  @Test
  public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
    Server server = new MockServer(htu);
    MasterServices services = new MockMasterServices(server);

    final HTableDescriptor htd = createHTableDescriptor();

    // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

    // Parent
    HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      new byte[0], true);
    // Sleep a second, else the encoded names of these regions come out
    // the same for all regions with the same start key made in the same second.
    Thread.sleep(1001);

    // Daughter a
    HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("ccc"), true);
    Thread.sleep(1001);
    // Make daughters of daughter a; splitaa and splitab.
    HRegionInfo splitaa = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
      Bytes.toBytes("bbb"), false);
    HRegionInfo splitab = new HRegionInfo(htd.getName(), Bytes.toBytes("bbb"),
      Bytes.toBytes("ccc"), false);

    // Daughter b
    HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
        new byte[0]);
    Thread.sleep(1001);

    final Map<HRegionInfo, Result> splitParents =
        new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
    splitParents.put(parent, makeResultFromHRegionInfo(parent, splita, splitb));
    splitParents.put(splita, makeResultFromHRegionInfo(splita, splitaa, splitab));

    CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
    doReturn(new Pair<Integer, Map<HRegionInfo, Result>>(
        10, splitParents)).when(janitor).getSplitParents();

    // Create a ref from splita to the parent.
    Path splitaRef =
        createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);

    // Neither the parent nor splita should be removed.
    assertEquals(0, janitor.scan());

    // Now delete the ref.
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    assertTrue(fs.delete(splitaRef, true));

    // Now both the parent and splita can be deleted.
    assertEquals(2, janitor.scan());

View Full Code Here
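The spy/doReturn pair above is the standard Mockito idiom for stubbing one method of an otherwise real object. For spies, doReturn(...).when(spy).method() must be used instead of when(spy.method()).thenReturn(...), because the latter would invoke the real getSplitParents() during stubbing. A minimal sketch of the idiom in isolation (the Counter class is hypothetical, for illustration only):

    import static org.junit.Assert.assertEquals;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.spy;

    import org.junit.Test;

    public class SpyIdiomTest {
      // Hypothetical class used only to illustrate the spy idiom.
      static class Counter {
        int value() { return 42; }
      }

      @Test
      public void spyStubsWithoutCallingRealMethod() {
        Counter stubbed = spy(new Counter());
        // Safe for spies: the real value() is NOT invoked while stubbing.
        doReturn(7).when(stubbed).value();
        assertEquals(7, stubbed.value());
      }
    }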

   @Override
   public void setUp() throws Exception {
     super.setUp();
     SchemaMetrics.setUseTableNameInTest(true);
     TEST_UTIL = new HBaseTestingUtility();
     TESTTABLEDESC = new HTableDescriptor(TABLE);

     TESTTABLEDESC.addFamily(
         new HColumnDescriptor(FAMILY)
         .setMaxVersions(10)
View Full Code Here
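The descriptor built in this setUp is cut off by the excerpt. A sketch of a typical continuation (assuming the TABLE/FAMILY constants from the class and the pre-1.0 fluent descriptor API) finishes the column family and materializes the table:

    // Sketch: finish the family, start a mini cluster, create the table.
    TESTTABLEDESC.addFamily(
        new HColumnDescriptor(FAMILY)
            .setMaxVersions(10)
            .setBlockCacheEnabled(true)
            .setTimeToLive(HConstants.FOREVER));

    TEST_UTIL.startMiniCluster();
    new HBaseAdmin(TEST_UTIL.getConfiguration()).createTable(TESTTABLEDESC);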

  protected HBaseTestingUtility hbaseUtil;
  protected int numServers = 1;
 
  public GoraHBaseTestDriver() {
    super(HBaseStore.class);
    hbaseUtil = new HBaseTestingUtility();
  }
View Full Code Here
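A test driver like this usually owns the mini cluster's lifecycle. A sketch of the matching hooks, assuming GoraTestDriver exposes overridable setUpClass/tearDownClass (the hook names are an assumption from the Gora test-driver convention):

    // Sketch: lifecycle hooks for the driver above (hook names assumed).
    @Override
    public void setUpClass() throws Exception {
      super.setUpClass();
      hbaseUtil.startMiniCluster(numServers); // boot HBase once per test class
    }

    @Override
    public void tearDownClass() throws Exception {
      super.tearDownClass();
      hbaseUtil.shutdownMiniCluster(); // and tear it down again
    }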

    UTIL.shutdownMiniCluster();
  }

  @Test
  public void testIndexHalfStoreFileReaderWithSeekTo() throws Exception {
    HBaseTestingUtility test_util = new HBaseTestingUtility();
    String root_dir = test_util.getDataTestDir("TestIndexHalfStoreFile").toString();
    Path p = new Path(root_dir, "test");
    Configuration conf = test_util.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HFile.Writer w =
        HFile.getWriterFactory(conf, cacheConf).withPath(fs, p).withBlockSize(1024)
            .withComparator(KeyValue.KEY_COMPARATOR).create();
View Full Code Here
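The writer above is truncated before any data goes in. A minimal continuation sketch (pre-1.0 HFile API; cells must be appended in KeyValue order) writes a few cells and closes the file:

    // Sketch: append a few cells in sorted row order, then close the writer.
    byte[] family = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    for (int i = 0; i < 10; i++) {
      byte[] row = Bytes.toBytes(String.format("row%02d", i)); // zero-pad to keep sort order
      w.append(new KeyValue(row, family, qualifier, Bytes.toBytes("value" + i)));
    }
    w.close();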

  private void doMROnTableTest(String inputFile, String family, String tableName, String data,
      String[] args, int valueMultiplier) throws Exception {

    // Cluster
    HBaseTestingUtility htu1 = new HBaseTestingUtility();

    htu1.startMiniCluster();
    htu1.startMiniMapReduceCluster();

    GenericOptionsParser opts = new GenericOptionsParser(htu1.getConfiguration(), args);
    Configuration conf = opts.getConfiguration();
    args = opts.getRemainingArgs();

    try {

      FileSystem fs = FileSystem.get(conf);
      FSDataOutputStream op = fs.create(new Path(inputFile), true);
      if (data == null) {
        data = "KEY\u001bVALUE1\u001bVALUE2\n";
      }
      op.write(Bytes.toBytes(data));
      op.close();

      final byte[] FAM = Bytes.toBytes(family);
      final byte[] TAB = Bytes.toBytes(tableName);
      final byte[] QA = Bytes.toBytes("A");
      final byte[] QB = Bytes.toBytes("B");

      if (conf.get(IndexImportTsv.BULK_OUTPUT_CONF_KEY) == null) {
        HTableDescriptor desc = new HTableDescriptor(TAB);
        desc.addFamily(new HColumnDescriptor(FAM));
        new HBaseAdmin(conf).createTable(desc);
      }

      IndexImportTsv.createHbaseAdmin(conf);

      Job job = IndexImportTsv.createSubmittableJob(conf, args);
      job.waitForCompletion(false);
      assertTrue(job.isSuccessful());

      HTable table = new HTable(new Configuration(conf), TAB);
      boolean verified = false;
      long pause = conf.getLong("hbase.client.pause", 5 * 1000);
      int numRetries = conf.getInt("hbase.client.retries.number", 5);
      for (int i = 0; i < numRetries; i++) {
        try {
          Scan scan = new Scan();
          // Scan entire family.
          scan.addFamily(FAM);
          ResultScanner resScanner = table.getScanner(scan);
          for (Result res : resScanner) {
            assertEquals(2, res.size());
            List<KeyValue> kvs = res.list();
            assertEquals(toU8Str(Bytes.toBytes("KEY")), toU8Str(kvs.get(0).getRow()));
            assertEquals(toU8Str(Bytes.toBytes("KEY")), toU8Str(kvs.get(1).getRow()));
            assertEquals(toU8Str(Bytes.toBytes("VALUE" + valueMultiplier)),
              toU8Str(kvs.get(0).getValue()));
            assertEquals(toU8Str(Bytes.toBytes("VALUE" + 2 * valueMultiplier)),
              toU8Str(kvs.get(1).getValue()));
            // Only one result set is expected, so let it loop.
          }
          verified = true;
          break;
        } catch (NullPointerException e) {
          // If here, a cell was empty. Presume it's because updates came in
          // after the scanner had been opened. Wait a while and retry.
        }
        try {
          Thread.sleep(pause);
        } catch (InterruptedException e) {
          // continue
        }
      }
      assertTrue(verified);
    } finally {
      htu1.shutdownMiniMapReduceCluster();
      htu1.shutdownMiniCluster();
    }
  }
View Full Code Here
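The args passed into this helper are an ImportTsv command line. A sketch of a typical invocation, assuming IndexImportTsv accepts the stock ImportTsv options (family name "d" and file names are made up; the \u001b separator matches the test data written above):

    // Sketch: map the first field to the row key and the next two to d:A and d:B.
    String[] args = new String[] {
        "-Dimporttsv.columns=HBASE_ROW_KEY,d:A,d:B",
        "-Dimporttsv.separator=\u001b",
        "tableName",
        "inputFile.tsv"
    };
    doMROnTableTest("inputFile.tsv", "d", "tableName", null, args, 1);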

      conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 5000);
      conf.setInt("hbase.master.info.port", masterPort.get());
      conf.set("hbase.master.dns.interface", "lo");
      conf.set("hbase.regionserver.dns.interface", "lo");

      testUtil = new HBaseTestingUtility(conf);
      try {
         try {
            testUtil.startMiniCluster();
         } catch (NullPointerException e) {
            // In some systems, this method can throw an NPE due to the system
View Full Code Here
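The excerpt cuts off inside the NPE handler. One plausible shape for the whole pattern (a sketch, not the file's actual code) is a retry wrapper that cleans up the half-started cluster and tries once more:

    // Sketch: retry wrapper around startMiniCluster (not the file's actual code).
    private static void startMiniClusterWithRetry(HBaseTestingUtility util)
    throws Exception {
      try {
        util.startMiniCluster();
      } catch (NullPointerException e) {
        // On some systems the first attempt can NPE (e.g. host/interface
        // lookup quirks); clean up and retry once.
        util.shutdownMiniCluster();
        util.startMiniCluster();
      }
    }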


