Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegion$BatchOperationInProgress


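  /**
   * Verifies that a FilterList nested inside another FilterList under
   * MUST_PASS_ONE returns the same rows regardless of the order in which
   * rowFilter and subFilterList are added.
   */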
  public void testNestedFilterListWithSCVF() throws IOException {
    byte[] columnStatus = Bytes.toBytes("S");
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testNestedFilterListWithSCVF"));
    htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
    HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
    HRegion testRegion = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);
    for (int i = 0; i < 10; i++) {
      Put p = new Put(Bytes.toBytes("row" + i));
      p.setDurability(Durability.SKIP_WAL);
      p.add(FAMILIES[0], columnStatus, Bytes.toBytes(i % 2));
      testRegion.put(p);
    }
    testRegion.flushcache();
    // 1. get rows > "row4"
    Filter rowFilter = new RowFilter(CompareOp.GREATER, new BinaryComparator(Bytes.toBytes("row4")));
    Scan s1 = new Scan();
    s1.setFilter(rowFilter);
    InternalScanner scanner = testRegion.getScanner(s1);
    List<Cell> results = new ArrayList<Cell>();
    int i = 5;
    // next() returns false once the scanner is exhausted; the final row is
    // still delivered in results on that last call.
    for (boolean done = true; done; i++) {
      done = scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    // 2. get rows <= "row4" and S=0
    FilterList subFilterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    Filter subFilter1 = new RowFilter(CompareOp.LESS_OR_EQUAL,
      new BinaryComparator(Bytes.toBytes("row4")));
    subFilterList.addFilter(subFilter1);
    Filter subFilter2 = new SingleColumnValueFilter(FAMILIES[0], columnStatus, CompareOp.EQUAL,
      Bytes.toBytes(0));
    subFilterList.addFilter(subFilter2);
    s1 = new Scan();
    s1.setFilter(subFilterList);
    scanner = testRegion.getScanner(s1);
    results = new ArrayList<Cell>();
    for (i = 0; i <= 4; i += 2) {
      scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    assertFalse(scanner.next(results));
    // 3. verify the nested filter list
    // 3.1 add rowFilter first, then subFilterList
    FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    filterList.addFilter(rowFilter);
    filterList.addFilter(subFilterList);
    s1 = new Scan();
    s1.setFilter(filterList);
    scanner = testRegion.getScanner(s1);
    results = new ArrayList<Cell>();
    for (i = 0; i <= 4; i += 2) {
      scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    for (i = 5; i <= 9; i++) {
      scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    assertFalse(scanner.next(results));
    // 3.2 the key case: add subFilterList first, then rowFilter; results must match 3.1
    filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
    filterList.addFilter(subFilterList);
    filterList.addFilter(rowFilter);
    s1 = new Scan();
    s1.setFilter(filterList);
    scanner = testRegion.getScanner(s1);
    results = new ArrayList<Cell>();
    for (i = 0; i <= 4; i += 2) {
      scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    for (i = 5; i <= 9; i++) {
      scanner.next(results);
      assertTrue(CellUtil.matchingRow(results.get(0), Bytes.toBytes("row" + i)));
      assertEquals(Bytes.toInt(CellUtil.cloneValue(results.get(0))), i % 2);
      results.clear();
    }
    assertFalse(scanner.next(results));
    HLog hlog = testRegion.getLog();
    testRegion.close();
    hlog.closeAndDelete();
  }     
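
Distilled from the test above, the nesting pattern itself is small. A minimal sketch, assuming family, qualifier, and scan are already in scope (they are stand-ins, not part of the original test):

  // Inner conjunction: rowKey <= "row4" AND column S == 0.
  FilterList inner = new FilterList(FilterList.Operator.MUST_PASS_ALL);
  inner.addFilter(new RowFilter(CompareOp.LESS_OR_EQUAL,
      new BinaryComparator(Bytes.toBytes("row4"))));
  inner.addFilter(new SingleColumnValueFilter(family, qualifier,
      CompareOp.EQUAL, Bytes.toBytes(0)));

  // Outer disjunction: the order in which the two filters are added
  // must not change the scan results.
  FilterList outer = new FilterList(FilterList.Operator.MUST_PASS_ONE);
  outer.addFilter(inner);
  outer.addFilter(new RowFilter(CompareOp.GREATER,
      new BinaryComparator(Bytes.toBytes("row4"))));
  scan.setFilter(outer);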


  /**
   * Mapping of scanner instances to the user who created them. Weak keys let
   * an entry disappear once its scanner is garbage collected, so closed
   * scanners do not leak owner entries.
   */
  private Map<InternalScanner,String> scannerOwners =
      new MapMaker().weakKeys().makeMap();

  void initialize(RegionCoprocessorEnvironment e) throws IOException {
    final HRegion region = e.getRegion();

    Map<byte[], ListMultimap<String,TablePermission>> tables =
        AccessControlLists.loadAll(region);
    // For each table, write out the table's permissions to the respective
    // znode for that table.
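    // (the per-table ZooKeeper permission writes are elided in this excerpt)
  }

The scannerOwners map above is typically consulted on each scanner call so that only the user who opened a scanner may advance or close it. A minimal sketch of such a check, assuming the caller has already resolved the request user name; requireScannerOwner and its signature are illustrative, not verbatim source (AccessDeniedException is org.apache.hadoop.hbase.security.AccessDeniedException):

  private void requireScannerOwner(InternalScanner s, String requestUser)
      throws AccessDeniedException {
    // The owner is recorded when the scanner is opened; null means we never saw it.
    String owner = scannerOwners.get(s);
    if (owner != null && !owner.equals(requestUser)) {
      throw new AccessDeniedException(
          "User '" + requestUser + "' is not the scanner owner");
    }
  }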

  @Override
  public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e)
      throws IOException {
    RegionCoprocessorEnvironment env = e.getEnvironment();
    final HRegion region = env.getRegion();
    if (region == null) {
      LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()");
    } else {
      HRegionInfo regionInfo = region.getRegionInfo();
      if (isSpecialTable(regionInfo)) {
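        // regionEnv: presumably a field of the enclosing coprocessor; it is
        // not defined in this excerpt.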
        isSystemOrSuperUser(regionEnv.getConfiguration());
      } else {
        requirePermission("preOpen", Action.ADMIN);
      }
    }
  }

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> c) {
    RegionCoprocessorEnvironment env = c.getEnvironment();
    final HRegion region = env.getRegion();
    if (region == null) {
      LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()");
      return;
    }
    if (AccessControlLists.isAclRegion(region)) {
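      // The rest of the method is elided in this excerpt; typically the
      // coprocessor records that it hosts the ACL region and mirrors the
      // stored permissions, e.g. via initialize(env) shown earlier (a sketch
      // of intent, not verbatim source).
    }
  }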

  public Service getService() {
    return AccessControlProtos.AccessControlService.newReflectiveService(this);
  }

  private TableName getTableName(RegionCoprocessorEnvironment e) {
    HRegion region = e.getRegion();
    TableName tableName = null;

    if (region != null) {
      HRegionInfo regionInfo = region.getRegionInfo();
      if (regionInfo != null) {
        tableName = regionInfo.getTable();
      }
    }
    return tableName;
  }

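    // Excerpt (apparently from HBaseFsck's orphan-region adoption); the
    // statement below is truncated at the start in the original listing.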
        Bytes.toString(orphanRegionRange.getSecond()) + ")");

    // create new region on hdfs.  move data into place.
    HRegionInfo hri = new HRegionInfo(template.getTableName(),
        orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
    LOG.info("Creating new region : " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
    Path target = region.getRegionFileSystem().getRegionDir();

    // rename all the data to new region
    mergeRegionDirs(target, hi);
    fixes++;
  }

  private HRegion createNewMeta() throws IOException {
    Path rootdir = FSUtils.getRootDir(getConf());
    Configuration c = getConf();
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    MasterFileSystem.setInfoFamilyCachingForMeta(false);
    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c,
        HTableDescriptor.META_TABLEDESC);
    MasterFileSystem.setInfoFamilyCachingForMeta(true);
    return meta;
  }

    // we can rebuild, move old meta out of the way and start
    LOG.info("HDFS regioninfo's seems good.  Sidelining old hbase:meta");
    Path backupDir = sidelineOldMeta();

    LOG.info("Creating new hbase:meta");
    HRegion meta = createNewMeta();

    // populate meta
    List<Put> puts = generatePuts(tablesInfo);
    if (puts == null) {
      LOG.fatal("Problem encountered when creating new hbase:meta entries.  " +
        "You may need to restore the previously sidelined hbase:meta");
      return false;
    }
    meta.batchMutate(puts.toArray(new Put[0]));
    HRegion.closeHRegion(meta);
    LOG.info("Success! hbase:meta table rebuilt.");
    LOG.info("Old hbase:meta is moved into " + backupDir);
    return true;
  }

    HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
        setDataBlockEncoding(encoding).
        setEncodeOnDisk(encodeOnDisk).
        setBlocksize(BLOCK_SIZE).
        setBloomFilterType(BloomType.NONE);
    HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);

    // Write the data, but leave some of it in the memstore.
    doPuts(region);

    // Verify correctness while the memstore still contains data.
    doGets(region);

    // Verify correctness again after compaction.
    region.compactStores();
    doGets(region);

    // "cache" is the block cache under test; its definition is not part of
    // this excerpt.
    Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

        // from special EMPTY_START_ROW to next region's startKey
        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
            HConstants.EMPTY_START_ROW, next.getStartKey());

        // TODO test
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region start key was not empty.  Created new empty region: "
            + newRegion + " " + region);
        fixes++;
      }
