Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegion$BatchOperationInProgress
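HRegion's inner class BatchOperationInProgress is the region server's internal bookkeeping for a batch of mutations applied in one WAL/MVCC pass; callers never construct it directly and only reach it through HRegion's public batch entry point. A minimal sketch of driving that machinery, assuming a live HRegion named region and an illustrative family/qualifier (old-style Put.add(), matching the snippets below):

    // Hedged sketch: apply two Puts in one batch. batchMutate() wraps them in a
    // BatchOperationInProgress internally and returns one status per operation.
    byte[] family = Bytes.toBytes("f");   // illustrative family name
    Put p1 = new Put(Bytes.toBytes("row1"));
    p1.add(family, Bytes.toBytes("q"), Bytes.toBytes("v1"));
    Put p2 = new Put(Bytes.toBytes("row2"));
    p2.add(family, Bytes.toBytes("q"), Bytes.toBytes("v2"));

    OperationStatus[] statuses = region.batchMutate(new Mutation[] { p1, p2 });
    for (OperationStatus s : statuses) {
      LOG.info("batch op status: " + s.getOperationStatusCode());
    }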


        HTableDescriptor htd = getTableInfo().getHTD();
        // from curEndKey to EMPTY_START_ROW
        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
            HConstants.EMPTY_START_ROW);

        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region end key was not empty.  Created new empty region: " + newRegion
            + " " + region);
        fixes++;
      }
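All of the hbck repair snippets on this page build an HRegionInfo from explicit key boundaries. A minimal sketch of the convention they rely on, using an illustrative table name and split key (both are assumptions, not values from the code above):

    // Hedged sketch: a table's region chain must start at EMPTY_START_ROW and end
    // at EMPTY_END_ROW (both zero-length); interior boundaries must abut exactly.
    TableName tn = TableName.valueOf("myTable");   // illustrative
    HRegionInfo first = new HRegionInfo(tn, HConstants.EMPTY_START_ROW, Bytes.toBytes("m"));
    HRegionInfo last = new HRegionInfo(tn, Bytes.toBytes("m"), HConstants.EMPTY_END_ROW);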


        // ... (start of this log statement is truncated in the excerpt)
                + Bytes.toStringBinary(holeStopKey)
                + ".  Creating a new regioninfo and region "
                + "dir in hdfs to plug the hole.");
        HTableDescriptor htd = getTableInfo().getHTD();
        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Plugged hold by creating new empty region: "+ newRegion + " " +region);
        fixes++;
      }

        // create new empty container region.
        HTableDescriptor htd = getTableInfo().getHTD();
        // from start key to end Key
        HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
            range.getSecond());
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Created new empty container region: " +
            newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
        debugLsr(region.getRegionFileSystem().getRegionDir());

        // all target regions are closed, should be able to safely cleanup.
        boolean didFix = false;
        Path target = region.getRegionFileSystem().getRegionDir();
        for (HbckInfo contained : overlap) {
          LOG.info("Merging " + contained  + " into " + target );
          int merges = mergeRegionDirs(target, contained);
          if (merges > 0) {
            didFix = true;
          }
        }
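The excerpt ends mid-merge: for each overlapping region, mergeRegionDirs moves its files under the new container region. A hedged sketch of the idea behind such a helper (the method name and directory-layout assumptions are illustrative, not the actual hbck internals):

    // Illustrative only: move each family's HFiles from the contained region's
    // directory under the matching family directory of the target region.
    int moveRegionFiles(FileSystem fs, Path containedRegionDir, Path targetRegionDir)
        throws IOException {
      int moved = 0;
      for (FileStatus family : fs.listStatus(containedRegionDir)) {
        if (!family.isDirectory()) continue;   // skip .regioninfo and other plain files
        Path targetFamily = new Path(targetRegionDir, family.getPath().getName());
        fs.mkdirs(targetFamily);
        for (FileStatus hfile : fs.listStatus(family.getPath())) {
          if (fs.rename(hfile.getPath(), new Path(targetFamily, hfile.getPath().getName()))) {
            moved++;
          }
        }
      }
      return moved;
    }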

    // (comment truncated) ... deciding what blocks to cache-on-write on compaction.
    final String table = "CompactionCacheOnWrite";
    final String cf = "myCF";
    final byte[] cfBytes = Bytes.toBytes(cf);
    final int maxVersions = 3;
    HRegion region = TEST_UTIL.createTestRegion(table,
        new HColumnDescriptor(cf)
            .setCompressionType(compress)
            .setBloomFilterType(BLOOM_TYPE)
            .setMaxVersions(maxVersions)
            .setDataBlockEncoding(encoder.getEncodingInCache())
            .setEncodeOnDisk(encoder.getEncodingOnDisk() !=
                DataBlockEncoding.NONE)
    );
    int rowIdx = 0;
    long ts = EnvironmentEdgeManager.currentTimeMillis();
    for (int iFile = 0; iFile < 5; ++iFile) {
      for (int iRow = 0; iRow < 500; ++iRow) {
        String rowStr = "" + (rowIdx * rowIdx * rowIdx) + "row" + iFile + "_" +
            iRow;
        Put p = new Put(Bytes.toBytes(rowStr));
        ++rowIdx;
        for (int iCol = 0; iCol < 10; ++iCol) {
          String qualStr = "col" + iCol;
          String valueStr = "value_" + rowStr + "_" + qualStr;
          for (int iTS = 0; iTS < 5; ++iTS) {
            p.add(cfBytes, Bytes.toBytes(qualStr), ts++,
                Bytes.toBytes(valueStr));
          }
        }
        region.put(p);
      }
      region.flushcache();
    }
    LruBlockCache blockCache =
        (LruBlockCache) new CacheConfig(conf).getBlockCache();
    blockCache.clearCache();
    assertEquals(0, blockCache.getBlockTypeCountsForTest().size());
    region.compactStores();
    LOG.debug("compactStores() returned");

    Map<BlockType, Integer> blockTypesInCache =
        blockCache.getBlockTypeCountsForTest();
    LOG.debug("Block types in cache: " + blockTypesInCache);
    assertNull(blockTypesInCache.get(BlockType.DATA));
    region.close();
    blockCache.shutdown();
  }
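This test asserts that data blocks written by a compaction never land in the block cache, even though cache-on-write can be enabled for ordinary writes. A minimal sketch of the switch involved, assuming the standard CacheConfig key (a sketch, not the test's own setup):

    // Hedged sketch: enable cache-on-write for newly written data blocks.
    // CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY is "hbase.rs.cacheblocksonwrite".
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);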

    HTableDescriptor htd = new HTableDescriptor(tableWithRefsName);
    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

    // First region, simple with one plain hfile.
    HRegion r0 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
        conf, htd, null, true, true);
    Path storeFile = new Path(new Path(r0.getRegionDir(), TEST_FAMILY), TEST_HFILE);
    FSDataOutputStream out = fs.create(storeFile);
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r0.close();

    // Second region, used to test the split case.
    // This region contains a reference to the hfile in the first region.
    HRegion r1 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
        conf, htd, null, true, true);
    out = fs.create(new Path(new Path(r1.getRegionDir(), TEST_FAMILY),
        storeFile.getName() + '.' + r0.getRegionInfo().getEncodedName()));
    out.write(Bytes.toBytes("Test Data"));
    out.close();
    r1.close();

    Path tableDir = HTableDescriptor.getTableDir(archiveDir, tableWithRefsName);
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
    SnapshotDescriptionUtils.writeSnapshotInfo(sd, snapshotDir, fs);

    }

    private HRegion openRegion(final Path tableDir, final FileSystem fs,
        final Configuration conf, final HRegionInfo hri,
        final HTableDescriptor htd) throws IOException {
      HRegion r = HRegion.newHRegion(tableDir, null, fs, conf, hri, htd, null);
      r.initialize(null);
      return r;
    }
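A short usage sketch for the helper above; tableDir, fs, conf, hri, and htd are assumed to come from the enclosing test fixture:

      // Open the region, exercise it, and make sure it is closed again.
      HRegion region = openRegion(tableDir, fs, conf, hri, htd);
      try {
        // ... scan or mutate the region here ...
      } finally {
        region.close();
      }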

  public int getServerWith(byte[] regionName) {
    int index = -1;
    int count = 0;
    for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      HRegion metaRegion = hrs.getOnlineRegion(regionName);
      if (metaRegion != null) {
        index = count;
        break;
      }
      count++;
    }
    return index;
  }

        // from special EMPTY_START_ROW to next region's startKey
        HRegionInfo newRegion = new HRegionInfo(htd.getName(),
            HConstants.EMPTY_START_ROW, next.getStartKey());

        // TODO test
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region start key was not empty.  Created new empty region: "
            + newRegion + " " + region);
        fixes++;
      }

        HTableDescriptor htd = getTableInfo().getHTD();
        // from curEndKey to EMPTY_START_ROW
        HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey,
            HConstants.EMPTY_START_ROW);

        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region end key was not empty. Created new empty region: " + newRegion
            + " " + region);
        fixes++;
      }

        // ... (start of this log statement is truncated in the excerpt)
                + Bytes.toStringBinary(holeStopKey)
                + ".  Creating a new regioninfo and region "
                + "dir in hdfs to plug the hole.");
        HTableDescriptor htd = getTableInfo().getHTD();
        HRegionInfo newRegion = new HRegionInfo(htd.getName(), holeStartKey, holeStopKey);
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Plugged hold by creating new empty region: "+ newRegion + " " +region);
        fixes++;
      }
