
Examples of org.apache.hadoop.hbase.HRegion$WriteState


  // Open the meta region for the given HRegionInfo, or return it from the
  // cache if it was opened earlier; initialize() must be called first.
  public HRegion getMetaRegion(HRegionInfo metaInfo) throws IOException {
    if (!initialized) {
      throw new IllegalStateException("Must call initialize method first.");
    }
    HRegion meta = metaRegions.get(metaInfo.getRegionName());
    if (meta == null) {
      meta = openMetaRegion(metaInfo);
      metaRegions.put(metaInfo.getRegionName(), meta);
    }
    return meta;
  }


    // Fragment of a meta-scanning method from the same class: after the same
    // initialize() guard as above, it opens the meta region and walks it with
    // a scanner (the body of the try block is truncated here).
    if (!initialized) {
      throw new IllegalStateException("Must call initialize method first.");
    }

    // Open meta region so we can scan it
    HRegion metaRegion = openMetaRegion(metaRegionInfo);

    HScannerInterface metaScanner = metaRegion.getScanner(
        HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
        HConstants.LATEST_TIMESTAMP, null);

    try {
      HStoreKey key = new HStoreKey();
      // ...

  // Tail of the helper that opens the root region: the truncated call
  // evidently assigns this.rootRegion, whose stores are compacted on open.
  // ...
        this.rootdir, this.log, this.conf);
    this.rootRegion.compactStores();
  }

  // Open the given meta region and compact its stores before returning it.
  private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException {
    HRegion meta =
      HRegion.openHRegion(metaInfo, this.rootdir, this.log, this.conf);
    meta.compactStores();
    return meta;
  }

      // Fragment of a catalog-migration routine: the truncated statement above
      // most likely constructs the HLog closed in the outermost finally below.
      // ...
        conf, null);

    try {
      // Open root region so we can scan it

      HRegion rootRegion = new HRegion(
          new Path(rootdir, HConstants.ROOT_TABLE_NAME.toString()), log, fs, conf,
          HRegionInfo.rootRegionInfo, null, null);

      try {
        HScannerInterface rootScanner = rootRegion.getScanner(
            HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
            HConstants.LATEST_TIMESTAMP, null);

        try {
          HStoreKey key = new HStoreKey();
          SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
          while (rootScanner.next(key, results)) {
            HRegionInfo info = Writables.getHRegionInfoOrNull(
                results.get(HConstants.COL_REGIONINFO));
            if (info == null) {
              LOG.warn("region info is null for row " + key.getRow() +
                  " in table " + HConstants.ROOT_TABLE_NAME);
              continue;
            }

            // First move the meta region to where it should be and rename
            // subdirectories as necessary

            migrateRegionDir(fs, rootdir, HConstants.META_TABLE_NAME,
                new Path(rootdir, OLD_PREFIX + info.getEncodedName()));

            // Now scan and process the meta table

            scanMetaRegion(fs, rootdir, log, info);
          }

        } finally {
          rootScanner.close();
        }

      } finally {
        rootRegion.close();
      }

    } finally {
      log.closeAndDelete();
    }

  }
 
  private void scanMetaRegion(FileSystem fs, Path rootdir, HLog log,
      HRegionInfo info) throws IOException {

    HRegion metaRegion = new HRegion(
        new Path(rootdir, info.getTableDesc().getName().toString()), log, fs,
        conf, info, null, null);

    try {
      HScannerInterface metaScanner = metaRegion.getScanner(
          HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
          HConstants.LATEST_TIMESTAMP, null);

      try {
        HStoreKey key = new HStoreKey();
        SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
        while (metaScanner.next(key, results)) {
          HRegionInfo region = Writables.getHRegionInfoOrNull(
              results.get(HConstants.COL_REGIONINFO));
          if (region == null) {
            LOG.warn("region info is null for row " + key.getRow() +
                " in table " + HConstants.META_TABLE_NAME);
            continue;
          }

          // Move the region to where it should be and rename
          // subdirectories as necessary

          migrateRegionDir(fs, rootdir, region.getTableDesc().getName(),
              new Path(rootdir, OLD_PREFIX + region.getEncodedName()));

          results.clear();
        }

      } finally {
        metaScanner.close();
      }

    } finally {
      metaRegion.close();
    }
  }
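The same scan-then-close idiom appears in each of the snippets above. Below is a minimal sketch of that idiom as a standalone helper, assuming it sits in a class with the usual imports for these HBase types; the method name printRegionNames and the use of System.out are illustrative, while every call is taken from the snippets themselves.

  // Walk the region-info column of a catalog region and print each region
  // name; a sketch only, not code from the classes above.
  private void printRegionNames(HRegion catalogRegion) throws IOException {
    HScannerInterface scanner = catalogRegion.getScanner(
        HConstants.COL_REGIONINFO_ARRAY, HConstants.EMPTY_START_ROW,
        HConstants.LATEST_TIMESTAMP, null);
    try {
      HStoreKey key = new HStoreKey();
      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        HRegionInfo info = Writables.getHRegionInfoOrNull(
            results.get(HConstants.COL_REGIONINFO));
        if (info != null) {
          System.out.println(info.getRegionName());
        }
        results.clear();   // reset the row buffer before fetching the next row
      }
    } finally {
      scanner.close();     // always release the scanner, as the snippets do
    }
  }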

      // Fragment of the test fixture: the loop whose tail survives here loads
      // rows into each source region via batchUpdate.
      // ...
          );
          regions[i].batchUpdate(HConstants.LATEST_TIMESTAMP, b);
        }
      }
      // Create root region
      HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
          this.rootdir, this.conf);
      // Create meta region
      HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
          this.rootdir, this.conf);
      // Insert meta into root region
      HRegion.addRegionToMETA(root, meta);
      // Insert the regions we created into the meta
      for(int i = 0; i < regions.length; i++) {
        HRegion.addRegionToMETA(meta, regions[i]);
      }
      // Close root and meta regions
      root.close();
      root.getLog().closeAndDelete();
      meta.close();
      meta.getLog().closeAndDelete();
     
    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfsCluster);
      throw e;
    }
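The fixture above shows the bootstrap order of the old catalog layout: the ROOT region is created first, the META region is registered inside ROOT, and user regions are registered inside META. A minimal sketch of that order for a single hypothetical user region follows; userRegionInfo, rootdir and conf stand in for values the fixture sets up elsewhere.

      // Create the catalog regions and register one user region; a sketch of
      // the calls used in the fixture above, not the fixture itself.
      HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo, rootdir, conf);
      HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo, rootdir, conf);
      HRegion.addRegionToMETA(root, meta);        // META's row goes into ROOT
      HRegion userRegion = HRegion.createHRegion(userRegionInfo, rootdir, conf);
      HRegion.addRegionToMETA(meta, userRegion);  // the user region's row goes into META
      // Close what was opened, as the fixture does for root and meta
      userRegion.close();
      userRegion.getLog().closeAndDelete();
      meta.close();
      meta.getLog().closeAndDelete();
      root.close();
      root.getLog().closeAndDelete();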

      // Fragment of the merge test: the truncated ToolRunner.run call above
      // this point merges regions 0 and 1, just as the later invocations
      // below fold in the remaining regions.
      // ...
      );
      HRegionInfo mergedInfo = merger.getMergedHRegionInfo();

      // Now verify that we can read all the rows from regions 0, 1
      // in the new merged region.
      HRegion merged =
        HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
     
      for (int i = 0; i < 2 ; i++) {
        for (int j = 0; j < rows[i].length; j++) {
          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
          assertNotNull(rows[i][j].toString(), bytes);
          Text value = new Text(bytes);
          assertTrue(value.equals(rows[i][j]));
        }
      }
      merged.close();
      LOG.info("verified merge of regions 0 and 1");
      /*
       * Merge the result of merging regions 0 and 1 with region 2
       */
      LOG.info("merging regions 0+1 and 2");
      merger = new Merge(this.conf);
      ToolRunner.run(merger,
          new String[] {
            this.desc.getName().toString(),
            mergedInfo.getRegionName().toString(),
            this.sourceRegions[2].getRegionName().toString()
          }
      );
      mergedInfo = merger.getMergedHRegionInfo();

      // Now verify that we can read all the rows from regions 0, 1 and 2
      // in the new merged region.
     
      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);

      for (int i = 0; i < 3 ; i++) {
        for (int j = 0; j < rows[i].length; j++) {
          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
          assertNotNull(bytes);
          Text value = new Text(bytes);
          assertTrue(value.equals(rows[i][j]));
        }
      }
      merged.close();
      LOG.info("verified merge of regions 0+1 and 2");
      /*
       * Merge the result of merging regions 0, 1 and 2 with region 3
       */
      LOG.info("merging regions 0+1+2 and 3");
      merger = new Merge(this.conf);
      ToolRunner.run(merger,
          new String[] {
            this.desc.getName().toString(),
            mergedInfo.getRegionName().toString(),
            this.sourceRegions[3].getRegionName().toString()
          }
      );
      mergedInfo = merger.getMergedHRegionInfo();
     
      // Now verify that we can read all the rows from regions 0, 1, 2 and 3
      // in the new merged region.
     
      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
     
      for (int i = 0; i < 4 ; i++) {
        for (int j = 0; j < rows[i].length; j++) {
          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
          assertNotNull(bytes);
          Text value = new Text(bytes);
          assertTrue(value.equals(rows[i][j]));
        }
      }
      merged.close();
      LOG.info("verified merge of regions 0+1+2 and 3");
      /*
       * Merge the result of merging regions 0, 1, 2 and 3 with region 4
       */
      LOG.info("merging regions 0+1+2+3 and 4");
      merger = new Merge(this.conf);
      ToolRunner.run(merger,
          new String[] {
            this.desc.getName().toString(),
            mergedInfo.getRegionName().toString(),
            this.sourceRegions[4].getRegionName().toString()
          }
      );
      mergedInfo = merger.getMergedHRegionInfo();
     
      // Now verify that we can read all the rows from the new merged region.

      merged = HRegion.openHRegion(mergedInfo, this.rootdir, log, this.conf);
     
      for (int i = 0; i < rows.length ; i++) {
        for (int j = 0; j < rows[i].length; j++) {
          byte[] bytes = merged.get(rows[i][j], COLUMN_NAME);
          assertNotNull(bytes);
          Text value = new Text(bytes);
          assertTrue(value.equals(rows[i][j]));
        }
      }
      merged.close();
      LOG.info("verified merge of regions 0+1+2+3 and 4");
     
    } finally {
      log.closeAndDelete();
    }
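The test drives each merge through the generic Hadoop ToolRunner rather than calling the region classes directly. A minimal sketch of that invocation pattern follows; conf, the table name and the two region names are placeholders, and only Merge, ToolRunner.run, getMergedHRegionInfo and openHRegion come from the test itself.

      // Merge two named regions of one table and open the result; a sketch of
      // the pattern the test repeats for each successive merge.
      Merge merger = new Merge(conf);
      int exitCode = ToolRunner.run(merger, new String[] {
          "mytable",                     // placeholder: table being merged
          "mytable,,1210000000000",      // placeholder: name of the first region
          "mytable,rowX,1210000000001"   // placeholder: name of the second region
      });
      if (exitCode == 0) {
        HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
        HRegion merged = HRegion.openHRegion(mergedInfo, rootdir, log, conf);
        // ... read or verify rows here, then close the region as the test does
        merged.close();
      }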

