Package org.apache.hadoop.hbase.regionserver

Examples of org.apache.hadoop.hbase.regionserver.HRegion$BatchOperationInProgress


        // from special EMPTY_START_ROW to next region's startKey
        HRegionInfo newRegion = new HRegionInfo(htd.getName(),
            HConstants.EMPTY_START_ROW, next.getStartKey());

        // TODO test
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region start key was not empty.  Created new empty region: "
            + newRegion + " " +region);
        fixes++;
      }
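The hbck repair snippets on this page all follow the same pattern: describe the missing key range with an HRegionInfo, then materialize an empty region directory for it with HBaseFsckRepair.createHDFSRegionDir. A minimal sketch of the range construction, assuming the 0.90/0.94-era HRegionInfo(byte[] tableName, byte[] startKey, byte[] endKey) constructor; the table name and boundary key are made up for illustration:

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionRangeSketch {
      // Region covering everything before the first existing region:
      // [EMPTY_START_ROW, firstExistingStartKey) -- the same shape the snippet
      // above creates to plug a missing first region.
      public static HRegionInfo headRegion(byte[] tableName, byte[] firstExistingStartKey) {
        return new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, firstExistingStartKey);
      }

      public static void main(String[] args) {
        // Hypothetical table and boundary key, for illustration only.
        HRegionInfo hri = headRegion(Bytes.toBytes("example_table"), Bytes.toBytes("row-1000"));
        System.out.println("Would create empty region: " + hri);
      }
    }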


        HTableDescriptor htd = getTableInfo().getHTD();
        // from curEndKey to EMPTY_START_ROW
        HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey,
            HConstants.EMPTY_START_ROW);

        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Table region end key was not empty. Created new empty region: " + newRegion
            + " " + region);
        fixes++;
      }

                + Bytes.toStringBinary(holeStopKey)
                + ".  Creating a new regioninfo and region "
                + "dir in hdfs to plug the hole.");
        HTableDescriptor htd = getTableInfo().getHTD();
        HRegionInfo newRegion = new HRegionInfo(htd.getName(), holeStartKey, holeStopKey);
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Plugged hold by creating new empty region: "+ newRegion + " " +region);
        fixes++;
      }

        // create new empty container region.
        HTableDescriptor htd = getTableInfo().getHTD();
        // from start key to end key
        HRegionInfo newRegion = new HRegionInfo(htd.getName(), range.getFirst(),
            range.getSecond());
        HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
        LOG.info("Created new empty container region: " +
            newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
        debugLsr(region.getRegionDir());

        // all target regions are closed, should be able to safely cleanup.
        boolean didFix = false;
        Path target = region.getRegionDir();
        for (HbckInfo contained : overlap) {
          LOG.info("Merging " + contained  + " into " + target );
          int merges = mergeRegionDirs(target, contained);
          if (merges > 0) {
            didFix = true;
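The container region above must span every region in overlap. A hedged sketch of how such a covering [start, end) range could be computed over plain HRegionInfo objects with Bytes.compareTo; the actual hbck bookkeeping (HbckInfo, range) differs in detail:

    import java.util.Collection;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: choose the smallest start key and the largest end key among a
    // set of overlapping regions, so that one container region covers them all.
    // An empty key means "unbounded" and therefore always wins.
    public class CoveringRange {
      public static byte[][] of(Collection<HRegionInfo> overlaps) {
        byte[] start = null;
        byte[] end = null;
        for (HRegionInfo hri : overlaps) {
          byte[] s = hri.getStartKey();
          byte[] e = hri.getEndKey();
          if (start == null
              || s.length == 0
              || (start.length != 0 && Bytes.compareTo(s, start) < 0)) {
            start = s;
          }
          if (end == null
              || e.length == 0
              || (end.length != 0 && Bytes.compareTo(e, end) > 0)) {
            end = e;
          }
        }
        return new byte[][] { start, end };  // analogous to range.getFirst()/getSecond()
      }
    }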

        Bytes.toString(orphanRegionRange.getSecond()) + ")");

    // create new region on hdfs.  move data into place.
    HRegionInfo hri = new HRegionInfo(template.getName(),
        orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
    LOG.info("Creating new region: " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, hri, template);
    Path target = region.getRegionDir();

    // rename all the data to new region
    mergeRegionDirs(target, hi);
    fixes++;
  }

    Configuration c = conf;
    HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
    MasterFileSystem.setInfoFamilyCachingForRoot(false);
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    MasterFileSystem.setInfoFamilyCachingForMeta(false);
    HRegion root = HRegion.createHRegion(rootHRI, rootdir, c,
        HTableDescriptor.ROOT_TABLEDESC);
    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c,
        HTableDescriptor.META_TABLEDESC);
    MasterFileSystem.setInfoFamilyCachingForRoot(true);
    MasterFileSystem.setInfoFamilyCachingForMeta(true);

    // Add first region from the META table to the ROOT region.
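The same static factory also works for a user table: create the region directory under the HBase root dir, then release its resources with close() and closeAndDelete() on its log. A minimal sketch, assuming the 0.92/0.94-era HRegion.createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor) signature; the root dir, table name and family are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class CreateRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical root dir and table layout, for illustration only.
        Path rootDir = new Path("/tmp/hbase-sketch");
        HTableDescriptor htd = new HTableDescriptor("example_table");
        htd.addFamily(new HColumnDescriptor("f1"));
        HRegionInfo hri = new HRegionInfo(htd.getName(),
            HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

        // Same call the snippet uses for -ROOT- and .META., applied to a user table.
        HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd);
        try {
          // ... read or write against the region here ...
        } finally {
          region.close();
          region.getLog().closeAndDelete();
        }
      }
    }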

    // we can rebuild, move old root and meta out of the way and start
    LOG.info("HDFS regioninfo's seems good.  Sidelining old .META.");
    Path backupDir = sidelineOldRootAndMeta();

    LOG.info("Creating new .META.");
    HRegion meta = createNewRootAndMeta();

    // populate meta
    List<Put> puts = generatePuts(tablesInfo);
    if (puts == null) {
      LOG.fatal("Problem encountered when creating new .META. entries.  " +
        "You may need to restore the previously sidelined -ROOT- and .META.");
      return false;
    }
    meta.put(puts.toArray(new Put[0]));
    meta.close();
    meta.getLog().closeAndDelete();
    LOG.info("Success! .META. table rebuilt.");
    LOG.info("Old -ROOT- and .META. are moved into " + backupDir);
    return true;
  }
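generatePuts() is internal to HBaseFsck, but the shape of a single catalog entry in this era is roughly: row key = region name, column info:regioninfo = the serialized HRegionInfo. A hedged sketch, assuming Writables.getBytes for the serialization; the real method may add further columns:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Writables;

    // Hedged sketch of a single .META. entry: the row key is the region name and
    // info:regioninfo holds the serialized HRegionInfo.
    public class MetaEntrySketch {
      public static Put toPut(HRegionInfo hri) throws IOException {
        Put p = new Put(hri.getRegionName());
        p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
            Writables.getBytes(hri));
        return p;
      }
    }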

          }
          super.doWrite(info, logKey, logEdit, htd);
        };
      };
      hlog.rollWriter();
      HRegion region = null;
      try {
        region = openRegion(fs, rootRegionDir, htd, hlog);
        long putTime = runBenchmark(new HLogPutBenchmark(region, htd, numIterations, noSync), numThreads);
        logBenchmarkResult("Summary: threads=" + numThreads + ", iterations=" + numIterations,
          numIterations * numThreads, putTime);
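HLogPutBenchmark and runBenchmark belong to the surrounding HLogPerformanceEvaluation harness. A simplified, hedged sketch of the core idea, timing a loop of single-row puts against an already opened HRegion, with a made-up family, qualifier and row format:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hedged sketch: time a batch of single-row puts against an open region.
    // FAMILY/QUALIFIER and the row format are made up for illustration.
    public class PutLoopSketch {
      static final byte[] FAMILY = Bytes.toBytes("f1");
      static final byte[] QUALIFIER = Bytes.toBytes("q");

      public static long run(HRegion region, int iterations) throws IOException {
        long start = System.nanoTime();
        for (int i = 0; i < iterations; i++) {
          Put put = new Put(Bytes.toBytes("row-" + i));
          put.add(FAMILY, QUALIFIER, Bytes.toBytes(i));
          region.put(put);
        }
        return System.nanoTime() - start;  // elapsed nanoseconds
      }
    }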

    // Mock an HRegion
    byte [] tableName = Bytes.toBytes("table");
    byte [][] familyNames = new byte [][] { FAMILY };
    HTableDescriptor htd = new HTableDescriptor();
    htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f1")));
    HRegion region = mockRegion(tableName, familyNames, regionRootDir);
    HLog hlog = region.getLog();

    // Spin up N threads to each perform M log operations
    LogWriter [] incrementors = new LogWriter[numThreads];
    for (int i = 0; i < numThreads; i++) {
      incrementors[i] = new LogWriter(region, tableName, hlog, i,

  public void testSharedData() throws IOException {
    byte [] tableName = Bytes.toBytes("testtable");
    byte [][] families = { fam1, fam2, fam3 };

    Configuration hc = initSplit();
    HRegion region = initHRegion(tableName, getName(), hc,
      new Class<?>[]{}, families);

    for (int i = 0; i < 3; i++) {
      addContent(region, fam3);
      region.flushcache();
    }

    region.compactStores();

    byte [] splitRow = region.checkSplit();

    assertNotNull(splitRow);
    HRegion [] regions = split(region, splitRow);
    for (int i = 0; i < regions.length; i++) {
      regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
    }
    Coprocessor c = regions[0].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    Coprocessor c2 = regions[0].getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
    Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
    assertNotNull(o);
    assertNotNull(o2);
    // the two coprocessors get different sharedDatas
    assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
    for (int i = 1; i < regions.length; i++) {
      c = regions[i].getCoprocessorHost().
          findCoprocessor(CoprocessorImpl.class.getName());
      c2 = regions[i].getCoprocessorHost().
          findCoprocessor(CoprocessorII.class.getName());
      // make sure that all coprocessors of a class have identical sharedDatas
      assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
      assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
    }
    // now have all Environments fail
    for (int i = 0; i < regions.length; i++) {
      try {
        Get g = new Get(regions[i].getStartKey());
        regions[i].get(g, null);
        fail();
      } catch (DoNotRetryIOException xc) {
      }
      assertNull(regions[i].getCoprocessorHost().
          findCoprocessor(CoprocessorII.class.getName()));
    }
    c = regions[0].getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    c = c2 = null;
    // perform a GC
    System.gc();
    // reopen the region
    region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
    c = region.getCoprocessorHost().
        findCoprocessor(CoprocessorImpl.class.getName());
    // CPimpl is unaffected, still the same reference
    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
    c2 = region.getCoprocessorHost().
        findCoprocessor(CoprocessorII.class.getName());
    // a new map and object were created, hence the reference is different:
    // the old entry was indeed removed by the GC and a new one has been created
    assertFalse(((CoprocessorII)c2).getSharedData().get("test2") == o2);
  }
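The shared data maps inspected above come from RegionCoprocessorEnvironment.getSharedData(), which is keyed per coprocessor class. A hedged sketch of how an observer such as CoprocessorImpl could populate that map in start(), which is the behavior the assertions rely on:

    import java.util.concurrent.ConcurrentMap;
    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    // Hedged sketch of a region observer that stashes an object into the
    // shared-data map keyed by coprocessor class; regions hosting the same
    // class see the same map, which is what testSharedData() checks.
    public class SharedDataObserver extends BaseRegionObserver {
      private ConcurrentMap<String, Object> sharedData;

      @Override
      public void start(CoprocessorEnvironment e) {
        sharedData = ((RegionCoprocessorEnvironment) e).getSharedData();
        sharedData.putIfAbsent("test1", new Object());
      }
    }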
