Package org.apache.hadoop.hbase.master

Examples of org.apache.hadoop.hbase.master.AssignmentManager$CreateUnassignedAsyncCallback


    } catch (IOException e) {
      String node = ZKAssign.getNodeName(regionServer.getZooKeeper(), region
          .getRegionInfo().getEncodedName());

      assertFalse(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
      AssignmentManager am = cluster.getMaster().getAssignmentManager();
      for (int i = 0; !am.getRegionsInTransition().containsKey(
          region.getRegionInfo().getEncodedName())
          && i < 100; i++) {
        Thread.sleep(200);
      }
      assertTrue("region is not in transition "+region,
          am.getRegionsInTransition().containsKey(region.getRegionInfo().getEncodedName()));
      RegionState regionState = am.getRegionsInTransition().get(region.getRegionInfo()
          .getEncodedName());
      assertTrue(regionState.getState() == RegionState.State.SPLITTING);
      assertTrue(st.rollback(regionServer, regionServer));
      assertTrue(ZKUtil.checkExists(regionServer.getZooKeeper(), node) == -1);
      for (int i = 0; am.getRegionsInTransition().containsKey(
          region.getRegionInfo().getEncodedName())
          && i < 100; i++) {
        // Just in case the nodeDeleted event did not get executed.
        Thread.sleep(200);
      }
      assertFalse("region is still in transition",
          am.getRegionsInTransition().containsKey(region.getRegionInfo().getEncodedName()));
    }
    if (admin.isTableAvailable(tableName) && admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
      admin.close();
View Full Code Here
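
The test above polls AssignmentManager#getRegionsInTransition() in a bounded loop, once waiting for the region to enter transition and once waiting for it to leave. A minimal sketch of that idiom factored into a helper (the helper name and parameters are illustrative, not part of the original test):

    // Poll (bounded) until the region's presence in the RIT map matches
    // expectPresent, sleeping 200ms between checks like the test above.
    private static boolean waitForRIT(AssignmentManager am, String encodedName,
        boolean expectPresent, long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (System.currentTimeMillis() < deadline) {
        if (am.getRegionsInTransition().containsKey(encodedName) == expectPresent) {
          return true;
        }
        Thread.sleep(200);
      }
      return am.getRegionsInTransition().containsKey(encodedName) == expectPresent;
    }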


    // setup the snapshot
    prepareToTakeSnapshot(snapshot);

    // if the table is enabled, then have the RS actually run the snapshot work
    AssignmentManager assignmentMgr = master.getAssignmentManager();
    if (assignmentMgr.getZKTable().isEnabledTable(snapshot.getTable())) {
      LOG.debug("Table enabled, starting distributed snapshot.");
      snapshotEnabledTable(snapshot);
      LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
    }
    // For disabled table, snapshot is created by the master
    else if (assignmentMgr.getZKTable().isDisabledTable(snapshot.getTable())) {
      LOG.debug("Table is disabled, running snapshot entirely on master.");
      snapshotDisabledTable(snapshot);
      LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
    } else {
      LOG.error("Can't snapshot table '" + snapshot.getTable()
View Full Code Here
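
The dispatch above branches on table state tracked in ZooKeeper via ZKTable. The same check is available to clients through HBaseAdmin, as used in the first example; a hedged sketch, assuming an admin handle and a tableName are in scope:

    // Only proceed when the table is in a definite state; a table that is
    // neither enabled nor disabled is mid-transition and cannot be
    // snapshotted safely.
    if (admin.isTableEnabled(tableName)) {
      // enabled: the region servers run the snapshot work (distributed)
    } else if (admin.isTableDisabled(tableName)) {
      // disabled: the master takes the snapshot by itself
    } else {
      throw new IOException("table is in an intermediate state; cannot snapshot");
    }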

      // Clean out anything in regions in transition. Being conservative, we
      // do this after log splitting. Some states could be handled before --
      // OPENING? OFFLINE? -- while others, like CLOSING, depend on log
      // splitting.
      AssignmentManager am = services.getAssignmentManager();
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName+
        " was carrying (and " + regionsInTransition.size() +
        " regions(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null) {
        RegionStates regionStates = am.getRegionStates();
        for (Map.Entry<HRegionInfo, Result> e: hris.entrySet()) {
          HRegionInfo hri = e.getKey();
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          RegionState rit = regionStates.getRegionTransitionState(hri);
          if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
            ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
            if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
              // If this region is in transition on the dead server, it must be
              // opening or pending_open, which should have been covered by AM#processServerShutdown
              LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                + " because it has been opened in " + addressFromAM.getServerName());
              continue;
            }
            if (rit != null) {
              if (!rit.isOnServer(serverName)
                  || rit.isClosed() || rit.isOpened() || rit.isSplit()) {
                // Skip regions that are in transition on other server,
                // or in state closed/opened/split
                LOG.info("Skip assigning region " + rit);
                continue;
              }
              try {
                // clean zk node
                LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
                ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
              } catch (KeeperException ke) {
                this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                return;
              }
            }
            toAssignRegions.add(hri);
          } else if (rit != null) {
            if (rit.isSplitting() || rit.isSplit()) {
              // This happens when the RS went down and the callback for the SPLITTING or
              // SPLIT node-deleted event has not yet run. If the region was actually split
              // but the RS died before completing the split, we should not try to assign
              // the parent region again. Instead, mark the region offline and delete it
              // from RIT.
              am.regionOffline(hri);
            } else if ((rit.isClosing() || rit.isPendingClose())
                && am.getZKTable().isDisablingOrDisabledTable(hri.getTableNameAsString())) {
              // If the table was partially disabled when the RS went down, clear the RIT
              // and remove the znode for the region. The rit we hold may be stale if the
              // table was in DISABLING state: we may have assigned the region, but the
              // znode left in CLOSING state would never be cleared. Deleting it here is
              // harmless. See HBASE-5927.
              am.deleteClosingOrClosedNode(hri);
              am.regionOffline(hri);
            } else {
              LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                + rit + " not to be assigned by SSH of server " + serverName);
            }
          }
        }
      }
      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw new IOException(ie);
      }
    } finally {
View Full Code Here
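
The shutdown handler's per-region decision above reduces to a small predicate over the RegionState API. A condensed sketch using only the predicates shown in the snippet (the helper itself is illustrative):

    // Should the shutdown handler reassign this region from the dead server?
    private static boolean shouldReassign(RegionState rit, ServerName deadServer) {
      if (rit == null) {
        return true;  // no transition recorded; safe to reassign
      }
      if (!rit.isOnServer(deadServer)) {
        return false; // in transition on some other, live server
      }
      // skip terminal states; they are handled by other paths
      return !(rit.isClosed() || rit.isOpened() || rit.isSplit());
    }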

    if (cpHost != null) {
      cpHost.preDeleteTableHandler(this.tableName);
    }

    // 1. Wait because of region in transition
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        if (states.isRegionFailedToOpen(region)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        Threads.sleep(waitingTimeForEvents);
        LOG.debug("Waiting on region to clear regions in transition; "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region to leave region " +
          region.getRegionNameAsString() + " in transitions");
      }
    }

    // 2. Remove regions from META
    LOG.debug("Deleting regions from META");
    MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);

    // 3. Move the table to /hbase/.tmp
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    try {
      // 4. Delete regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri: regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
            tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
      }

      // 5. Delete table from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }

      LOG.debug("Table '" + Bytes.toString(tableName) + "' archived!");
    } finally {
      String tableNameStr = Bytes.toString(tableName);
      // 6. Update table descriptor cache
      LOG.debug("Removing '" + tableNameStr + "' descriptor.");
      this.masterServices.getTableDescriptors().remove(tableNameStr);

      // 7. If entry for this table in zk, and up in AssignmentManager, remove it.
      LOG.debug("Marking '" + tableNameStr + "' as deleted.");
      am.getZKTable().setDeletedTable(tableNameStr);
    }

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
View Full Code Here
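
Step 1 above is a deadline-bounded wait on regions in transition. The same idiom isolated as a sketch, reusing the handler's variables and poll interval:

    // Poll until the region leaves RIT or the deadline passes, then fail
    // loudly instead of hanging the delete forever.
    long deadline = System.currentTimeMillis() + waitTime;
    while (states.isRegionInTransition(region)
        && System.currentTimeMillis() < deadline) {
      Threads.sleep(waitingTimeForEvents);
    }
    if (states.isRegionInTransition(region)) {
      throw new IOException("Region " + region.getRegionNameAsString()
          + " still in transition after " + waitTime + "ms");
    }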

    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    HBaseAdmin localAdmin = createTable(tableName);
    List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
    HRegionInfo hri = tableRegions.get(0);
    AssignmentManager am = master.getAssignmentManager();
    ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
    localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
    assertEquals("Current region server and region server before move should be same.", server,
      am.getRegionStates().getRegionServerOfRegion(hri));
  }
View Full Code Here
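
The move above deliberately targets the region's current server, so RegionStates should report the same host afterwards. For contrast, a sketch of a real move to a different region server (the destination index is an assumption for illustration):

    // Pick a destination other than the region's current host and move it.
    ServerName dest = cluster.getRegionServer(1).getServerName();
    if (!dest.equals(server)) {
      localAdmin.move(hri.getEncodedNameAsBytes(),
          Bytes.toBytes(dest.getServerName()));
    }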

    TEST_UTIL.startMiniCluster(3);

    executorService = new ThreadPoolExecutor(1, Integer.MAX_VALUE, 60, TimeUnit.SECONDS,
        new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("testhbck"));

    AssignmentManager assignmentManager =
      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager();
    regionStates = assignmentManager.getRegionStates();
  }
View Full Code Here
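
The executor above pairs a SynchronousQueue with an unbounded maximum pool size, so each submitted task is handed straight to a fresh or idle daemon thread rather than queueing. A usage sketch (the task body is a placeholder):

    // Each submit hands work directly to a worker thread; the
    // SynchronousQueue has no capacity, so tasks never wait in a queue.
    Future<?> pending = executorService.submit(new Runnable() {
      @Override
      public void run() {
        // hbck verification work would go here
      }
    });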

          FSUtils.getTableStoreFilePathMap(null, fs, rootDir, tableName);
      assertEquals("Expected nothing but found " + storefilesAfter.toString(),
          storefilesAfter.size(), 0);

      hri = region.getRegionInfo(); // split parent
      AssignmentManager am = cluster.getMaster().getAssignmentManager();
      RegionStates regionStates = am.getRegionStates();
      long start = EnvironmentEdgeManager.currentTime();
      while (!regionStates.isRegionInState(hri, State.SPLIT)) {
        assertFalse("Timed out in waiting split parent to be in state SPLIT",
          EnvironmentEdgeManager.currentTime() - start > 60000);
        Thread.sleep(500);
      }

      // We should not be able to assign it again
      am.assign(hri, true, true);
      assertFalse("Split region can't be assigned",
        regionStates.isRegionInTransition(hri));
      assertTrue(regionStates.isRegionInState(hri, State.SPLIT));

      // We should not be able to unassign it either
      am.unassign(hri, true, null);
      assertFalse("Split region can't be unassigned",
        regionStates.isRegionInTransition(hri));
      assertTrue(regionStates.isRegionInState(hri, State.SPLIT));
    } finally {
      admin.setBalancerRunning(true, false);
View Full Code Here

      // turn balancer off
      master.balanceSwitch(false);

      // wait for assignments to finish, if any
      AssignmentManager mgr = master.getAssignmentManager();
      Collection<RegionState> transRegions =
        mgr.getRegionStates().getRegionsInTransition().values();
      for (RegionState state : transRegions) {
        mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion());
      }

      // move half the open regions from RS 0 to RS 1
      HRegionServer rs = cluster.getRegionServer(0);
      byte[] destRS = Bytes.toBytes(cluster.getRegionServer(1).getServerName().toString());
View Full Code Here

    }
  }

  private void waitForRITtoBeZero(HMaster master) throws Exception {
    // wait for assignments to finish
    AssignmentManager mgr = master.getAssignmentManager();
    Collection<RegionState> transRegions =
      mgr.getRegionStates().getRegionsInTransition().values();
    for (RegionState state : transRegions) {
      mgr.getRegionStates().waitOnRegionToClearRegionsInTransition(state.getRegion());
    }
  }
View Full Code Here
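
A typical call site for waitForRITtoBeZero, sketched from the balancer example above: switch the balancer off first so no new transitions start while waiting:

    master.balanceSwitch(false); // keep the balancer from creating new RITs
    waitForRITtoBeZero(master);  // RegionStates is now quiescent for asserts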

            + " from list of regions to assign because region state: " + rs.getState());
        metaHRIs.remove(rs.getRegion());
      }
    }

    AssignmentManager assignmentManager = this.services.getAssignmentManager();
    for (Map.Entry<HRegionInfo, Result> e : metaHRIs.entrySet()) {
      RegionState rit =
          assignmentManager.getRegionsInTransition().get(e.getKey().getEncodedName());

      if (processDeadRegion(e.getKey(), e.getValue(), assignmentManager,
        this.server.getCatalogTracker())) {
        ServerName addressFromAM = assignmentManager.getRegionServerOfRegion(e.getKey());
        if (rit != null && !rit.isClosing() && !rit.isPendingClose() && !rit.isSplitting()
            && !ritsGoingToServer.contains(e.getKey())) {
          // Skip regions that were in transition, unless they are CLOSING,
          // PENDING_CLOSE, or SPLITTING (or already headed to this server)
          LOG.info("Skip assigning region " + rit);
        } else if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
          LOG.debug("Skip assigning region " + e.getKey().getRegionNameAsString()
              + " because it has been opened in " + addressFromAM.getServerName());
          ritsGoingToServer.remove(e.getKey());
        } else {
          if (rit != null) {
            // clean zk node
            try {
              LOG.info("Reassigning region with rs =" + rit + " and deleting zk node if exists");
              ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), e.getKey());
            } catch (KeeperException ke) {
              this.server.abort("Unexpected ZK exception deleting unassigned node " + e.getKey(),
                ke);
              return null;
            }
          }
          toAssign.add(e.getKey());
        }
      } else if (rit != null && (rit.isSplitting() || rit.isSplit())) {
        // This happens when the RS went down and the callback for the SPLITTING or
        // SPLIT node-deleted event has not yet run. If the region was actually split
        // but the RS died before completing the split, we should not try to assign
        // the parent region again. Instead, mark the region offline and delete it
        // from RIT.
        HRegionInfo region = rit.getRegion();
        assignmentManager.regionOffline(region);
        ritsGoingToServer.remove(region);
      }
      // If the table was partially disabled and the RS went down, we should clear the RIT
      // and remove the node for the region. The rit that we use may be stale in case the table
      // was in DISABLING state but though we did assign we will not be clearing the znode in
View Full Code Here
