Package org.apache.hadoop.hbase.master

Examples of org.apache.hadoop.hbase.master.AssignmentManager$ExistsUnassignedAsyncCallback


    if (cpHost != null) {
      cpHost.preSnapshot(snapshot, desc);
    }

    // if the table is enabled, then have the region servers actually run the snapshot work
    AssignmentManager assignmentMgr = master.getAssignmentManager();
    if (assignmentMgr.getZKTable().isEnabledTable(snapshot.getTable())) {
      LOG.debug("Table enabled, starting distributed snapshot.");
      snapshotEnabledTable(snapshot);
      LOG.debug("Started snapshot: " + SnapshotDescriptionUtils.toString(snapshot));
    }
    // For a disabled table, the snapshot is created by the master
    else if (assignmentMgr.getZKTable().isDisabledTable(snapshot.getTable())) {
      LOG.debug("Table is disabled, running snapshot entirely on master.");
      snapshotDisabledTable(snapshot);
      LOG.debug("Started snapshot: " + SnapshotDescriptionUtils.toString(snapshot));
    } else {
      LOG.error("Can't snapshot table '" + snapshot.getTable()
View Full Code Here
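
The snippet above decides between an online (region-server-driven) and an offline (master-driven) snapshot from the table's zk state. A condensed sketch of that decision, assuming the older getZKTable()/String-table-name API shown here; the class and method names are illustrative, not HBase API:

import java.io.IOException;

import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical helper: true means the region servers run the snapshot,
// false means the master snapshots the (disabled) table itself.
public class SnapshotModeSketch {
  public static boolean useDistributedSnapshot(AssignmentManager am, String table)
      throws IOException {
    if (am.getZKTable().isEnabledTable(table)) {
      return true;     // online snapshot, executed on the region servers
    }
    if (am.getZKTable().isDisabledTable(table)) {
      return false;    // offline snapshot, executed entirely on the master
    }
    throw new IOException("Table " + table + " is neither enabled nor disabled");
  }
}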


    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    HMaster master = cluster.getMaster();
    HBaseAdmin localAdmin = createTable(tableName);
    List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
    HRegionInfo hri = tableRegions.get(0);
    AssignmentManager am = master.getAssignmentManager();
    assertTrue("Region " + hri.getRegionNameAsString()
      + " should be assigned properly", am.waitForAssignment(hri));
    ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
    localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
    assertEquals("Current region server and region server before move should be the same.", server,
      am.getRegionStates().getRegionServerOfRegion(hri));
  }
View Full Code Here
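
The test above verifies that moving a region onto the server that already hosts it leaves the assignment unchanged. The same pattern, condensed into a hedged helper (class and method names are illustrative); it assumes an HBaseAdmin connected to the cluster whose master owns the given AssignmentManager:

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper: asks the master to "move" a region onto the server that
// already hosts it; the returned location is expected to equal the original one.
public class MoveToSameServerSketch {
  public static ServerName moveToCurrentServer(AssignmentManager am, HBaseAdmin admin,
      HRegionInfo hri) throws Exception {
    am.waitForAssignment(hri);                                 // make sure it is assigned first
    ServerName current = am.getRegionStates().getRegionServerOfRegion(hri);
    admin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(current.getServerName()));
    return am.getRegionStates().getRegionServerOfRegion(hri);  // expected to equal 'current'
  }
}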

  private void recreateTable(final List<HRegionInfo> regions) throws IOException {
    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempdir = mfs.getTempDir();
    FileSystem fs = mfs.getFileSystem();

    AssignmentManager assignmentManager = this.masterServices.getAssignmentManager();

    // Set the table znode to ENABLING before recreating the table
    CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName);
    try {
      // 1. Create Table Descriptor
      new FSTableDescriptors(server.getConfiguration())
        .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false);
      Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName);
      Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName);

      HRegionInfo[] newRegions;
      if (this.preserveSplits) {
        newRegions = regions.toArray(new HRegionInfo[regions.size()]);
        LOG.info("Truncate will preserve " + newRegions.length + " regions");
      } else {
        newRegions = new HRegionInfo[1];
        newRegions[0] = new HRegionInfo(this.tableName, null, null);
        LOG.info("Truncate will not preserve the regions");
      }

      // 2. Create Regions
      List<HRegionInfo> regionInfos = ModifyRegionUtils.createRegions(
        masterServices.getConfiguration(), tempdir,
        this.hTableDescriptor, newRegions, null);

      // 3. Move Table temp directory to the hbase root location
      if (!fs.rename(tempTableDir, tableDir)) {
        throw new IOException("Unable to move table from temp=" + tempTableDir +
          " to hbase root=" + tableDir);
      }

      // 4. Add regions to META
      MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
        regionInfos);

      // 5. Trigger immediate assignment of the regions in round-robin fashion
      ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);

      // 6. Set table enabled flag up in zk.
      try {
        assignmentManager.getTableStateManager().setTableState(tableName,
          ZooKeeperProtos.Table.State.ENABLED);
      } catch (CoordinatedStateException e) {
        throw new IOException("Unable to ensure that " + tableName + " will be" +
          " enabled because of a ZooKeeper issue", e);
      }
View Full Code Here
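
Step 6 of the truncate flow above flips the table back to ENABLED through the AssignmentManager's TableStateManager. The same step isolated into a small sketch, assuming the ZooKeeperProtos-based state API used in this snippet (the wrapper name is illustrative):

import java.io.IOException;

import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

// Hypothetical helper: flips a table's zk state to ENABLED, translating the
// coordination failure into an IOException the same way the handler above does.
public class EnableTableStateSketch {
  public static void markEnabled(AssignmentManager am, TableName tableName) throws IOException {
    try {
      am.getTableStateManager().setTableState(tableName, ZooKeeperProtos.Table.State.ENABLED);
    } catch (CoordinatedStateException e) {
      throw new IOException("Unable to mark " + tableName + " enabled", e);
    }
  }
}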

      // executor pool is always available.
      //
      // If AssignmentManager hasn't finished rebuilding user regions,
      // we are not ready to assign dead regions either. So we re-queue up
      // the dead server for further processing too.
      AssignmentManager am = services.getAssignmentManager();
      if (isCarryingMeta() // hbase:meta
          || !am.isFailoverCleanupDone()) {
        this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
        return;
      }

      // Wait on meta to come online; we need it to progress.
      // TODO: Best way to hold strictly here?  We should build this retry logic
      // into the MetaTableAccessor operations themselves.
      // TODO: Is the reading of hbase:meta necessary when the Master has state of
      // cluster in its head?  It should be possible to do without reading hbase:meta
      // in all but one case. On split, the RS updates the hbase:meta
      // table and THEN informs the master of the split via zk nodes in
      // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
      // the regionserver dies, these nodes do not stick around and this server
      // shutdown processing does fixup (see the fixupDaughters method below).
      // If we wanted to skip the hbase:meta scan, we'd have to change at least the
      // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
      // completed (zk is updated after edits to hbase:meta have gone in).  See
      // {@link SplitTransaction}.  We'd also have to figure out another way of
      // doing the hbase:meta daughters fixup below.
      Set<HRegionInfo> hris = null;
      while (!this.server.isStopped()) {
        try {
          server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
          // Skip getting user regions if the server is stopped.
          if (!this.server.isStopped()) {
            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
              hris = MetaTableAccessor.getServerUserRegions(this.server.getShortCircuitConnection(),
                this.serverName).keySet();
            } else {
              // Not using ZK for assignment, regionStates has everything we want
              hris = am.getRegionStates().getServerRegions(serverName);
            }
          }
          break;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        } catch (IOException ioe) {
          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
            serverName + ", retrying hbase:meta read", ioe);
        }
      }
      if (this.server.isStopped()) {
        throw new IOException("Server is stopped");
      }

      // Setting the recovery mode from configuration is delayed until all outstanding
      // split-log tasks have drained
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);

      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting logs for " + serverName + " before assignment.");
          if (distributedLogReplay) {
            LOG.info("Mark regions in recovery before assignment.");
            MasterFileSystem mfs = this.services.getMasterFileSystem();
            mfs.prepareLogReplay(serverName, hris);
          } else {
            this.services.getMasterFileSystem().splitLog(serverName);
          }
          am.getRegionStates().logSplit(serverName);
        } else {
          LOG.info("Skipping log splitting for " + serverName);
        }
      } catch (IOException ioe) {
        resubmit(serverName, ioe);
      }

      // Clean out anything in regions in transition.  Being conservative and
      // doing after log splitting.  Could do some states before -- OPENING?
      // OFFLINE? -- and then others after like CLOSING that depend on log
      // splitting.
      List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
      LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
        " region(s) that " + (serverName == null? "null": serverName) +
        " was carrying (and " + regionsInTransition.size() +
        " region(s) that were opening on this server)");

      List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
      toAssignRegions.addAll(regionsInTransition);

      // Iterate regions that were on this server and assign them
      if (hris != null && !hris.isEmpty()) {
        RegionStates regionStates = am.getRegionStates();
        for (HRegionInfo hri: hris) {
          if (regionsInTransition.contains(hri)) {
            continue;
          }
          String encodedName = hri.getEncodedName();
          Lock lock = am.acquireRegionLock(encodedName);
          try {
            RegionState rit = regionStates.getRegionTransitionState(hri);
            if (processDeadRegion(hri, am)) {
              ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
              if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                // If this region is in transition on the dead server, it must be
                // opening or pending_open, which should have been covered by AM#processServerShutdown
                LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                  + " because it has been opened in " + addressFromAM.getServerName());
                continue;
              }
              if (rit != null) {
                if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                  // Skip regions that are in transition on other server
                  LOG.info("Skip assigning region in transition on another server: " + rit);
                  continue;
                }
                try {
                  // clean up the zk node
                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if it exists");
                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                  regionStates.updateRegionState(hri, State.OFFLINE);
                } catch (KeeperException ke) {
                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
                  return;
                }
              } else if (regionStates.isRegionInState(
                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                regionStates.updateRegionState(hri, State.OFFLINE);
              }
              toAssignRegions.add(hri);
            } else if (rit != null) {
              if (rit.isPendingCloseOrClosing()
                  && am.getTableStateManager().isTableState(hri.getTable(),
                  ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
                // If the table was partially disabled and the RS went down, we should clear the RIT
                // and remove the node for the region.
                // The rit we use may be stale if the table was in DISABLING state: even though we
                // did assign, we would not have cleared the znode while it was in CLOSING state.
                // Doing this is harmless. See HBASE-5927
                regionStates.updateRegionState(hri, State.OFFLINE);
                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                am.offlineDisabledRegion(hri);
              } else {
                LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
                  + rit + " not to be assigned by SSH of server " + serverName);
              }
            }
          } finally {
            lock.unlock();
          }
        }
      }

      try {
        am.assign(toAssignRegions);
      } catch (InterruptedException ie) {
        LOG.error("Caught " + ie + " during round-robin assignment");
        throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
      }

      if (this.shouldSplitHlog && distributedLogReplay) {
        // wait for region assignment to complete
        for (HRegionInfo hri : toAssignRegions) {
          try {
            if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
              // We wait here to avoid log replay hitting the current dead server and incurring an
              // RPC timeout when replay happens before region assignment completes.
              LOG.warn("Region " + hri.getEncodedName()
                  + " didn't complete assignment in time");
            }
View Full Code Here
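
The tail of the shutdown handler above bulk-assigns the collected regions and, under distributed log replay, waits for each one to leave the regions-in-transition map. A minimal sketch of that assign-then-wait pattern, assuming the AssignmentManager methods used above (the helper name and the timeout parameter are illustrative):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical helper: round-robin assigns the given regions, then waits for each
// of them to clear regions-in-transition before declaring the work done.
public class AssignAndWaitSketch {
  public static void assignAndWait(AssignmentManager am, List<HRegionInfo> regions,
      long perRegionTimeoutMs) throws IOException, InterruptedException {
    try {
      am.assign(regions);   // bulk, round-robin assignment
    } catch (InterruptedException ie) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
    }
    for (HRegionInfo hri : regions) {
      // Returns false if the region is still in transition when the timeout expires.
      if (!am.waitOnRegionToClearRegionsInTransition(hri, perRegionTimeoutMs)) {
        throw new IOException("Region " + hri.getEncodedName()
            + " did not complete assignment in time");
      }
    }
  }
}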

  @Override
  public void process() throws IOException {
    boolean gotException = true;
    try {
      AssignmentManager am = this.services.getAssignmentManager();
      this.services.getMasterFileSystem().setLogRecoveryMode();
      boolean distributedLogReplay =
        (this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
      try {
        if (this.shouldSplitHlog) {
          LOG.info("Splitting hbase:meta logs for " + serverName);
          if (distributedLogReplay) {
            Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
            regions.add(HRegionInfo.FIRST_META_REGIONINFO);
            this.services.getMasterFileSystem().prepareLogReplay(serverName, regions);
          } else {
            this.services.getMasterFileSystem().splitMetaLog(serverName);
          }
          am.getRegionStates().logSplit(HRegionInfo.FIRST_META_REGIONINFO);
        }
      } catch (IOException ioe) {
        this.services.getExecutorService().submit(this);
        this.deadServers.add(serverName);
        throw new IOException("failed log splitting for " + serverName + ", will retry", ioe);
      }
 
      // Assign meta if we were carrying it.
      // Check again: the region may have been assigned elsewhere because of an RIT
      // timeout
      if (am.isCarryingMeta(serverName)) {
        LOG.info("Server " + serverName + " was carrying META. Trying to assign.");
        am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
        verifyAndAssignMetaWithRetries();
      } else if (!server.getMetaTableLocator().isLocationAvailable(this.server.getZooKeeper())) {
        // the meta location according to the master is null. This can happen when meta
        // assignment failed in a previous run while the meta znode was updated to null.
        // We should try to assign meta again.
        verifyAndAssignMetaWithRetries();
      } else {
        LOG.info("META has been assigned elsewhere, skip assigning.");
      }

      try {
        if (this.shouldSplitHlog && distributedLogReplay) {
          if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
            regionAssignmentWaitTimeout)) {
            // We wait here to avoid log replay hitting the current dead server and incurring an
            // RPC timeout when replay happens before region assignment completes.
            LOG.warn("Region " + HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()
                + " didn't complete assignment in time");
View Full Code Here
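
The meta variant above first checks whether the dead server was carrying hbase:meta and, if so, offlines the meta region before reassigning it. A minimal sketch of that check, assuming isCarryingMeta returns a boolean as it is used above (the helper name is illustrative; the actual reassignment is left to the caller):

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical helper: if the dead server was carrying hbase:meta, mark the meta
// region offline so the caller can verify and reassign it.
public class MetaCarryingCheckSketch {
  public static boolean needsMetaReassignment(AssignmentManager am, ServerName deadServer) {
    if (am.isCarryingMeta(deadServer)) {
      am.regionOffline(HRegionInfo.FIRST_META_REGIONINFO);
      return true;   // caller should now verify and assign hbase:meta
    }
    return false;
  }
}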

    hTableDescriptor = getTableDescriptor();
  }

  protected void waitRegionInTransition(final List<HRegionInfo> regions)
      throws IOException, CoordinatedStateException {
    AssignmentManager am = this.masterServices.getAssignmentManager();
    RegionStates states = am.getRegionStates();
    long waitTime = server.getConfiguration().
      getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
    for (HRegionInfo region : regions) {
      long done = System.currentTimeMillis() + waitTime;
      while (System.currentTimeMillis() < done) {
        if (states.isRegionInState(region, State.FAILED_OPEN)) {
          am.regionOffline(region);
        }
        if (!states.isRegionInTransition(region)) break;
        try {
          Thread.sleep(waitingTimeForEvents);
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while sleeping");
          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
        }
        LOG.debug("Waiting on region to clear regions in transition; "
          + am.getRegionStates().getRegionTransitionState(region));
      }
      if (states.isRegionInTransition(region)) {
        throw new IOException("Waited hbase.master.wait.on.region (" +
          waitTime + "ms) for region " + region.getRegionNameAsString() +
          " to leave regions in transition");
View Full Code Here
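
waitRegionInTransition above polls RegionStates until the region leaves transition, offlining it first whenever it is stuck in FAILED_OPEN. A standalone sketch of the same loop, assuming the RegionStates methods used above (the helper name and the poll/timeout parameters are illustrative):

import java.io.IOException;
import java.io.InterruptedIOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.RegionStates;

// Hypothetical helper: waits until the region is no longer in transition, giving a
// FAILED_OPEN region another chance to open by offlining it first.
public class WaitOutOfTransitionSketch {
  public static void waitOutOfTransition(AssignmentManager am, HRegionInfo region,
      long timeoutMs, long pollMs) throws IOException {
    RegionStates states = am.getRegionStates();
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (states.isRegionInState(region, State.FAILED_OPEN)) {
        am.regionOffline(region);   // retry the open from scratch
      }
      if (!states.isRegionInTransition(region)) {
        return;
      }
      try {
        Thread.sleep(pollMs);
      } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
      }
    }
    throw new IOException("Region " + region.getRegionNameAsString()
        + " is still in transition after " + timeoutMs + "ms");
  }
}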

    } finally {
      // 3. Update table descriptor cache
      LOG.debug("Removing '" + tableName + "' descriptor.");
      this.masterServices.getTableDescriptors().remove(tableName);

      AssignmentManager am = this.masterServices.getAssignmentManager();

      // 4. Clean up regions of the table in RegionStates.
      LOG.debug("Removing '" + tableName + "' from region states.");
      am.getRegionStates().tableDeleted(tableName);

      // 5. If there is an entry for this table in zk and in the AssignmentManager, remove it.
      LOG.debug("Marking '" + tableName + "' as deleted.");
      am.getTableStateManager().setDeletedTable(tableName);
    }

    if (cpHost != null) {
      cpHost.postDeleteTableHandler(this.tableName);
    }
View Full Code Here
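
The finally block above shows the in-memory and zk cleanup the delete handler performs once the table's data is gone. A condensed sketch of just the AssignmentManager part, assuming setDeletedTable can throw CoordinatedStateException in this HBase version (the helper name and the exception signature are assumptions):

import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical helper: drops the deleted table's cached region states and its
// table-state entry, mirroring steps 4 and 5 above.
public class PostDeleteCleanupSketch {
  public static void cleanUp(AssignmentManager am, TableName tableName)
      throws CoordinatedStateException {
    am.getRegionStates().tableDeleted(tableName);           // 4. forget the table's regions
    am.getTableStateManager().setDeletedTable(tableName);   // 5. remove the zk/AM table entry
  }
}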

      cpHost.preSnapshot(snapshot, desc);
    }

    // if the table is enabled, then have the region servers actually run the snapshot work
    TableName snapshotTable = TableName.valueOf(snapshot.getTable());
    AssignmentManager assignmentMgr = master.getAssignmentManager();
    if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
        ZooKeeperProtos.Table.State.ENABLED)) {
      LOG.debug("Table enabled, starting distributed snapshot.");
      snapshotEnabledTable(snapshot);
      LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
    }
    // For a disabled table, the snapshot is created by the master
    else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
        ZooKeeperProtos.Table.State.DISABLED)) {
      LOG.debug("Table is disabled, running snapshot entirely on master.");
      snapshotDisabledTable(snapshot);
      LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
    } else {
View Full Code Here
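
This newer snapshot path makes the same enabled/disabled decision as the first snippet, but through the TableStateManager API. A condensed sketch of it, assuming that API (the class and method names are illustrative):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;

// Hypothetical helper: true means the region servers run the snapshot,
// false means the master snapshots the disabled table itself.
public class SnapshotModeStateManagerSketch {
  public static boolean useDistributedSnapshot(AssignmentManager am, TableName table)
      throws IOException {
    if (am.getTableStateManager().isTableState(table, ZooKeeperProtos.Table.State.ENABLED)) {
      return true;    // enabled table: distributed snapshot on the region servers
    }
    if (am.getTableStateManager().isTableState(table, ZooKeeperProtos.Table.State.DISABLED)) {
      return false;   // disabled table: snapshot runs entirely on the master
    }
    throw new IOException("Table " + table + " is neither enabled nor disabled");
  }
}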

    TEST_UTIL.startMiniZKCluster();
  }

  @Test
  public void testAssignmentManagerDoesntUseDrainingServer() throws Exception {
    AssignmentManager am;
    Configuration conf = TEST_UTIL.getConfiguration();
    final HMaster master = Mockito.mock(HMaster.class);
    final Server server = Mockito.mock(Server.class);
    final ServerManager serverManager = Mockito.mock(ServerManager.class);
    final ServerName SERVERNAME_A = ServerName.valueOf("mockserver_a.org", 1000, 8000);
    final ServerName SERVERNAME_B = ServerName.valueOf("mockserver_b.org", 1001, 8000);
    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
    final HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("table_test"),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);

    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
      "zkWatcher-Test", abortable, true);

    Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();

    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);

    Mockito.when(server.getConfiguration()).thenReturn(conf);
    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);

    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
    cp.initialize(server);
    cp.start();

    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);

    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
    Mockito.when(serverManager.getOnlineServersList())
    .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
   
    Mockito.when(serverManager.createDestinationServersList())
        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
    Mockito.when(serverManager.createDestinationServersList(null))
        .thenReturn(new ArrayList<ServerName>(onlineServers.keySet()));
   
    for (ServerName sn : onlineServers.keySet()) {
      Mockito.when(serverManager.isServerOnline(sn)).thenReturn(true);
      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1)).thenReturn(true);
      Mockito.when(serverManager.sendRegionClose(sn, REGIONINFO, -1, null, false)).thenReturn(true);
      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, new ArrayList<ServerName>()))
      .thenReturn(RegionOpeningState.OPENED);
      Mockito.when(serverManager.sendRegionOpen(sn, REGIONINFO, -1, null))
      .thenReturn(RegionOpeningState.OPENED);
      Mockito.when(serverManager.addServerToDrainList(sn)).thenReturn(true);
    }

    Mockito.when(master.getServerManager()).thenReturn(serverManager);

    am = new AssignmentManager(server, serverManager,
        balancer, startupMasterExecutor("mockExecutorService"), null, null);

    Mockito.when(master.getAssignmentManager()).thenReturn(am);
    Mockito.when(master.getZooKeeper()).thenReturn(zkWatcher);

    am.addPlan(REGIONINFO.getEncodedName(), new RegionPlan(REGIONINFO, null, SERVERNAME_A));

    zkWatcher.registerListenerFirst(am);

    addServerToDrainedList(SERVERNAME_A, onlineServers, serverManager);

    am.assign(REGIONINFO, true);

    setRegionOpenedOnZK(zkWatcher, SERVERNAME_A, REGIONINFO);
    setRegionOpenedOnZK(zkWatcher, SERVERNAME_B, REGIONINFO);

    am.waitForAssignment(REGIONINFO);

    assertTrue(am.getRegionStates().isRegionOnline(REGIONINFO));
    assertNotEquals(am.getRegionStates().getRegionServerOfRegion(REGIONINFO), SERVERNAME_A);
  }
View Full Code Here
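
The drain test above builds its AssignmentManager around a Mockito-mocked Server and ServerManager. A small sketch of just the ServerManager mocking, assuming the stubbed methods shown above (the helper name is illustrative); the returned mock can then be handed to the AssignmentManager constructor exactly as the test does:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;
import org.mockito.Mockito;

// Hypothetical helper: builds a mocked ServerManager that reports the given
// servers as online and as valid assignment destinations.
public class MockServerManagerSketch {
  public static ServerManager onlineOnly(ServerName... servers) {
    Map<ServerName, ServerLoad> online = new HashMap<ServerName, ServerLoad>();
    for (ServerName sn : servers) {
      online.put(sn, ServerLoad.EMPTY_SERVERLOAD);
    }
    ServerManager sm = Mockito.mock(ServerManager.class);
    Mockito.when(sm.getOnlineServers()).thenReturn(online);
    Mockito.when(sm.getOnlineServersList())
        .thenReturn(new ArrayList<ServerName>(online.keySet()));
    Mockito.when(sm.createDestinationServersList())
        .thenReturn(new ArrayList<ServerName>(online.keySet()));
    Mockito.when(sm.createDestinationServersList(null))
        .thenReturn(new ArrayList<ServerName>(online.keySet()));
    for (ServerName sn : servers) {
      Mockito.when(sm.isServerOnline(sn)).thenReturn(true);
    }
    return sm;
  }
}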

  @Test
  public void testAssignmentManagerDoesntUseDrainedServerWithBulkAssign() throws Exception {
    Configuration conf = TEST_UTIL.getConfiguration();
    LoadBalancer balancer = LoadBalancerFactory.getLoadBalancer(conf);
    AssignmentManager am;
    final HMaster master = Mockito.mock(HMaster.class);
    final Server server = Mockito.mock(Server.class);
    final ServerManager serverManager = Mockito.mock(ServerManager.class);
    final ServerName SERVERNAME_A = ServerName.valueOf("mockserverbulk_a.org", 1000, 8000);
    final ServerName SERVERNAME_B = ServerName.valueOf("mockserverbulk_b.org", 1001, 8000);
    final ServerName SERVERNAME_C = ServerName.valueOf("mockserverbulk_c.org", 1002, 8000);
    final ServerName SERVERNAME_D = ServerName.valueOf("mockserverbulk_d.org", 1003, 8000);
    final ServerName SERVERNAME_E = ServerName.valueOf("mockserverbulk_e.org", 1004, 8000);
    final Map<HRegionInfo, ServerName> bulk = new HashMap<HRegionInfo, ServerName>();

    Set<ServerName> bunchServersAssigned = new HashSet<ServerName>();
   
    HRegionInfo REGIONINFO_A = new HRegionInfo(TableName.valueOf("table_A"),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
    HRegionInfo REGIONINFO_B = new HRegionInfo(TableName.valueOf("table_B"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
    HRegionInfo REGIONINFO_C = new HRegionInfo(TableName.valueOf("table_C"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
    HRegionInfo REGIONINFO_D = new HRegionInfo(TableName.valueOf("table_D"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
    HRegionInfo REGIONINFO_E = new HRegionInfo(TableName.valueOf("table_E"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);

    Map<ServerName, ServerLoad> onlineServers = new HashMap<ServerName, ServerLoad>();
    List<ServerName> drainedServers = new ArrayList<ServerName>();

    onlineServers.put(SERVERNAME_A, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_B, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_C, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_D, ServerLoad.EMPTY_SERVERLOAD);
    onlineServers.put(SERVERNAME_E, ServerLoad.EMPTY_SERVERLOAD);

    bulk.put(REGIONINFO_A, SERVERNAME_A);
    bulk.put(REGIONINFO_B, SERVERNAME_B);
    bulk.put(REGIONINFO_C, SERVERNAME_C);
    bulk.put(REGIONINFO_D, SERVERNAME_D);
    bulk.put(REGIONINFO_E, SERVERNAME_E);

    ZooKeeperWatcher zkWatcher = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
        "zkWatcher-BulkAssignTest", abortable, true);

    Mockito.when(server.getConfiguration()).thenReturn(conf);
    Mockito.when(server.getServerName()).thenReturn(ServerName.valueOf("masterMock,1,1"));
    Mockito.when(server.getZooKeeper()).thenReturn(zkWatcher);

    CoordinatedStateManager cp = new ZkCoordinatedStateManager();
    cp.initialize(server);
    cp.start();

    Mockito.when(server.getCoordinatedStateManager()).thenReturn(cp);

    Mockito.when(serverManager.getOnlineServers()).thenReturn(onlineServers);
    Mockito.when(serverManager.getOnlineServersList()).thenReturn(
      new ArrayList<ServerName>(onlineServers.keySet()));
   
    Mockito.when(serverManager.createDestinationServersList()).thenReturn(
      new ArrayList<ServerName>(onlineServers.keySet()));
    Mockito.when(serverManager.createDestinationServersList(null)).thenReturn(
      new ArrayList<ServerName>(onlineServers.keySet()));
   
    for (Entry<HRegionInfo, ServerName> entry : bulk.entrySet()) {
      Mockito.when(serverManager.isServerOnline(entry.getValue())).thenReturn(true);
      Mockito.when(serverManager.sendRegionClose(entry.getValue(),
        entry.getKey(), -1)).thenReturn(true);
      Mockito.when(serverManager.sendRegionOpen(entry.getValue(),
        entry.getKey(), -1, null)).thenReturn(RegionOpeningState.OPENED);
      Mockito.when(serverManager.addServerToDrainList(entry.getValue())).thenReturn(true);
    }
   
    Mockito.when(master.getServerManager()).thenReturn(serverManager);

    drainedServers.add(SERVERNAME_A);
    drainedServers.add(SERVERNAME_B);
    drainedServers.add(SERVERNAME_C);
    drainedServers.add(SERVERNAME_D);

    am = new AssignmentManager(server, serverManager,
      balancer, startupMasterExecutor("mockExecutorServiceBulk"), null, null);
   
    Mockito.when(master.getAssignmentManager()).thenReturn(am);

    zkWatcher.registerListener(am);
   
    for (ServerName drained : drainedServers) {
      addServerToDrainedList(drained, onlineServers, serverManager);
    }
   
    am.assign(bulk);

    Map<String, RegionState> regionsInTransition = am.getRegionStates().getRegionsInTransition();
    for (Entry<String, RegionState> entry : regionsInTransition.entrySet()) {
      setRegionOpenedOnZK(zkWatcher, entry.getValue().getServerName(),
        entry.getValue().getRegion());
    }
   
    am.waitForAssignment(REGIONINFO_A);
    am.waitForAssignment(REGIONINFO_B);
    am.waitForAssignment(REGIONINFO_C);
    am.waitForAssignment(REGIONINFO_D);
    am.waitForAssignment(REGIONINFO_E);
   
    Map<HRegionInfo, ServerName> regionAssignments = am.getRegionStates().getRegionAssignments();
    for (Entry<HRegionInfo, ServerName> entry : regionAssignments.entrySet()) {
      LOG.info("Region Assignment: "
          + entry.getKey().getRegionNameAsString() + " Server: " + entry.getValue());
      bunchServersAssigned.add(entry.getValue());
    }
View Full Code Here
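
The bulk-assign test above collects the servers that received regions so it can verify that none of the drained servers got any. That final check, expressed as a small hedged helper over the region-assignments map (the helper name is illustrative):

import java.util.Collection;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;

// Hypothetical helper: returns true only if no region ended up on a drained server.
public class DrainedServerCheckSketch {
  public static boolean noneAssignedToDrained(AssignmentManager am,
      Collection<ServerName> drainedServers) {
    Map<HRegionInfo, ServerName> assignments = am.getRegionStates().getRegionAssignments();
    for (ServerName assigned : assignments.values()) {
      if (drainedServers.contains(assigned)) {
        return false;   // a region landed on a draining server
      }
    }
    return true;
  }
}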
