Package org.apache.helix

Examples of org.apache.helix.ZNRecordBucketizer
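ZNRecordBucketizer splits the map fields and list fields of a large ZNRecord into smaller per-bucket records, so a bucketized property can be stored as several child znodes instead of one oversized znode. As the tests further down show, keys are grouped by the numeric suffix of the partition name: with a bucket size of 3, "TestDB_0" through "TestDB_2" fall into bucket "TestDB_p0-p2", while "TestDB_3" and "TestDB_4" fall into "TestDB_p3-p5". A minimal standalone sketch follows (the resource name, partition names, and map-field contents are made up for illustration; java.util imports are assumed):

    // Minimal sketch: bucket a record whose map fields are keyed by partition name
    int bucketSize = 3;
    ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);

    ZNRecord record = new ZNRecord("TestDB");
    for (int i = 0; i < 5; i++) {
      record.setMapField("TestDB_" + i,
          Collections.singletonMap("CURRENT_STATE", "ONLINE"));
    }

    // Each key maps to a bucket name such as "TestDB_p0-p2" or "TestDB_p3-p5"
    String bucketName = bucketizer.getBucketName("TestDB_4");

    // bucketize() splits the record's map/list fields into one ZNRecord per bucket,
    // keyed by bucket name; these typically become child znodes of the parent record
    Map<String, ZNRecord> buckets = bucketizer.bucketize(record);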


    HelixDataAccessor accessor = manager.getHelixDataAccessor();
    Builder keyBuilder = accessor.keyBuilder();

    int bucketSize = message.getBucketSize();
    ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);

    // Lock the helix manager so that the session id will not change while we update
    // the state model's state. For the ZK current state this is OK, since we have a
    // per-session current state node.
    synchronized (manager)
    {
      if (!message.getTgtSessionId().equals(manager.getSessionId()))
      {
        logger.warn("Session id has changed. Skip postExecutionMessage. Old session "
            + message.getExecutionSessionId() + " , new session : "
            + manager.getSessionId());
        return;
      }

      if (taskResult.isSuccess())
      {
        // String fromState = message.getFromState();
        String toState = message.getToState();
        _currentStateDelta.setState(partitionKey, toState);

        if (toState.equalsIgnoreCase("DROPPED"))
        {
          // for "OnOfflineToDROPPED" message, we need to remove the resource key record
          // from the current state of the instance because the resource key is dropped.
          // In the state model it will stay as "OFFLINE", which is OK.
          ZNRecordDelta delta =
              new ZNRecordDelta(_currentStateDelta.getRecord(), MergeOperation.SUBTRACT);
          // Don't subtract simple fields since they contain stateModelDefRef
          delta._record.getSimpleFields().clear();

          List<ZNRecordDelta> deltaList = new ArrayList<ZNRecordDelta>();
          deltaList.add(delta);
          _currentStateDelta.setDeltaList(deltaList);
        }
        else
        {
          // if the partition is not to be dropped, update _stateModel to the TO_STATE
          _stateModel.updateState(toState);
        }
      }
      else
      {
        if (exception instanceof HelixStateMismatchException)
        {
          // if fromState mismatch, set current state on zk to stateModel's current state
          logger.warn("Force CurrentState on Zk to be stateModel's CurrentState. partitionKey: "
              + partitionKey
              + ", currentState: "
              + _stateModel.getCurrentState()
              + ", message: " + message);
          _currentStateDelta.setState(partitionKey, _stateModel.getCurrentState());
        }
        else
        {
          StateTransitionError error =
              new StateTransitionError(ErrorType.INTERNAL, ErrorCode.ERROR, exception);
          if (exception instanceof InterruptedException)
          {
            if (_isTimeout)
            {
              error =
                  new StateTransitionError(ErrorType.INTERNAL,
                                           ErrorCode.TIMEOUT,
                                           exception);
            }
            else
            {
              // State transition interrupted but not caused by timeout. Keep the current
              // state in this case
              logger.error("State transition interrupted but not timeout. Not updating state. Partition : "
                  + message.getPartitionName() + " MsgId : " + message.getMsgId());
              return;
            }
          }
          _stateModel.rollbackOnError(message, context, error);
          _currentStateDelta.setState(partitionKey, "ERROR");
          _stateModel.updateState("ERROR");
        }
      }
    }
    try
    {
      // Update the ZK current state of the node
      PropertyKey key = keyBuilder.currentState(instanceName,
                              sessionId,
                              resource,
                              bucketizer.getBucketName(partitionKey));
      if (!_message.getGroupMessageMode())
      {
        accessor.updateProperty(key, _currentStateDelta);
      }
      else
        // ... (snippet truncated)


        ZNRecord metaRecord = new ZNRecord(value.getId());
        metaRecord.setSimpleFields(value.getRecord().getSimpleFields());
        success = _baseDataAccessor.set(path, metaRecord, options);
        if (success)
        {
          ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(value.getBucketSize());

          Map<String, ZNRecord> map = bucketizer.bucketize(value.getRecord());
          List<String> paths = new ArrayList<String>();
          List<ZNRecord> bucketizedRecords = new ArrayList<ZNRecord>();
          for (String bucketName : map.keySet())
          {
            paths.add(path + "/" + bucketName);
            // ... (snippet truncated)

          ZNRecord metaRecord = new ZNRecord(value.getId());
          metaRecord.setSimpleFields(value.getRecord().getSimpleFields());
          records.add(metaRecord);

          ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(value.getBucketSize());

          Map<String, ZNRecord> map = bucketizer.bucketize(value.getRecord());
          List<String> childBucketizedPaths = new ArrayList<String>();
          List<ZNRecord> childBucketizedRecords = new ArrayList<ZNRecord>();
          for (String bucketName : map.keySet())
          {
            childBucketizedPaths.add(path + "/" + bucketName);
            // ... (snippet truncated)


public class TestZNRecordBucketizer {
  @Test
  public void testZNRecordBucketizer() {
    final int bucketSize = 3;
    ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);
    String[] partitionNames = {
        "TestDB_0", "TestDB_1", "TestDB_2", "TestDB_3", "TestDB_4"
    };
    for (int i = 0; i < partitionNames.length; i++) {
      String partitionName = partitionNames[i];
      String bucketName = bucketizer.getBucketName(partitionName);
      int startBucketNb = i / bucketSize * bucketSize;
      int endBucketNb = startBucketNb + bucketSize - 1;
      String expectBucketName = "TestDB_p" + startBucketNb + "-p" + endBucketNb;
      System.out.println("Expect: " + expectBucketName + ", actual: " + bucketName);
      Assert.assertEquals(expectBucketName, bucketName);
      // ... (snippet truncated)

    HelixDataAccessor accessor = _manager.getHelixDataAccessor();
    Builder keyBuilder = accessor.keyBuilder();

    int bucketSize = _message.getBucketSize();
    ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);

    // No need to synchronize on the manager: we cancel the executor of the expired
    // session before starting the executor for the new session.
    // The sessionId might change while we update the state model's state;
    // for the ZK current state this is OK, since we have a per-session current state node.
    if (!_message.getTgtSessionId().equals(_manager.getSessionId())) {
      logger.warn("Session id has changed. Skip postExecutionMessage. Old session "
          + _message.getExecutionSessionId() + " , new session : " + _manager.getSessionId());
      return;
    }

    if (taskResult.isSuccess()) {
      // String fromState = message.getFromState();
      String toState = _message.getToState();
      _currentStateDelta.setState(partitionKey, toState);

      if (toState.equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
        // for "OnOfflineToDROPPED" message, we need to remove the resource key record
        // from the current state of the instance because the resource key is dropped.
        // In the state model it will stay as "OFFLINE", which is OK.
        ZNRecordDelta delta =
            new ZNRecordDelta(_currentStateDelta.getRecord(), MergeOperation.SUBTRACT);
        // Don't subtract simple fields since they contain stateModelDefRef
        delta._record.getSimpleFields().clear();

        List<ZNRecordDelta> deltaList = new ArrayList<ZNRecordDelta>();
        deltaList.add(delta);
        _currentStateDelta.setDeltaList(deltaList);
        _stateModelFactory.removeStateModel(partitionKey);
      } else {
        // if the partition is not to be dropped, update _stateModel to the TO_STATE
        _stateModel.updateState(toState);
      }
    } else {
      if (exception instanceof HelixStateMismatchException) {
        // if fromState mismatch, set current state on zk to stateModel's current state
        logger.warn("Force CurrentState on Zk to be stateModel's CurrentState. partitionKey: "
            + partitionKey + ", currentState: " + _stateModel.getCurrentState() + ", message: "
            + _message);
        _currentStateDelta.setState(partitionKey, _stateModel.getCurrentState());
      } else {
        StateTransitionError error =
            new StateTransitionError(ErrorType.INTERNAL, ErrorCode.ERROR, exception);
        if (exception instanceof InterruptedException) {
          if (_isTimeout) {
            error = new StateTransitionError(ErrorType.INTERNAL, ErrorCode.TIMEOUT, exception);
          } else {
            // State transition interrupted but not caused by timeout. Keep the current
            // state in this case
            logger
                .error("State transition interrupted but not timeout. Not updating state. Partition : "
                    + _message.getPartitionName() + " MsgId : " + _message.getMsgId());
            return;
          }
        }
        _stateModel.rollbackOnError(_message, _notificationContext, error);
        _currentStateDelta.setState(partitionKey, HelixDefinedState.ERROR.toString());
        _stateModel.updateState(HelixDefinedState.ERROR.toString());

        // if the error occurred while transitioning from the ERROR state, disable the partition
        if (_message.getFromState().equalsIgnoreCase(HelixDefinedState.ERROR.toString())) {
          disablePartition();
        }
      }
    }

    try {
      // Update the ZK current state of the node
      PropertyKey key =
          keyBuilder.currentState(instanceName, sessionId, resource,
              bucketizer.getBucketName(partitionKey));
      if (_message.getAttribute(Attributes.PARENT_MSG_ID) == null) {
        // normal message
        accessor.updateProperty(key, _currentStateDelta);
      } else {
        // sub-message of a batch message
        // ... (snippet truncated)

          ZNRecord metaRecord = new ZNRecord(value.getId());
          metaRecord.setSimpleFields(value.getRecord().getSimpleFields());
          records.add(metaRecord);

          ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(value.getBucketSize());

          Map<String, ZNRecord> map = bucketizer.bucketize(value.getRecord());
          List<String> childBucketizedPaths = new ArrayList<String>();
          List<ZNRecord> childBucketizedRecords = new ArrayList<ZNRecord>();
          for (String bucketName : map.keySet()) {
            childBucketizedPaths.add(path + "/" + bucketName);
            childBucketizedRecords.add(map.get(bucketName));
            // ... (snippet truncated)

        // set parent node
        ZNRecord metaRecord = new ZNRecord(value.getId());
        metaRecord.setSimpleFields(value.getRecord().getSimpleFields());
        success = _baseDataAccessor.set(path, metaRecord, options);
        if (success) {
          ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(value.getBucketSize());

          Map<String, ZNRecord> map = bucketizer.bucketize(value.getRecord());
          List<String> paths = new ArrayList<String>();
          List<ZNRecord> bucketizedRecords = new ArrayList<ZNRecord>();
          for (String bucketName : map.keySet()) {
            paths.add(path + "/" + bucketName);
            bucketizedRecords.add(map.get(bucketName));
            // ... (snippet truncated)
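
Each of the accessor snippets above truncates right before the bucketized records are written back to ZooKeeper. A hypothetical continuation, assuming the bulk BaseDataAccessor.setChildren(paths, records, options) call is used (the elided code may do this differently), could look roughly like:

          // Hypothetical continuation (illustration only, not the elided code):
          // write every bucket record under its own child path in one bulk call.
          // setChildren() returns one success flag per path.
          boolean[] setResults = _baseDataAccessor.setChildren(paths, bucketizedRecords, options);
          for (boolean ok : setResults) {
            success = success && ok;
          }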


