Package: org.infinispan.remoting.transport

Usage examples of org.infinispan.remoting.transport.Address


      }
      return null;
   }

   final boolean shouldTransferOwnershipToJoinNode(Object k) {    
      Address self = transport.getAddress();     
      int numCopies = configuration.getNumOwners();
      List<Address> oldOwnerList = oldCH.locate(k, numCopies);
      if (!oldOwnerList.isEmpty() && self.equals(oldOwnerList.get(0))) {
         List<Address> newOwnerList = newCH.locate(k, numCopies);
         if (newOwnerList.contains(sender)) return true;
      }
      return false;
   }
View Full Code Here


      if (t.isCoordinator()) {
         if (trace) log.tracef("Node %s is the coordinator, marking push for %d as complete directly", self, viewId);
         markNodePushCompleted(viewId, self);
      } else {
         final RehashControlCommand cmd = cf.buildRehashControlCommand(RehashControlCommand.Type.NODE_PUSH_COMPLETED, self, viewId);
         Address coordinator = rpcManager.getTransport().getCoordinator();

         if (trace) log.tracef("Node %s is not the coordinator, sending request to mark push for %d as complete to %s", self, viewId, coordinator);
         rpcManager.invokeRemotely(Collections.singleton(coordinator), cmd, ResponseMode.SYNCHRONOUS, configuration.getRehashRpcTimeout());
      }
   }
View Full Code Here

   }

   private void pushState(ConsistentHash chOld, ConsistentHash chNew, Map<Address, Map<Object, InternalCacheValue>> states) throws InterruptedException, ExecutionException {
      NotifyingNotifiableFuture<Object> stateTransferFuture = new AggregatingNotifyingFutureImpl(null, states.size());
      for (Map.Entry<Address, Map<Object, InternalCacheValue>> entry : states.entrySet()) {
         final Address target = entry.getKey();
         Map<Object, InternalCacheValue> state = entry.getValue();
         log.debugf("Pushing to node %s %d keys", target, state.size());
         log.tracef("Pushing to node %s keys: %s", target, state.keySet());

         final RehashControlCommand cmd = cf.buildRehashControlCommand(RehashControlCommand.Type.APPLY_STATE, self,
View Full Code Here

      if (oldOwners.equals(newOwners))
         return;

      // 3. The pushing server is the last node in the old owner list that's also in the new CH
      // It will only be null if all the old owners left the cluster
      Address pushingOwner = null;
      for (int i = oldOwners.size() - 1; i >= 0; i--) {
         Address server = oldOwners.get(i);
         if (chNew.getCaches().contains(server)) {
            pushingOwner = server;
            break;
         }
      }
View Full Code Here

    * If none exists, will be created first.
    */
   public LocalTransaction getOrCreateLocalTransaction(Transaction transaction, InvocationContext ctx) {
      LocalTransaction current = localTransactions.get(transaction);
      if (current == null) {
         Address localAddress = rpcManager != null ? rpcManager.getTransport().getAddress() : null;
         GlobalTransaction tx = txFactory.newGlobalTransaction(localAddress, false);
         if (trace) log.tracef("Created a new GlobalTransaction %s", tx);
         current = txFactory.newLocalTransaction(transaction, tx);
         localTransactions.put(transaction, current);
         notifier.notifyTransactionRegistered(tx, ctx);
View Full Code Here

      public void onDataRehashedEvent(DataRehashedEvent dre) {
         // do all the work AFTER the consistent hash has changed
         if (dre.isPre())
            return;

         Address self = rpcManager.getAddress();

         if (configuration.isEagerLockingSingleNodeInUse()) {
            // roll back local transactions if their main data owner has changed
            for (LocalTransaction localTx : localTransactions.values()) {
               for (Object key : localTx.getAffectedKeys()) {
View Full Code Here

      int actualReplCount = Math.min(replCount, caches.size());
      int normalizedHash = getNormalizedHash(getGrouping(key));
      List<Address> owners = new ArrayList<Address>(replCount);

      for (Iterator<Map.Entry<Integer, Address>> it = getPositionsIterator(normalizedHash); it.hasNext();) {
         Address a = it.next().getValue();
         // if virtual nodes are enabled we have to avoid duplicate addresses
         if (!(isVirtualNodesEnabled() && owners.contains(a))) {
            if (a == target)
               return null;
View Full Code Here

   public <T, K> Future<T> submit(Callable<T> task, K... input) {
      if (task == null) throw new NullPointerException();     
     
      if(inputKeysSpecified(input)){
         Map<Address, List<K>> nodesKeysMap = mapKeysToNodes(input);
         Address me = rpc.getAddress();
         DistributedExecuteCommand<T> c = factory.buildDistributedExecuteCommand(task, me, Arrays.asList(input));
         DistributedRunnableFuture<T> f = new DistributedRunnableFuture<T>(c);
         ArrayList<Address> nodes = new ArrayList<Address>(nodesKeysMap.keySet());
         executeFuture(selectExecutionNode(nodes), f);        
         return f;
View Full Code Here

   @Override
   public <T> List<Future<T>> submitEverywhere(Callable<T> task) {
      if (task == null) throw new NullPointerException();
      List<Future<T>> futures = new ArrayList<Future<T>>();
      List<Address> members = rpc.getTransport().getMembers();
      Address me = rpc.getAddress();
      for (Address target : members) {
         DistributedExecuteCommand<T> c = null;
         if (target.equals(me)) {
            c = factory.buildDistributedExecuteCommand(clone(task), me, null);
         } else {
View Full Code Here

   @Override
   public <T, K> List<Future<T>> submitEverywhere(Callable<T> task, K... input) {
      if (task == null) throw new NullPointerException();
      if(inputKeysSpecified(input)) {
         List<Future<T>> futures = new ArrayList<Future<T>>();
         Address me = rpc.getAddress();
         Map<Address, List<K>> nodesKeysMap = mapKeysToNodes(input);
         for (Entry<Address, List<K>> e : nodesKeysMap.entrySet()) {
            Address target = e.getKey();  
            DistributedExecuteCommand<T> c = null;
            if (target.equals(me)) {
               c = factory.buildDistributedExecuteCommand(clone(task), me, e.getValue());
            } else {
               c = factory.buildDistributedExecuteCommand(task, me, e.getValue());
            }
            DistributedRunnableFuture<T> f = new DistributedRunnableFuture<T>(c);
View Full Code Here

TOP

Related Classes of org.infinispan.remoting.transport.Address

Copyright © 2018 www.massapicom. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware@gmail.com.