Package org.infinispan.distribution.ch

Examples of org.infinispan.distribution.ch.ConsistentHash
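A ConsistentHash maps each cache key to a hash segment and each segment to the list of nodes (owners) that store it, with the first owner in the list acting as the primary owner. The snippets below show common uses of the interface: listing members, locating the owners of a key or segment, and reacting to rebalances and topology changes.

The first example reads the pending ConsistentHash from the current CacheTopology and reports its members, which is useful for monitoring a rebalance that is still in progress.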


   public String getPendingViewAsString() {
      CacheTopology cacheTopology = stateTransferManager.getCacheTopology();
      if (cacheTopology == null)
         return "N/A";

      ConsistentHash pendingCH = cacheTopology.getPendingCH();
      return pendingCH != null ? pendingCH.getMembers().toString() : "null";
   }
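The next example asks the DistributionManager for the current ConsistentHash and resolves the primary owner of a key with locatePrimaryOwner(key). The first two lines are the tail of a neighbouring helper that lists the cluster members via the transport.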


      return cache.getAdvancedCache().getRpcManager().getTransport().getMembers();
   }

   private Address getAddressForKey(Object key) {
      DistributionManager distributionManager = getDistributionManager();
      ConsistentHash hash = distributionManager.getConsistentHash();
      return hash.locatePrimaryOwner(key);
   }
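Membership checks go through ConsistentHash.getMembers(); here an Address is tested against the members of the current hash to decide whether the node participates in the cache.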


   private boolean isNodeInConsistentHash(Address address) {
      DistributionManager distributionManager = getDistributionManager();
      ConsistentHash hash = distributionManager.getConsistentHash();
      return hash.getMembers().contains(address);
   }
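During state transfer, the provider verifies that it actually owns the segments another node requested: getSegmentsForOwner(address) returns the segments the read consistent hash assigns to this node, and any requested segment outside that set is rejected.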

      if (trace) {
         log.tracef("Received request for transactions from node %s for segments %s of cache %s with topology id %d", destination, segments, cacheName, requestTopologyId);
      }

      final CacheTopology cacheTopology = getCacheTopology(requestTopologyId, destination, true);
      final ConsistentHash readCh = cacheTopology.getReadConsistentHash();

      Set<Integer> ownedSegments = readCh.getSegmentsForOwner(rpcManager.getAddress());
      if (!ownedSegments.containsAll(segments)) {
         segments.removeAll(ownedSegments);
         throw new IllegalArgumentException("Segments " + segments + " are not owned by " + rpcManager.getAddress());
      }
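ConsistentHash.getSegment(key) maps a single key to its segment. In the next excerpt it filters a transaction's locked and backup-locked keys down to the ones that belong to the requested segments before the transaction is transferred.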

                                              List<TransactionInfo> transactionsToTransfer,
                                              Collection<? extends CacheTransaction> transactions,
                                              Set<Integer> segments, CacheTopology cacheTopology) {
      int topologyId = cacheTopology.getTopologyId();
      List<Address> members = cacheTopology.getMembers();
      ConsistentHash readCh = cacheTopology.getReadConsistentHash();

      // no need to filter out state transfer generated transactions because there should not be any such transactions running for any of the requested segments
      for (CacheTransaction tx : transactions) {
         // Skip transactions whose originators left. The topology id check is needed for joiners.
         // Also skip transactions that originated after state transfer started.
         if (tx.getTopologyId() == topologyId || !members.contains(tx.getGlobalTransaction().getAddress()))
            continue;

         // transfer only locked keys that belong to requested segments
         Set<Object> filteredLockedKeys = new HashSet<Object>();
         Set<Object> lockedKeys = tx.getLockedKeys();
         synchronized (lockedKeys) {
            for (Object key : lockedKeys) {
               if (segments.contains(readCh.getSegment(key))) {
                  filteredLockedKeys.add(key);
               }
            }
         }
         Set<Object> backupLockedKeys = tx.getBackupLockedKeys();
         synchronized (backupLockedKeys) {
            for (Object key : backupLockedKeys) {
               if (segments.contains(readCh.getSegment(key))) {
                  filteredLockedKeys.add(key);
               }
            }
         }
         if (!filteredLockedKeys.isEmpty()) {
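When a new CacheTopology is installed, both the read and the write consistent hashes of the new and the previous topology are captured before the switch, and the exclusive topology lock ensures concurrent writes to the data container use the right consistent hash.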

      // Make sure we don't send a REBALANCE_CONFIRM command before we've added all the transfer tasks
      // even if some of the tasks are removed and re-added
      waitingForState.set(false);

      final ConsistentHash newReadCh = cacheTopology.getReadConsistentHash();
      final ConsistentHash newWriteCh = cacheTopology.getWriteConsistentHash();
      final ConsistentHash previousReadCh = this.cacheTopology != null ? this.cacheTopology.getReadConsistentHash() : null;
      final ConsistentHash previousWriteCh = this.cacheTopology != null ? this.cacheTopology.getWriteConsistentHash() : null;
      // Ensures writes to the data container use the right consistent hash
      // No need for a try/finally block, since it's just an assignment
      stateTransferLock.acquireExclusiveTopologyLock();
      this.cacheTopology = cacheTopology;
      if (isRebalance) {
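applyState() relies on the write consistent hash to decide whether incoming state is still wanted: the local node must still be a member, the response must match the rebalance currently in progress, and every received StateChunk must carry a segment contained in getSegmentsForOwner(rpcManager.getAddress()).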

      return consistentHash.getMembers().contains(address) ? consistentHash.getSegmentsForOwner(address)
            : InfinispanCollections.<Integer>emptySet();
   }

   public void applyState(Address sender, int topologyId, Collection<StateChunk> stateChunks) {
      ConsistentHash wCh = cacheTopology.getWriteConsistentHash();
      // Ignore responses received after we are no longer a member
      if (!wCh.getMembers().contains(rpcManager.getAddress())) {
         if (trace) {
            log.tracef("Ignoring received state because we are no longer a member of cache %s", cacheName);
         }
         return;
      }

      // Ignore segments that we requested for a previous rebalance
      // Can happen when the coordinator leaves, and the new coordinator cancels the rebalance in progress
      int rebalanceTopologyId = stateTransferTopologyId.get();
      if (rebalanceTopologyId == NO_REBALANCE_IN_PROGRESS) {
         log.debugf("Discarding state response with topology id %d for cache %s, we don't have a state transfer in progress",
               topologyId, cacheName);
         return;
      }
      if (topologyId < rebalanceTopologyId) {
         log.debugf("Discarding state response with old topology id %d for cache %s, state transfer request topology was %d",
               topologyId, cacheName, rebalanceTopologyId);
         return;
      }

      if (trace) {
         log.tracef("Before applying the received state the data container of cache %s has %d keys", cacheName, dataContainer.size());
      }

      Set<Integer> mySegments = wCh.getSegmentsForOwner(rpcManager.getAddress());
      for (StateChunk stateChunk : stateChunks) {
         if (!mySegments.contains(stateChunk.getSegmentId())) {
            log.warnf("Discarding received cache entries for segment %d of cache %s because they do not belong to this node.", stateChunk.getSegmentId(), cacheName);
            continue;
         }
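A @DataRehashed listener can compare the consistent hash at the start and at the end of a rehash. getPrimarySegmentsForOwner(localAddress) gives the segments this node primarily owned before and after; the difference identifies segments that are moving away and whose in-flight work will have to be requested again from the new owner.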

    * and subsequently request it again from the new owner later.
    * @param event The data rehash event
    */
   @DataRehashed
   public void dataRehashed(DataRehashedEvent event) {
      ConsistentHash startHash = event.getConsistentHashAtStart();
      ConsistentHash endHash = event.getConsistentHashAtEnd();
      boolean trace = log.isTraceEnabled();
      if (event.isPre() && startHash != null && endHash != null) {
         log.tracef("Data rehash occurring startHash: %s and endHash: %s", startHash, endHash);

         if (!changeListener.isEmpty()) {
            if (trace) {
               log.tracef("Previous segments %s ", startHash.getPrimarySegmentsForOwner(localAddress));
               log.tracef("After segments %s ", endHash.getPrimarySegmentsForOwner(localAddress));
            }
            // we don't care about newly added segments, since that means our run wouldn't include them anyway
            Set<Integer> beforeSegments = new HashSet<Integer>(startHash.getPrimarySegmentsForOwner(localAddress));
            // Any segments that were owned before but aren't owned now need handling - we don't care about new
            // segments since our current request shouldn't be working on them - they will have to be retrieved later
            beforeSegments.removeAll(endHash.getPrimarySegmentsForOwner(localAddress));
            if (!beforeSegments.isEmpty()) {
               // We have to make sure all current listeners get the newest hashes updated.  This has to occur for
               // new nodes and nodes leaving as the hash segments will change in both cases.
               for (Map.Entry<UUID, SegmentChangeListener> entry : changeListener.entrySet()) {
                  if (trace) {
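A @TopologyChanged listener can detect leavers by diffing the membership of the two hashes: every address present in getMembers() of the starting hash but missing from the ending hash has left the cluster.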

    * @param event The topology change event
    */
   @TopologyChanged
   public void topologyChanged(TopologyChangedEvent event) {
      if (event.isPre()) {
         ConsistentHash beforeHash = event.getConsistentHashAtStart();
         ConsistentHash afterHash = event.getConsistentHashAtEnd();

         currentHash.set(afterHash);
         boolean trace = log.isTraceEnabled();

         if (beforeHash != null && afterHash != null) {
            if (trace) {
               log.tracef("Rehash hashes before %s after %s", beforeHash, afterHash);
            }
            Set<Address> leavers = new HashSet<Address>(beforeHash.getMembers());
            leavers.removeAll(afterHash.getMembers());
            if (!leavers.isEmpty() && trace) {
               log.tracef("Found leavers are %s", leavers);
            }

            for (Map.Entry<UUID, IterationStatus<K, V, ? extends Object>> details : iteratorDetails.entrySet()) {
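locatePrimaryOwnerForSegment(segment) answers the reverse question: given a segment, which node is its primary owner. The last example uses it to split the requested segments into those this node can still process locally and those that are now in doubt because primary ownership has moved elsewhere.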

   private <H, C extends H> void startRetrievingValues(final UUID identifier, final Set<Integer> segments,
                                         final KeyValueFilter<? super K, ? super V> filter,
                                         final Converter<? super K, ? super V, C> converter,
                                         final Set<Flag> flags, final SegmentBatchHandler<K, H> handler) {
      ConsistentHash hash = getCurrentHash();
      final Set<Integer> inDoubtSegments = new HashSet<>(segments.size());
      boolean canTryProcess = false;
      Iterator<Integer> iter = segments.iterator();
      while (iter.hasNext()) {
         Integer segment = iter.next();
         // If we still own any segments try to process
         if (localAddress.equals(hash.locatePrimaryOwnerForSegment(segment))) {
            canTryProcess = true;
         } else {
            inDoubtSegments.add(segment);
            iter.remove();
         }
      }
      if (canTryProcess) {
         executorService.execute(new Runnable() {

            @Override
            public void run() {
               Set<Integer> segmentsToUse = segments;
               Set<Integer> inDoubtSegmentsToUse = inDoubtSegments;
               ConsistentHash hashToUse = getCurrentHash();
               // this will stay as true for a local invocation until all local segments have been processed
               // a non local will set this to false at the end every time
               boolean repeat = true;
               while (repeat) {
                  if (log.isTraceEnabled()) {
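Putting the pieces together, the sketch below shows how application code might inspect key ownership through the same API. It is a minimal illustration, assuming a started cache running in clustered, distributed mode; the class and method names (OwnershipInspector, describeOwnership) are made up for this example and are not part of Infinispan.

import java.util.List;
import java.util.Set;

import org.infinispan.Cache;
import org.infinispan.distribution.DistributionManager;
import org.infinispan.distribution.ch.ConsistentHash;
import org.infinispan.remoting.transport.Address;

public class OwnershipInspector {

   // assumed to be an already started, clustered cache in distribution mode
   private final Cache<String, String> cache;

   public OwnershipInspector(Cache<String, String> cache) {
      this.cache = cache;
   }

   public void describeOwnership(String key) {
      DistributionManager dm = cache.getAdvancedCache().getDistributionManager();
      ConsistentHash ch = dm.getConsistentHash();

      int segment = ch.getSegment(key);              // segment the key hashes to
      List<Address> owners = ch.locateOwners(key);   // all owners, primary owner first
      Address primary = ch.locatePrimaryOwner(key);  // same as owners.get(0)

      // segments for which the local node is an owner
      Address self = cache.getAdvancedCache().getRpcManager().getAddress();
      Set<Integer> localSegments = ch.getSegmentsForOwner(self);

      System.out.printf("key=%s segment=%d primary=%s owners=%s ownedLocally=%b%n",
            key, segment, primary, owners, localSegments.contains(segment));
   }
}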
