Examples of AbstractReplicationStrategy


Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

        expectedEndpoints.get("Keyspace4").putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.1", "127.0.0.2", "127.0.0.3"));

        for (Map.Entry<String, AbstractReplicationStrategy> tableStrategy : tableStrategyMap.entrySet())
        {
            String table = tableStrategy.getKey();
            AbstractReplicationStrategy strategy = tableStrategy.getValue();

            for (Token token : keyTokens)
            {
                endpoints = tmd.getWriteEndpoints(token, table, strategy.getNaturalEndpoints(token));
                assertTrue(expectedEndpoints.get(table).get(token).size() == endpoints.size());
                assertTrue(expectedEndpoints.get(table).get(token).containsAll(endpoints));
            }

            // just to be sure that things still work according to the old tests, run them:
            if (strategy.getReplicationFactor() != 3)
                continue;

            // tokens 5, 15 and 25 should go to three nodes
            for (int i = 0; i < 3; i++)
            {
                endpoints = tmd.getWriteEndpoints(keyTokens.get(i), table, strategy.getNaturalEndpoints(keyTokens.get(i)));
                assertTrue(endpoints.size() == 3);
                assertTrue(endpoints.contains(hosts.get(i+1)));
                assertTrue(endpoints.contains(hosts.get(i+2)));
                assertTrue(endpoints.contains(hosts.get(i+3)));
            }

            // token 35 should go to nodes 4, 5, 6 and boot1
            endpoints = tmd.getWriteEndpoints(keyTokens.get(3), table, strategy.getNaturalEndpoints(keyTokens.get(3)));
            assertTrue(endpoints.size() == 4);
            assertTrue(endpoints.contains(hosts.get(4)));
            assertTrue(endpoints.contains(hosts.get(5)));
            assertTrue(endpoints.contains(hosts.get(6)));
            assertTrue(endpoints.contains(boot1));

            // token 45 should go to nodes 5, 6, 7 and boot1
            endpoints = tmd.getWriteEndpoints(keyTokens.get(4), table, strategy.getNaturalEndpoints(keyTokens.get(4)));
            assertTrue(endpoints.size() == 4);
            assertTrue(endpoints.contains(hosts.get(5)));
            assertTrue(endpoints.contains(hosts.get(6)));
            assertTrue(endpoints.contains(hosts.get(7)));
            assertTrue(endpoints.contains(boot1));

            // token 55 should go to nodes 6, 7, 8, boot1 and boot2
            endpoints = tmd.getWriteEndpoints(keyTokens.get(5), table, strategy.getNaturalEndpoints(keyTokens.get(5)));
            assertTrue(endpoints.size() == 5);
            assertTrue(endpoints.contains(hosts.get(6)));
            assertTrue(endpoints.contains(hosts.get(7)));
            assertTrue(endpoints.contains(hosts.get(8)));
            assertTrue(endpoints.contains(boot1));
            assertTrue(endpoints.contains(boot2));

            // token 65 should go to nodes 7, 8, 9 and boot2
            endpoints = tmd.getWriteEndpoints(keyTokens.get(6), table, strategy.getNaturalEndpoints(keyTokens.get(6)));
            assertTrue(endpoints.size() == 4);
            assertTrue(endpoints.contains(hosts.get(7)));
            assertTrue(endpoints.contains(hosts.get(8)));
            assertTrue(endpoints.contains(hosts.get(9)));
            assertTrue(endpoints.contains(boot2));

            // token 75 should go to nodes 8, 9, 0 and boot2
            endpoints = tmd.getWriteEndpoints(keyTokens.get(7), table, strategy.getNaturalEndpoints(keyTokens.get(7)));
            assertTrue(endpoints.size() == 4);
            assertTrue(endpoints.contains(hosts.get(8)));
            assertTrue(endpoints.contains(hosts.get(9)));
            assertTrue(endpoints.contains(hosts.get(0)));
            assertTrue(endpoints.contains(boot2));

            // token 85 should go to nodes 9, 0, 1
            endpoints = tmd.getWriteEndpoints(keyTokens.get(8), table, strategy.getNaturalEndpoints(keyTokens.get(8)));
            assertTrue(endpoints.size() == 3);
            assertTrue(endpoints.contains(hosts.get(9)));
            assertTrue(endpoints.contains(hosts.get(0)));
            assertTrue(endpoints.contains(hosts.get(1)));

            // token 95 should go to nodes 0, 1 and 2
            endpoints = tmd.getWriteEndpoints(keyTokens.get(9), table, strategy.getNaturalEndpoints(keyTokens.get(9)));
            assertTrue(endpoints.size() == 3);
            assertTrue(endpoints.contains(hosts.get(0)));
            assertTrue(endpoints.contains(hosts.get(1)));
            assertTrue(endpoints.contains(hosts.get(2)));
        }
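
The expectedEndpoints fixture consulted above maps each keyspace name to a per-token multimap of the replicas a write is expected to reach. A minimal sketch of how such a fixture could be built with Guava multimaps; the keyspace name Keyspace4, the token 95 and the makeAddrs helper come from the snippet, everything else is an assumption:

        // sketch only: fixture shape assumed from the usage in the test above
        Map<String, Multimap<Token, InetAddress>> expectedEndpoints = new HashMap<String, Multimap<Token, InetAddress>>();
        expectedEndpoints.put("Keyspace4", HashMultimap.<Token, InetAddress>create());
        // token 95 of Keyspace4 is expected to reach exactly these three replicas
        expectedEndpoints.get("Keyspace4").putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.1", "127.0.0.2", "127.0.0.3"));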

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

                InetAddress.getByName("127.0.0.2"),
                ApplicationState.STATUS,
                vvFactory.relocating(Collections.singleton(relocateToken)));
        assertTrue(tmd.isRelocating(relocateToken));

        AbstractReplicationStrategy strategy;
        for (String table : Schema.instance.getNonSystemTables())
        {
            strategy = getStrategy(table, tmd);
            for (Token token : tokenMap.keySet())
            {
                BigIntegerToken keyToken = new BigIntegerToken(((BigInteger)token.token).add(new BigInteger("5")));

                HashSet<InetAddress> actual = new HashSet<InetAddress>(tmd.getWriteEndpoints(keyToken, table, strategy.calculateNaturalEndpoints(keyToken, tmd.cloneOnlyTokenMap())));
                HashSet<InetAddress> expected = new HashSet<InetAddress>();

                for (int i = 0; i < actual.size(); i++)
                    expected.add(expectedEndpoints.get(keyToken).get(i));
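
Both tests above rely on the two endpoint-lookup entry points of AbstractReplicationStrategy: getNaturalEndpoints(token) answers from the strategy's own view of the ring, while calculateNaturalEndpoints(token, metadata) recomputes the replicas against an explicitly supplied TokenMetadata, such as the cloneOnlyTokenMap() snapshot taken here. A small sketch of the distinction, reusing the variables from the snippet (the caching behaviour of getNaturalEndpoints is stated here as an assumption):

        // recompute replicas against an explicit snapshot of the ring state
        TokenMetadata snapshot = tmd.cloneOnlyTokenMap();
        List<InetAddress> fromSnapshot = strategy.calculateNaturalEndpoints(keyToken, snapshot);

        // lookup against the strategy's own view of the ring (may be served from a cache)
        List<InetAddress> cached = strategy.getNaturalEndpoints(keyToken);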

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

        // for each of the non-system tables, calculate the new ranges
        // which the current node will handle after moving to the new token
        for (String table : tablesToProcess)
        {
            // replication strategy of the current keyspace (aka table)
            AbstractReplicationStrategy strategy = Table.open(table).getReplicationStrategy();

            // collection of ranges this node currently serves for this keyspace
            Collection<Range> currentRanges = getRangesForEndpoint(table, localAddress);
            // collection of ranges this node will serve after moving to the new token
            Collection<Range> updatedRanges = strategy.getPendingAddressRanges(tokenMetadata_, newToken, localAddress);

            // ring ranges and the endpoints associated with them;
            // this is used to determine which nodes we should ping about range data
            Multimap<Range, InetAddress> rangeAddresses = strategy.getRangeAddresses(tokenMetadata_);

            // calculated parts of the ranges to request/stream from/to nodes in the ring
            Pair<Set<Range>, Set<Range>> rangesPerTable = calculateStreamAndFetchRanges(currentRanges, updatedRanges);

            /**
             * In this loop we go through all ranges "to fetch" and determine
             * which nodes in the ring are responsible for the data we are interested in
             */
            Multimap<Range, InetAddress> rangesToFetchWithPreferredEndpoints = ArrayListMultimap.create();
            for (Range toFetch : rangesPerTable.right)
            {
                for (Range range : rangeAddresses.keySet())
                {
                    if (range.contains(toFetch))
                    {
                        List<InetAddress> endpoints = snitch.getSortedListByProximity(localAddress, rangeAddresses.get(range));
                        // storing range and preferred endpoint set
                        rangesToFetchWithPreferredEndpoints.putAll(toFetch, endpoints);
                    }
                }
            }

            // calculate endpoints to stream current ranges to, if needed;
            // in some situations the node will keep serving current ranges as part of the new ranges
            Multimap<Range, InetAddress> rangeWithEndpoints = HashMultimap.create();

            for (Range toStream : rangesPerTable.left)
            {
                Set<InetAddress> currentEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(toStream.right, tokenMetadata_));
                Set<InetAddress> newEndpoints = ImmutableSet.copyOf(strategy.calculateNaturalEndpoints(toStream.right, tokenMetaClone));
                rangeWithEndpoints.putAll(toStream, Sets.difference(newEndpoints, currentEndpoints));
            }

            // associating table with range-to-endpoints map
            rangesToStreamByTable.put(table, rangeWithEndpoints);
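
calculateStreamAndFetchRanges above pairs the ranges this node must hand off (left, iterated as toStream) with the ranges it must acquire (right, iterated as toFetch). A simplified sketch of that split, under the assumption that ranges are compared as whole objects; the real implementation also has to split ranges that only partially overlap:

            // ranges served now but not after the move: stream them to their new owners
            Set<Range> toStream = new HashSet<Range>(currentRanges);
            toStream.removeAll(updatedRanges);

            // ranges served after the move but not now: fetch them from their current owners
            Set<Range> toFetch = new HashSet<Range>(updatedRanges);
            toFetch.removeAll(currentRanges);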

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

                                                     Runnable callback,
                                                     WriteType writeType)
    throws UnavailableException, OverloadedException, IOException
    {
        String table = mutation.getTable();
        AbstractReplicationStrategy rs = Table.open(table).getReplicationStrategy();

        Token tk = StorageService.getPartitioner().getToken(mutation.key());
        List<InetAddress> naturalEndpoints = StorageService.instance.getNaturalEndpoints(table, tk);
        Collection<InetAddress> pendingEndpoints = StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, table);

        AbstractWriteResponseHandler responseHandler = rs.getWriteResponseHandler(naturalEndpoints, pendingEndpoints, consistency_level, callback, writeType);

        // exit early if we can't fulfill the CL at this time
        responseHandler.assureSufficientLiveNodes();

        performer.apply(mutation, Iterables.concat(naturalEndpoints, pendingEndpoints), responseHandler, localDataCenter, consistency_level);
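
Note that the mutation is applied to Iterables.concat(naturalEndpoints, pendingEndpoints): endpoints that are pending ownership of the key's range (for example, bootstrapping nodes) receive the write as well, so they do not miss updates made while they are joining. A minimal sketch of that fan-out; sendToReplica is a hypothetical stand-in for what the performer does per target:

        // fan the write out to the current replicas plus the pending (joining) replicas
        for (InetAddress target : Iterables.concat(naturalEndpoints, pendingEndpoints))
            sendToReplica(mutation, target, responseHandler);   // hypothetical helper, not a real API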

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

    }

    // same as above, except it does not initiate writes (but does perform availability checks).
    private static WriteResponseHandlerWrapper wrapResponseHandler(RowMutation mutation, ConsistencyLevel consistency_level, WriteType writeType)
    {
        AbstractReplicationStrategy rs = Table.open(mutation.getTable()).getReplicationStrategy();
        String table = mutation.getTable();
        Token tk = StorageService.getPartitioner().getToken(mutation.key());
        List<InetAddress> naturalEndpoints = StorageService.instance.getNaturalEndpoints(table, tk);
        Collection<InetAddress> pendingEndpoints = StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, table);
        AbstractWriteResponseHandler responseHandler = rs.getWriteResponseHandler(naturalEndpoints, pendingEndpoints, consistency_level, null, writeType);
        return new WriteResponseHandlerWrapper(responseHandler, mutation);
    }

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

        }
        else
        {
            // Exit now if we can't fulfill the CL here instead of forwarding to the leader replica
            String table = cm.getTable();
            AbstractReplicationStrategy rs = Table.open(table).getReplicationStrategy();
            Token tk = StorageService.getPartitioner().getToken(cm.key());
            List<InetAddress> naturalEndpoints = StorageService.instance.getNaturalEndpoints(table, tk);
            Collection<InetAddress> pendingEndpoints = StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, table);

            rs.getWriteResponseHandler(naturalEndpoints, pendingEndpoints, cm.consistency(), null, WriteType.COUNTER).assureSufficientLiveNodes();

            // Forward the actual update to the chosen leader replica
            AbstractWriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.COUNTER);

            if (logger.isTraceEnabled())

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

        }
        else
        {
            // Exit now if we can't fulfill the CL here instead of forwarding to the leader replica
            String table = cm.getTable();
            AbstractReplicationStrategy rs = Table.open(table).getReplicationStrategy();
            Token tk = StorageService.getPartitioner().getToken(cm.key());
            List<InetAddress> naturalEndpoints = StorageService.instance.getNaturalEndpoints(table, tk);
            Collection<InetAddress> pendingEndpoints = StorageService.instance.getTokenMetadata().pendingEndpointsFor(tk, table);

            rs.getWriteResponseHandler(naturalEndpoints, pendingEndpoints, cm.consistency(), null, WriteType.COUNTER).assureSufficientLiveNodes();

            // Forward the actual update to the chosen leader replica
            AbstractWriteResponseHandler responseHandler = new WriteResponseHandler(endpoint, WriteType.COUNTER);

            Tracing.trace("Enqueuing counter update to {}", endpoint);

Examples of org.apache.cassandra.locator.AbstractReplicationStrategy

                                                     String localDataCenter,
                                                     WritePerformer performer)
    throws UnavailableException, TimeoutException, IOException
    {
        String table = mutation.getTable();
        AbstractReplicationStrategy rs = Table.open(table).getReplicationStrategy();

        Collection<InetAddress> writeEndpoints = getWriteEndpoints(table, mutation.key());

        IWriteResponseHandler responseHandler = rs.getWriteResponseHandler(writeEndpoints, consistency_level);

        // exit early if we can't fulfill the CL at this time
        responseHandler.assureSufficientLiveNodes();

        performer.apply(mutation, writeEndpoints, responseHandler, localDataCenter, consistency_level);