Package voldemort.cluster

Examples of voldemort.cluster.Cluster


        assertEquals(Utils.nodeListToNodeIdList(nodes).get(0).intValue(), 0);
    }

    @Test
    public void testGetClusterWithNewNodes() {
        Cluster cluster = ServerTestUtils.getLocalCluster(2, 10, 1);

        // Generate a new cluster which contains 4 nodes instead of 2
        List<Node> nodes = Lists.newArrayList();
        for(int nodeId = 0; nodeId < 4; nodeId++) {
            List<Integer> partitionIds = Lists.newArrayList();
            for(int partitionId = nodeId * 5; partitionId < (nodeId + 1) * 5; partitionId++) {
                partitionIds.add(partitionId);
            }
            Node node = new Node(nodeId, "b", 0, 1, 2, 0, partitionIds);
            nodes.add(node);
        }
        Cluster newCluster = new Cluster(cluster.getName(),
                                         nodes,
                                         Lists.newArrayList(cluster.getZones()));

        Cluster generatedCluster = RebalanceUtils.getInterimCluster(cluster, newCluster);
        assertEquals(generatedCluster.getNumberOfNodes(), 4);
        assertEquals(Utils.compareList(generatedCluster.getNodeById(0).getPartitionIds(),
                                       cluster.getNodeById(0).getPartitionIds()), true);
        assertEquals(Utils.compareList(generatedCluster.getNodeById(1).getPartitionIds(),
                                       cluster.getNodeById(1).getPartitionIds()), true);
        assertEquals(generatedCluster.getNodeById(2).getPartitionIds().size(), 0);
        assertEquals(generatedCluster.getNodeById(3).getPartitionIds().size(), 0);
    }
View Full Code Here


    private void doClusterTransformationBase(Cluster currentC,
                                             Cluster interimC,
                                             Cluster finalC,
                                             boolean verify) {
        Cluster derivedInterim1 = RebalanceUtils.getInterimCluster(currentC, interimC);
        if(verify)
            assertEquals(interimC, derivedInterim1);

        Cluster derivedInterim2 = RebalanceUtils.getInterimCluster(currentC, finalC);
        if(verify)
            assertEquals(interimC, derivedInterim2);

        RebalanceUtils.validateCurrentFinalCluster(currentC, finalC);
        RebalanceUtils.validateCurrentInterimCluster(currentC, interimC);
View Full Code Here

        System.exit(1);
    }

    private List<Integer> getNodes(int partition) {
        List<Integer> rv = new LinkedList<Integer>();
        Cluster cluster = adminClient.getAdminClientCluster();
        for(Node node: cluster.getNodes()) {
            if(node.getPartitionIds().contains(partition))
                rv.add(node.getId());
        }

        return rv;
View Full Code Here

        return rv;
    }

    private List<Integer> getPartitions(int nodeId) {
        Cluster cluster = adminClient.getAdminClientCluster();
        Node node = cluster.getNodeById(nodeId);
        return node.getPartitionIds();
    }
View Full Code Here

                        String clusterXMLPath = metadataValuePair.get(metadataKeyPair.indexOf(MetadataStore.CLUSTER_KEY));
                        clusterXMLPath = clusterXMLPath.replace("~",
                                                                System.getProperty("user.home"));
                        if(!Utils.isReadableFile(clusterXMLPath))
                            throw new VoldemortException("Cluster xml file path incorrect");
                        Cluster cluster = clusterMapper.readCluster(new File(clusterXMLPath));

                        String storesXMLPath = metadataValuePair.get(metadataKeyPair.indexOf(MetadataStore.STORES_KEY));
                        storesXMLPath = storesXMLPath.replace("~", System.getProperty("user.home"));
                        if(!Utils.isReadableFile(storesXMLPath))
                            throw new VoldemortException("Stores definition xml file path incorrect");
                        List<StoreDefinition> newStoreDefs = storeDefsMapper.readStoreList(new File(storesXMLPath));
                        StoreDefinitionUtils.validateSchemasAsNeeded(newStoreDefs);

                        executeSetMetadataPair(nodeId,
                                               adminClient,
                                               MetadataStore.CLUSTER_KEY,
                                               clusterMapper.writeCluster(cluster),
                                               MetadataStore.STORES_KEY,
                                               storeDefsMapper.writeStoreList(newStoreDefs));
                        executeUpdateMetadataVersionsOnStores(adminClient,
                                                              oldStoreDefs,
                                                              newStoreDefs);
                    } else {
                        throw new VoldemortException("set-metadata-pair keys should be <cluster.xml, stores.xml>");
                    }
                }
            } else if(options.has("set-metadata")) {

                String metadataKey = (String) options.valueOf("set-metadata");
                if(!options.has("set-metadata-value")) {
                    throw new VoldemortException("Missing set-metadata-value");
                } else {
                    String metadataValue = (String) options.valueOf("set-metadata-value");
                    if(metadataKey.compareTo(MetadataStore.CLUSTER_KEY) == 0
                       || metadataKey.compareTo(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML) == 0) {
                        if(!Utils.isReadableFile(metadataValue))
                            throw new VoldemortException("Cluster xml file path incorrect");
                        ClusterMapper mapper = new ClusterMapper();
                        Cluster newCluster = mapper.readCluster(new File(metadataValue));
                        if(options.has("auto")) {
                            executeSetMetadata(nodeId,
                                               adminClient,
                                               metadataKey,
                                               mapper.writeCluster(newCluster));
View Full Code Here

                    addMetadataValue(storeDefMap, storeDef, nodeName);
                }
            } else {
                if(metadataKey.compareTo(MetadataStore.CLUSTER_KEY) == 0
                   || metadataKey.compareTo(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML) == 0) {
                    Cluster cluster = new ClusterMapper().readCluster(new StringReader(versioned.getValue()));
                    addMetadataValue(metadataNodeValueMap, cluster, nodeName);
                } else if(metadataKey.compareTo(MetadataStore.SERVER_STATE_KEY) == 0) {
                    VoldemortState voldemortStateValue = VoldemortState.valueOf(versioned.getValue());
                    addMetadataValue(metadataNodeValueMap, voldemortStateValue, nodeName);
                } else {
View Full Code Here

    private static void executeShowRoutingPlan(AdminClient adminClient,
                                               String storeName,
                                               List<String> keyList) throws DecoderException {

        Cluster cluster = adminClient.getAdminClientCluster();
        List<StoreDefinition> storeDefs = adminClient.metadataMgmtOps.getRemoteStoreDefList()
                                                                     .getValue();
        StoreDefinition storeDef = StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs,
                                                                                   storeName);
        StoreRoutingPlan routingPlan = new StoreRoutingPlan(cluster, storeDef);
        BaseStoreRoutingPlan bRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDef);

        final int COLUMN_WIDTH = 30;

        for(String keyStr: keyList) {
            byte[] key = ByteUtils.fromHexString(keyStr);
            System.out.println("Key :" + keyStr);
            System.out.println("Replicating Partitions :"
                               + routingPlan.getReplicatingPartitionList(key));
            System.out.println("Replicating Nodes :");
            List<Integer> nodeList = routingPlan.getReplicationNodeList(routingPlan.getMasterPartitionId(key));
            for(int i = 0; i < nodeList.size(); i++) {
                System.out.println(nodeList.get(i) + "\t"
                                   + cluster.getNodeById(nodeList.get(i)).getHost());
            }

            System.out.println("Zone Nary information :");
            HashMap<Integer, Integer> zoneRepMap = storeDef.getZoneReplicationFactor();

            for(Zone zone: cluster.getZones()) {
                System.out.println("\tZone #" + zone.getId());
                int numReplicas = -1;
                if(zoneRepMap == null) {
                    // non zoned cluster
                    numReplicas = storeDef.getReplicationFactor();
                } else {
                    // zoned cluster
                    if(!zoneRepMap.containsKey(zone.getId())) {
                        Utils.croak("Repfactor for Zone " + zone.getId() + " not found in storedef");
                    }
                    numReplicas = zoneRepMap.get(zone.getId());
                }

                String FormatString = "%s %s %s\n";
                System.out.format(FormatString,
                                  Utils.paddedString("REPLICA#", COLUMN_WIDTH),
                                  Utils.paddedString("PARTITION", COLUMN_WIDTH),
                                  Utils.paddedString("NODE", COLUMN_WIDTH));
                for(int i = 0; i < numReplicas; i++) {
                    Integer nodeId = bRoutingPlan.getNodeIdForZoneNary(zone.getId(), i, key);
                    Integer partitionId = routingPlan.getNodesPartitionIdForKey(nodeId, key);
                    System.out.format(FormatString,
                                      Utils.paddedString(i + "", COLUMN_WIDTH),
                                      Utils.paddedString(partitionId.toString(), COLUMN_WIDTH),
                                      Utils.paddedString(nodeId + "("
                                                         + cluster.getNodeById(nodeId).getHost()
                                                         + ")", COLUMN_WIDTH));
                }
                System.out.println();
            }
View Full Code Here

        LinkedList<Integer> proximityList2 = Lists.newLinkedList();
        proximityList2.add(0);
        zones.add(new Zone(2, proximityList2));

        // Use getZEZCluster because zone 1 does not have any partitions in it!
        Cluster cluster = getZEZCluster();
        List<Node> nodeList = new ArrayList<Node>();

        int nodeId = 0; // Needed because node IDs must be contiguous?
        for(Node node: cluster.getNodes()) {
            // Do *not* add node from zone 1.
            if(node.getZoneId() != 1) {
                Node newNode = new Node(nodeId,
                                        node.getHost(),
                                        node.getHttpPort(),
                                        node.getSocketPort(),
                                        node.getAdminPort(),
                                        node.getZoneId(),
                                        node.getPartitionIds());
                nodeList.add(newNode);
                nodeId++;
            }
        }
        Collections.sort(nodeList);
        return new Cluster(cluster.getName(), nodeList, zones);
    }
View Full Code Here

            valuesLeft.add(Integer.toString(i));
        }

        String storeName = "test";
        SerializerDefinition serDef = new SerializerDefinition("string");
        Cluster cluster = ServerTestUtils.getLocalCluster(1);
        Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);

        // write test data to text file
        File inputFile = File.createTempFile("input", ".txt", testDir);
        inputFile.deleteOnExit();
View Full Code Here

        long start = System.currentTimeMillis();
        File file = new File(args[0]);
        int repFactor = Integer.parseInt(args[1]);
        int maxVal = Integer.parseInt(args[2]);
        ClusterMapper mapper = new ClusterMapper();
        Cluster cluster = mapper.readCluster(file);
        RoutingStrategy strategy = new ConsistentRoutingStrategy(cluster, repFactor);
        JsonTypeSerializer serializer = new JsonTypeSerializer(JsonTypeDefinition.INT32);
        Map<Integer, Integer> counts = new HashMap<Integer, Integer>();

        for(int i = 0; i < maxVal; i++) {
View Full Code Here

TOP

Related Classes of voldemort.cluster.Cluster

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by Oracle Inc. Contact coftware@gmail.com.