Package voldemort.client.protocol.admin

Examples of voldemort.client.protocol.admin.AdminClient
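
The snippets below appear to come from the Voldemort test suite and admin tooling. As a minimal sketch of the common pattern they share (the bootstrap URL is a placeholder and both configs are left at their defaults):

    AdminClient adminClient = new AdminClient("tcp://localhost:6666",
                                              new AdminClientConfig(),
                                              new ClientConfig());
    try {
        // ... invoke admin operations, e.g. adminClient.metadataMgmtOps or adminClient.readonlyOps ...
    } finally {
        adminClient.close();
    }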


            String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
            final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl,
                                                                                                finalCluster);

            try {
                AdminClient adminClient = rebalanceKit.controller.getAdminClient();
                populateData(currentCluster, rwStoreDefWithReplication, adminClient, false);

                // Figure out the positive and negative keys to check
                List<ByteArray> positiveTestKeyList = sampleKeysFromPartition(adminClient,
                                                                              0,
View Full Code Here


            populateData(updatedCurrentCluster,
                         rwStoreDefWithReplication,
                         rebalanceKit.controller.getAdminClient(),
                         false);

            final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
            // the plan would cause these partitions to move
            // Partition : Donor -> Stealer
            // p2 (SEC) : s1 -> s0
            // p3 (PRI) : s1 -> s2
            final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient,
                                                                           1,
                                                                           rwStoreDefWithReplication.getName(),
                                                                           Arrays.asList(2, 3),
                                                                           20);
            assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
            final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
            final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
            final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
            final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();

            for(String key: baselineTuples.keySet()) {
                baselineVersions.put(key, new VectorClock());
            }

            final CountDownLatch latch = new CountDownLatch(2);
            // start the proxy put operation.
            executors.execute(new Runnable() {

                @Override
                public void run() {
                    SocketStoreClientFactory factory = null;
                    try {
                        // wait for the rebalancing to begin.
                        List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0),
                                                                              serverMap.get(2));
                        while(!rebalancingComplete.get()) {
                            Iterator<VoldemortServer> serverIterator = serverList.iterator();
                            while(serverIterator.hasNext()) {
                                VoldemortServer server = serverIterator.next();
                                if(ByteUtils.getString(server.getMetadataStore()
                                                             .get(MetadataStore.SERVER_STATE_KEY,
                                                                  null)
                                                             .get(0)
                                                             .getValue(),
                                                       "UTF-8")
                                            .compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                    logger.info("Server " + server.getIdentityNode().getId()
                                                + " transitioned into REBALANCING MODE");
                                    serverIterator.remove();
                                }
                            }
                            if(serverList.size() == 0) {
                                rebalancingStarted.set(true);
                                break;
                            }
                        }

                        if(!rebalancingComplete.get()) {
                            factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster,
                                                                                                                       0))
                                                                                     .setEnableLazy(false)
                                                                                     .setSocketTimeout(120,
                                                                                                       TimeUnit.SECONDS));

                            final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW,
                                                                                                                     null,
                                                                                                                     factory,
                                                                                                                     3);
                            // Now perform some writes and determine the end
                            // state of the changed keys. Initially, all data
                            // has a zero vector clock.
                            for(ByteArray movingKey: movingKeysList) {
                                try {
                                    if(rebalancingComplete.get()) {
                                        break;
                                    }
                                    String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                    String valStr = "proxy_write";
                                    storeClientRW.put(keyStr, valStr);
                                    baselineTuples.put(keyStr, valStr);
                                    // all these keys will have a [2:1] vector clock
                                    // since node 2 is the pseudo master in both moves
                                    baselineVersions.get(keyStr)
                                                    .incrementVersion(2, System.currentTimeMillis());
                                    proxyWritesDone.set(true);
                                } catch(InvalidMetadataException e) {
                                    // let this go
                                    logger.error("Encountered an invalid metadata exception.. ", e);
                                }
                            }
                        }
                    } catch(Exception e) {
                        logger.error("Exception in proxy put thread", e);
                        exceptions.add(e);
                    } finally {
                        if(factory != null)
                            factory.close();
                        latch.countDown();
                    }
                }

            });

            executors.execute(new Runnable() {

                @Override
                public void run() {
                    try {
                        rebalanceKit.rebalance();
                    } catch(Exception e) {
                        logger.error("Error in rebalancing... ", e);
                        exceptions.add(e);
                    } finally {
                        rebalancingComplete.set(true);
                        latch.countDown();
                    }
                }
            });

            latch.await();
            executors.shutdown();
            executors.awaitTermination(300, TimeUnit.SECONDS);

            assertEquals("Client did not see all server transition into rebalancing state",
                         rebalancingStarted.get(),
                         true);
            assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
            checkEntriesPostRebalance(updatedCurrentCluster,
                                      finalCluster,
                                      Lists.newArrayList(rwStoreDefWithReplication),
                                      Arrays.asList(0, 1, 2),
                                      baselineTuples,
                                      baselineVersions);
            checkConsistentMetadata(finalCluster, serverList);
            // check that no exceptions were thrown
            if(exceptions.size() > 0) {

                for(Exception e: exceptions) {
                    e.printStackTrace();
                }
                fail("Should not see any exceptions.");
            }
            // check that the proxy writes were made to the original donor, node 1
            List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
            for(Integer nodeid: serverList)
                clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
            VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
            for(Integer nodeid: serverList)
                adminClient.metadataMgmtOps.updateRemoteCluster(nodeid,
                                                                currentCluster,
                                                                clusterXmlClock);

            adminClient.setAdminClientCluster(currentCluster);
            checkForTupleEquivalence(adminClient,
                                     1,
                                     testStoreNameRW,
                                     movingKeysList,
                                     baselineTuples,
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "delete store")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "rollback read-only store")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here
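
Both command snippets above (and the truncate variants further down) share the same preamble: confirm the action, build an AdminClient from the URL, and expand an all-nodes option into concrete node ids. A condensed sketch of that shared shape, with the command label as a placeholder:

    // Sketch of the shared admin-command preamble; confirm, url, allNodes and
    // nodeIds are assumed to be fields/arguments as in the snippets above.
    if(!AdminToolUtils.askConfirm(confirm, "admin command")) {
        return;
    }
    AdminClient adminClient = AdminToolUtils.getAdminClient(url);
    if(allNodes) {
        nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
    }
    // ... then run the per-node operation against each id in nodeIds ...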

                                + nodeId + "( " + partitionIds + " )");
                }

                @Override
                public void operate() {
                    AdminClient adminClient = AdminClient.createTempAdminClient(voldemortConfig,
                                                                                metadataStore.getCluster(),
                                                                                voldemortConfig.getClientMaxConnectionsPerNode());
                    try {
                        StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository,
                                                                                                  storeName);

                        EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxWriteBytesPerSec());

                        if(isReadOnlyStore) {
                            ReadOnlyStorageEngine readOnlyStorageEngine = ((ReadOnlyStorageEngine) storageEngine);
                            String destinationDir = readOnlyStorageEngine.getCurrentDirPath();
                            logger.info("Fetching files for RO store '" + storeName
                                        + "' from node " + nodeId + " ( " + partitionIds + " )");
                            updateStatus("Fetching files for RO store '" + storeName
                                         + "' from node " + nodeId + " ( " + partitionIds + " )");

                            adminClient.readonlyOps.fetchPartitionFiles(nodeId,
                                                                        storeName,
                                                                        partitionIds,
                                                                        destinationDir,
                                                                        readOnlyStorageEngine.getChunkedFileSet()
                                                                                             .getChunkIdToNumChunks()
                                                                                             .keySet(),
                                                                        running);

                        } else {
                            logger.info("Fetching entries for RW store '" + storeName
                                        + "' from node " + nodeId + " ( " + partitionIds + " )");
                            updateStatus("Fetching entries for RW store '" + storeName
                                         + "' from node " + nodeId + " ( " + partitionIds + " ) ");

                            if(partitionIds.size() > 0) {
                                Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = adminClient.bulkFetchOps.fetchEntries(nodeId,
                                                                                                                                     storeName,
                                                                                                                                     partitionIds,
                                                                                                                                     filter,
                                                                                                                                     false,
                                                                                                                                     initialCluster,
                                                                                                                                     0);
                                long numTuples = 0;
                                long startTime = System.currentTimeMillis();
                                long startNs = System.nanoTime();
                                while(running.get() && entriesIterator.hasNext()) {

                                    Pair<ByteArray, Versioned<byte[]>> entry = entriesIterator.next();
                                    if(streamingStats != null) {
                                        streamingStats.reportNetworkTime(Operation.UPDATE_ENTRIES,
                                                                         Utils.elapsedTimeNs(startNs,
                                                                                             System.nanoTime()));
                                    }
                                    ByteArray key = entry.getFirst();
                                    Versioned<byte[]> value = entry.getSecond();
                                    startNs = System.nanoTime();
                                    try {
                                        /**
                                         * TODO This also needs to be fixed to
                                         * use the atomic multi version puts
                                         */
                                        storageEngine.put(key, value, null);
                                    } catch(ObsoleteVersionException e) {
                                        // log and ignore
                                        logger.debug("Fetch and update threw Obsolete version exception. Ignoring");
                                    } finally {
                                        if(streamingStats != null) {
                                            streamingStats.reportStreamingPut(Operation.UPDATE_ENTRIES);
                                            streamingStats.reportStorageTime(Operation.UPDATE_ENTRIES,
                                                                             Utils.elapsedTimeNs(startNs,
                                                                                                 System.nanoTime()));
                                        }
                                    }

                                    long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                                    throttler.maybeThrottle(key.length() + valueSize(value));
                                    if((numTuples % 100000) == 0 && numTuples > 0) {
                                        logger.info(numTuples + " entries copied from node "
                                                    + nodeId + " for store '" + storeName + "'c");
                                        updateStatus(numTuples + " entries copied from node "
                                                     + nodeId + " for store '" + storeName
                                                     + "' in " + totalTime + " seconds");
                                    }
                                    numTuples++;
                                    startNs = System.nanoTime();
                                }

                                long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                                if(running.get()) {
                                    logger.info("Completed fetching " + numTuples
                                                + " entries from node " + nodeId + " for store '"
                                                + storeName + "' in " + totalTime + " seconds");
                                } else {
                                    logger.info("Fetch and update stopped after fetching "
                                                + numTuples + " entries for node " + nodeId
                                                + " for store '" + storeName + "' in " + totalTime
                                                + " seconds");
                                }
                            } else {
                                logger.info("No entries to fetch from node " + nodeId
                                            + " for store '" + storeName + "'");
                            }
                        }

                    } finally {
                        adminClient.close();
                    }
                }
            });

        } catch(VoldemortException e) {
View Full Code Here
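
Stripped of logging and streaming statistics, the read-write branch above reduces to a fetch-and-put loop. A condensed sketch, assuming the same surrounding identifiers (adminClient, storageEngine, throttler, running, nodeId, storeName, partitionIds, filter, initialCluster, valueSize):

    Iterator<Pair<ByteArray, Versioned<byte[]>>> entries =
            adminClient.bulkFetchOps.fetchEntries(nodeId, storeName, partitionIds,
                                                  filter, false, initialCluster, 0);
    while(running.get() && entries.hasNext()) {
        Pair<ByteArray, Versioned<byte[]>> entry = entries.next();
        try {
            // local write; a newer local version makes this a no-op
            storageEngine.put(entry.getFirst(), entry.getSecond(), null);
        } catch(ObsoleteVersionException e) {
            // ignore: the local copy is already at least as new
        }
        throttler.maybeThrottle(entry.getFirst().length() + valueSize(entry.getSecond()));
    }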

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "truncate partition")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allStores) {
                storeNames = AdminToolUtils.getAllUserStoreNamesOnNode(adminClient, nodeId);
            } else {
                AdminToolUtils.validateUserStoreNamesOnNode(adminClient, nodeId, storeNames);
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "truncate store")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here

    }

    public ZoneShrinkageCLI(String url, Integer droppingZoneId) {
        AdminClientConfig acc = new AdminClientConfig();
        ClientConfig cc = new ClientConfig();
        adminClient = new AdminClient(url, acc, cc);
        this.droppingZoneId = droppingZoneId;
        this.bootstrapUrl = url;
    }
View Full Code Here

     *
     * @param url URL pointing to the bootstrap node
     * @return Newly constructed AdminClient
     */
    public static AdminClient getAdminClient(String url) {
        return new AdminClient(url, new AdminClientConfig(), new ClientConfig());
    }
View Full Code Here

    }

    @JmxOperation(description = "force restore data from replication")
    public void restoreDataFromReplication(int numberOfParallelTransfers) {

        AdminClient adminClient = AdminClient.createTempAdminClient(voldemortConfig,
                                                                    metadata.getCluster(),
                                                                    numberOfParallelTransfers * 2);
        try {
            adminClient.restoreOps.restoreDataFromReplications(metadata.getNodeId(),
                                                               numberOfParallelTransfers);
        } finally {
            adminClient.close();
        }
    }
View Full Code Here
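
Two construction styles for AdminClient appear on this page: a temporary server-side client built from an existing VoldemortConfig and Cluster, and a remote client bootstrapped from a URL. Side by side, as hedged sketches (voldemortConfig, metadata and bootstrapUrl are assumed fields from the snippets above; the connection count is arbitrary):

    // server-side: short-lived client reusing the node's own config and cluster view
    AdminClient tempClient = AdminClient.createTempAdminClient(voldemortConfig,
                                                               metadata.getCluster(),
                                                               2);
    // client-side: bootstrap from a URL with default admin/client configs
    AdminClient remoteClient = new AdminClient(bootstrapUrl,
                                               new AdminClientConfig(),
                                               new ClientConfig());

In either case the client is closed in a finally block once the operation completes, as the snippets above do.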
