Package voldemort.client.protocol.admin

Examples of voldemort.client.protocol.admin.AdminClient$QuotaManagementOperations


                                                                        storeXmlFile,
                                                                        props);
            server = ServerTestUtils.startVoldemortServer(socketStoreFactory,
                                                          config,
                                                          currentCluster);
            AdminClient adminClient = ServerTestUtils.getAdminClient(currentCluster);

            // this is just some random admin operation to trigger the loading
            // of the fetcher class when request handler is instantiated
            adminClient.readonlyOps.getROMaxVersionDir(0, new ArrayList<String>());
View Full Code Here


        String clusterStr = FileUtils.readFileToString(new File(clusterXml));
        Cluster cluster = new ClusterMapper().readCluster(new StringReader(clusterStr));
        ExecutorService executor = Executors.newFixedThreadPool(cluster.getNumberOfNodes());
        StoreSwapper swapper = null;
        AdminClient adminClient = null;

        DefaultHttpClient httpClient = null;
        if(useAdminServices) {
            adminClient = new AdminClient(cluster, new AdminClientConfig(), new ClientConfig());
            swapper = new AdminStoreSwapper(cluster, executor, adminClient, timeoutMs);
        } else {
            int numConnections = cluster.getNumberOfNodes() + 3;
            ThreadSafeClientConnManager connectionManager = new ThreadSafeClientConnManager();
            httpClient = new DefaultHttpClient(connectionManager);

            HttpParams clientParams = httpClient.getParams();

            connectionManager.setMaxTotal(numConnections);
            connectionManager.setDefaultMaxPerRoute(numConnections);
            HttpConnectionParams.setSoTimeout(clientParams, timeoutMs);

            swapper = new HttpStoreSwapper(cluster, executor, httpClient, mgmtPath);
        }

        try {
            long start = System.currentTimeMillis();
            if(rollbackStore) {
                swapper.invokeRollback(storeName, pushVersion.longValue());
            } else {
                swapper.swapStoreData(storeName, filePath, pushVersion.longValue());
            }
            long end = System.currentTimeMillis();
            logger.info("Succeeded on all nodes in " + ((end - start) / Time.MS_PER_SECOND)
                        + " seconds.");
        } finally {
            if(useAdminServices && adminClient != null)
                adminClient.close();
            executor.shutdownNow();
            executor.awaitTermination(1, TimeUnit.SECONDS);
            VoldemortIOUtils.closeQuietly(httpClient);
        }
        System.exit(0);
View Full Code Here

                format = AdminParserUtils.ARG_FORMAT_JSON;
            }

            // execute command
            File directory = AdminToolUtils.createDir(dir);
            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(!orphaned && allParts) {
                partIds = AdminToolUtils.getAllPartitions(adminClient);
            }
View Full Code Here

                                                         // Create an admin
                                                         // client which will be
                                                         // used by
                                                         // everyone
                                                         AdminClient adminClient = null;

                                                         // Executor inside
                                                         // executor - your mind
                                                         // just
                                                         // exploded!
                                                         ExecutorService internalExecutor = null;

                                                         try {
                                                             // Retrieve admin
                                                             // client for
                                                             // verification of
                                                             // schema + pushing
                                                             adminClient = new AdminClient(url,
                                                                                           new AdminClientConfig(),
                                                                                           new ClientConfig());

                                                             // Verify the store
                                                             // exists ( If not,
                                                             // add it
                                                             // the
                                                             // store )
                                                             Pair<StoreDefinition, Cluster> metadata = verifySchema(storeName,
                                                                                                                    url,
                                                                                                                    inputDirsPerStore.get(storeName),
                                                                                                                    adminClient);

                                                             // Populate the url
                                                             // to cluster
                                                             // metadata
                                                             urlToCluster.put(url,
                                                                              metadata.getSecond());

                                                             // Create output
                                                             // directory path
                                                             URI uri = new URI(url);

                                                             Path outputDirPath = new Path(outputDir
                                                                                                   + Path.SEPARATOR
                                                                                                   + storeName,
                                                                                           uri.getHost());

                                                             log.info("Running build phase for store '"
                                                                      + storeName
                                                                      + "' and url '"
                                                                      + url
                                                                      + "'. Reading from input directory '"
                                                                      + inputDirsPerStore.get(storeName)
                                                                      + "' and writing to "
                                                                      + outputDirPath);

                                                             runBuildStore(metadata.getSecond(),
                                                                           metadata.getFirst(),
                                                                           inputDirsPerStore.get(storeName),
                                                                           outputDirPath);

                                                             log.info("Finished running build phase for store "
                                                                      + storeName
                                                                      + " and url '"
                                                                      + url
                                                                      + "'. Written to directory "
                                                                      + outputDirPath);

                                                             long storePushVersion = pushVersion;
                                                             if(storePushVersion == -1L) {
                                                                 log.info("Retrieving version number for store '"
                                                                          + storeName
                                                                          + "' and cluster '"
                                                                          + url
                                                                          + "'");

                                                                 Map<String, Long> pushVersions = adminClient.readonlyOps.getROMaxVersion(Lists.newArrayList(storeName));

                                                                 if(pushVersions == null
                                                                    || !pushVersions.containsKey(storeName)) {
                                                                     throw new RuntimeException("Could not retrieve version for store '"
                                                                                                + storeName
                                                                                                + "'");
                                                                 }

                                                                 storePushVersion = pushVersions.get(storeName);
                                                                 storePushVersion++;

                                                                 log.info("Retrieved max version number for store '"
                                                                          + storeName
                                                                          + "' and cluster '"
                                                                          + url
                                                                          + "' = "
                                                                          + storePushVersion);
                                                             }

                                                             log.info("Running push for cluster url "
                                                                      + url);

                                                             // Used for
                                                             // parallel pushing
                                                             internalExecutor = Executors.newCachedThreadPool();

                                                             AdminStoreSwapper swapper = new AdminStoreSwapper(metadata.getSecond(),
                                                                                                               internalExecutor,
                                                                                                               adminClient,
                                                                                                               1000 * props.getInt("timeout.seconds",
                                                                                                                                   24 * 60 * 60),
                                                                                                               true,
                                                                                                               true);

                                                             // Convert to
                                                             // hadoop specific
                                                             // path
                                                             String outputDirPathString = outputDirPath.makeQualified(fs)
                                                                                                       .toString();

                                                             if(!fs.exists(outputDirPath)) {
                                                                 throw new RuntimeException("Output directory for store "
                                                                                            + storeName
                                                                                            + " and cluster '"
                                                                                            + url
                                                                                            + "' - "
                                                                                            + outputDirPathString
                                                                                            + " does not exist");
                                                             }

                                                             log.info("Pushing data to store '"
                                                                      + storeName + "' on cluster "
                                                                      + url + " from path  "
                                                                      + outputDirPathString
                                                                      + " with version "
                                                                      + storePushVersion);

                                                             List<String> nodeDirs = swapper.invokeFetch(storeName,
                                                                                                         outputDirPathString,
                                                                                                         storePushVersion);

                                                             log.info("Successfully pushed data to store '"
                                                                      + storeName
                                                                      + "' on cluster "
                                                                      + url
                                                                      + " from path  "
                                                                      + outputDirPathString
                                                                      + " with version "
                                                                      + storePushVersion);

                                                             return nodeDirs;
                                                         } finally {
                                                             if(internalExecutor != null) {
                                                                 internalExecutor.shutdownNow();
                                                                 internalExecutor.awaitTermination(10,
                                                                                                   TimeUnit.SECONDS);
                                                             }
                                                             if(adminClient != null) {
                                                                 adminClient.close();
                                                             }
                                                         }
                                                     }

                                                 }));

                }

            }

            for(final String storeName: storeNames) {
                for(int index = 0; index < clusterUrls.size(); index++) {
                    Pair<String, String> key = Pair.create(clusterUrls.get(index), storeName);
                    Future<List<String>> nodeDirs = fetchDirsPerStoreCluster.get(key);
                    try {
                        nodeDirPerClusterStore.put(key, nodeDirs.get());
                    } catch(Exception e) {
                        exceptions.put(key, e);
                    }
                }
            }

        } finally {
            if(executor != null) {
                executor.shutdownNow();
                executor.awaitTermination(10, TimeUnit.SECONDS);
            }
        }

        // ===== If we got exceptions during the build + push, delete data from
        // successful
        // nodes ======
        if(!exceptions.isEmpty()) {

            log.error("Got an exception during pushes. Deleting data already pushed on successful nodes");

            for(int index = 0; index < clusterUrls.size(); index++) {
                String clusterUrl = clusterUrls.get(index);
                Cluster cluster = urlToCluster.get(clusterUrl);

                AdminClient adminClient = null;
                try {
                    adminClient = new AdminClient(cluster,
                                                  new AdminClientConfig(),
                                                  new ClientConfig());
                    for(final String storeName: storeNames) {
                        // Check if the [ cluster , store name ] succeeded. We
                        // need to roll it back
                        Pair<String, String> key = Pair.create(clusterUrl, storeName);

                        if(nodeDirPerClusterStore.containsKey(key)) {
                            List<String> nodeDirs = nodeDirPerClusterStore.get(key);

                            log.info("Deleting data for successful pushes to " + clusterUrl
                                     + " and store " + storeName);
                            int nodeId = 0;
                            for(String nodeDir: nodeDirs) {
                                try {
                                    log.info("Deleting data ( " + nodeDir
                                             + " ) for successful pushes to '" + clusterUrl
                                             + "' and store '" + storeName + "' and node " + nodeId);
                                    adminClient.readonlyOps.failedFetchStore(nodeId,
                                                                             storeName,
                                                                             nodeDir);
                                    log.info("Successfully deleted data for successful pushes to '"
                                             + clusterUrl + "' and store '" + storeName
                                             + "' and node " + nodeId);

                                } catch(Exception e) {
                                    log.error("Failure while deleting data on node " + nodeId
                                              + " for store '" + storeName + "' and url '"
                                              + clusterUrl + "'");
                                }
                                nodeId++;
                            }
                        }
                    }
                } finally {
                    if(adminClient != null) {
                        adminClient.close();
                    }
                }
            }

            int errorNo = 1;
            for(Pair<String, String> key: exceptions.keySet()) {
                log.error("Error no " + errorNo + "] Error pushing for cluster '" + key.getFirst()
                          + "' and store '" + key.getSecond() + "' :", exceptions.get(key));
                errorNo++;
            }

            throw new VoldemortException("Exception during build + push");
        }

        // ====== Delete the temporary directory since we don't require it
        // ======
        if(!props.getBoolean("build.output.keep", false)) {
            JobConf jobConf = new JobConf();

            if(props.containsKey("hadoop.job.ugi")) {
                jobConf.set("hadoop.job.ugi", props.getString("hadoop.job.ugi"));
            }

            log.info("Deleting output directory since we have finished the pushes " + outputDir);
            HadoopUtils.deletePathIfExists(jobConf, outputDir.toString());
            log.info("Successfully deleted output directory since we have finished the pushes"
                     + outputDir);
        }

        // ====== Time to swap the stores one node at a time ========
        try {
            for(int index = 0; index < clusterUrls.size(); index++) {
                String url = clusterUrls.get(index);
                Cluster cluster = urlToCluster.get(url);

                AdminClient adminClient = new AdminClient(cluster,
                                                          new AdminClientConfig(),
                                                          new ClientConfig());

                log.info("Swapping all stores on cluster " + url);
                try {
                    // Go over every node and swap
                    for(Node node: cluster.getNodes()) {

                        log.info("Swapping all stores on cluster " + url + " and node "
                                 + node.getId());

                        // Go over every store and swap
                        for(String storeName: storeNames) {

                            Pair<String, String> key = Pair.create(url, storeName);
                            log.info("Swapping '" + storeName + "' store on cluster " + url
                                     + " and node " + node.getId() + " - "
                                     + nodeDirPerClusterStore.get(key).get(node.getId()));

                            previousNodeDirPerClusterStore.put(key,
                                                               Pair.create(node.getId(),
                                                                           adminClient.readonlyOps.swapStore(node.getId(),
                                                                                                             storeName,
                                                                                                             nodeDirPerClusterStore.get(key)
                                                                                                                                   .get(node.getId()))));
                            log.info("Successfully swapped '" + storeName + "' store on cluster "
                                     + url + " and node " + node.getId());

                        }

                    }
                } finally {
                    adminClient.close();

                }
            }
        } catch(Exception e) {

            log.error("Got an exception during swaps. Rolling back data already pushed on successful nodes");

            for(Pair<String, String> clusterStoreTuple: previousNodeDirPerClusterStore.keySet()) {
                Collection<Pair<Integer, String>> nodeToPreviousDirs = previousNodeDirPerClusterStore.get(clusterStoreTuple);
                String url = clusterStoreTuple.getFirst();
                Cluster cluster = urlToCluster.get(url);

                log.info("Rolling back for cluster " + url + " and store  "
                         + clusterStoreTuple.getSecond());

                AdminClient adminClient = new AdminClient(cluster,
                                                          new AdminClientConfig(),
                                                          new ClientConfig());
                try {
                    for(Pair<Integer, String> nodeToPreviousDir: nodeToPreviousDirs) {
                        log.info("Rolling back for cluster " + url + " and store "
                                 + clusterStoreTuple.getSecond() + " and node "
                                 + nodeToPreviousDir.getFirst() + " to dir "
                                 + nodeToPreviousDir.getSecond());
                        adminClient.readonlyOps.rollbackStore(nodeToPreviousDir.getFirst(),
                                                              nodeToPreviousDir.getSecond(),
                                                              ReadOnlyUtils.getVersionId(new File(nodeToPreviousDir.getSecond())));
                        log.info("Successfully rolled back for cluster " + url + " and store "
                                 + clusterStoreTuple.getSecond() + " and node "
                                 + nodeToPreviousDir.getFirst() + " to dir "
                                 + nodeToPreviousDir.getSecond());

                    }
                } finally {
                    adminClient.close();
                }
            }
            throw e;
        }
    }
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "cleanup orphaned data")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "cleanup vector clocks")) {
                return;
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here

            // execute command
            if(!AdminToolUtils.askConfirm(confirm, "cleanup slops"))
                return;

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            if(allNodes) {
                nodeIds = AdminToolUtils.getAllNodeIds(adminClient);
            }
View Full Code Here

                metaKeys.add(MetadataStore.CLUSTER_KEY);
                metaKeys.add(MetadataStore.STORES_KEY);
                metaKeys.add(MetadataStore.SERVER_STATE_KEY);
            }

            AdminClient adminClient = AdminToolUtils.getAdminClient(url);

            doMetaCheck(adminClient, metaKeys);
        }
View Full Code Here

                                              new Properties(),
                                              sourceCluster);

        Properties adminProperties = new Properties();
        adminProperties.setProperty("max_connections", "2");
        adminClient = new AdminClient(servers[0].getMetadataStore().getCluster(),
                                      new AdminClientConfig(adminProperties),
                                      new ClientConfig());
    }
View Full Code Here

    public void tearDown() {
        // Release the socket connections held by the shared store factory
        // after the test finishes. NOTE(review): presumably paired with a
        // setUp that creates socketStoreFactory — not visible in this fragment.
        socketStoreFactory.close();
    }

    private AdminClient getAdminClient(Cluster newCluster) {
        return new AdminClient(newCluster, new AdminClientConfig(), new ClientConfig());
    }
View Full Code Here

TOP

Related Classes of voldemort.client.protocol.admin.AdminClient$QuotaManagementOperations

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.