// Admin client for this (store, cluster url) pair; used both for schema verification
// and for driving the fetch. Closed in the finally block below.
AdminClient adminClient = null;
// Executor handed to AdminStoreSwapper for parallel per-node fetches. Note this task
// itself already runs inside the outer job executor, so this is a nested executor.
ExecutorService internalExecutor = null;
try {
// Admin client used to verify the schema and to push the data.
adminClient = new AdminClient(url,
new AdminClientConfig(),
new ClientConfig());
// Verify the store exists on the cluster (if it does not, verifySchema adds it),
// and get back the store definition plus the cluster metadata.
Pair<StoreDefinition, Cluster> metadata = verifySchema(storeName,
url,
inputDirsPerStore.get(storeName),
adminClient);
// Remember the url -> cluster mapping; the cleanup/swap phases below rely on it.
urlToCluster.put(url,
metadata.getSecond());
// Build the per-store, per-cluster output path: <outputDir>/<storeName>/<host of url>.
URI uri = new URI(url);
Path outputDirPath = new Path(outputDir
+ Path.SEPARATOR
+ storeName,
uri.getHost());
log.info("Running build phase for store '"
+ storeName
+ "' and url '"
+ url
+ "'. Reading from input directory '"
+ inputDirsPerStore.get(storeName)
+ "' and writing to "
+ outputDirPath);
runBuildStore(metadata.getSecond(),
metadata.getFirst(),
inputDirsPerStore.get(storeName),
outputDirPath);
log.info("Finished running build phase for store "
+ storeName
+ " and url '"
+ url
+ "'. Written to directory "
+ outputDirPath);
// Use the caller-supplied push version, or, when it is -1, ask the cluster for the
// store's current max read-only version and push as max + 1.
long storePushVersion = pushVersion;
if(storePushVersion == -1L) {
log.info("Retrieving version number for store '"
+ storeName
+ "' and cluster '"
+ url
+ "'");
Map<String, Long> pushVersions = adminClient.readonlyOps.getROMaxVersion(Lists.newArrayList(storeName));
if(pushVersions == null
|| !pushVersions.containsKey(storeName)) {
throw new RuntimeException("Could not retrieve version for store '"
+ storeName
+ "'");
}
storePushVersion = pushVersions.get(storeName);
storePushVersion++;
log.info("Retrieved max version number for store '"
+ storeName
+ "' and cluster '"
+ url
+ "' = "
+ storePushVersion);
}
log.info("Running push for cluster url "
+ url);
// Executor used by the swapper to fetch to all nodes in parallel.
internalExecutor = Executors.newCachedThreadPool();
// Timeout is configured in seconds ("timeout.seconds", default 24h) and converted
// to milliseconds for the swapper.
AdminStoreSwapper swapper = new AdminStoreSwapper(metadata.getSecond(),
internalExecutor,
adminClient,
1000 * props.getInt("timeout.seconds",
24 * 60 * 60),
true,
true);
// Fully-qualified Hadoop path string for the built data.
String outputDirPathString = outputDirPath.makeQualified(fs)
.toString();
// Sanity check: the build phase must have produced the output directory.
if(!fs.exists(outputDirPath)) {
throw new RuntimeException("Output directory for store "
+ storeName
+ " and cluster '"
+ url
+ "' - "
+ outputDirPathString
+ " does not exist");
}
log.info("Pushing data to store '"
+ storeName + "' on cluster "
+ url + " from path "
+ outputDirPathString
+ " with version "
+ storePushVersion);
// Fetch (but do not yet swap) the data onto every node; returns the per-node
// directories the data was fetched into.
List<String> nodeDirs = swapper.invokeFetch(storeName,
outputDirPathString,
storePushVersion);
log.info("Successfully pushed data to store '"
+ storeName
+ "' on cluster "
+ url
+ " from path "
+ outputDirPathString
+ " with version "
+ storePushVersion);
return nodeDirs;
} finally {
// Always release the nested executor and the admin client, even on failure.
if(internalExecutor != null) {
internalExecutor.shutdownNow();
// NOTE(review): the boolean result of awaitTermination is ignored, and the
// InterruptedException it may throw presumably propagates out of this task —
// confirm the enclosing Callable declares it.
internalExecutor.awaitTermination(10,
TimeUnit.SECONDS);
}
if(adminClient != null) {
adminClient.close();
}
}
}
}));
}
}
// Drain the result of every (store, cluster url) build+push task. A failure for one
// pair is recorded in 'exceptions' instead of aborting, so every future is consumed
// and all successes end up in nodeDirPerClusterStore for the swap phase.
for(final String storeName: storeNames) {
for(int index = 0; index < clusterUrls.size(); index++) {
Pair<String, String> key = Pair.create(clusterUrls.get(index), storeName);
Future<List<String>> nodeDirs = fetchDirsPerStoreCluster.get(key);
try {
nodeDirPerClusterStore.put(key, nodeDirs.get());
} catch(Exception e) {
// Remember the failure; cleanup of the successful pushes happens below.
exceptions.put(key, e);
}
}
}
} finally {
// Shut down the outer job executor; all tasks have completed (or failed) by now.
if(executor != null) {
executor.shutdownNow();
// NOTE(review): awaitTermination's boolean result is ignored — a timeout here
// is silently tolerated.
executor.awaitTermination(10, TimeUnit.SECONDS);
}
}
// If any build+push failed, delete the data already fetched onto the nodes of every
// SUCCESSFUL (cluster, store) pair, so no cluster is left holding a partially pushed
// version. Then report all recorded errors and abort.
if(!exceptions.isEmpty()) {
log.error("Got an exception during pushes. Deleting data already pushed on successful nodes");
for(int index = 0; index < clusterUrls.size(); index++) {
String clusterUrl = clusterUrls.get(index);
Cluster cluster = urlToCluster.get(clusterUrl);
AdminClient adminClient = null;
try {
adminClient = new AdminClient(cluster,
new AdminClientConfig(),
new ClientConfig());
for(final String storeName: storeNames) {
// Only pairs that completed the push have an entry in
// nodeDirPerClusterStore; those are the ones to roll back.
Pair<String, String> key = Pair.create(clusterUrl, storeName);
if(nodeDirPerClusterStore.containsKey(key)) {
List<String> nodeDirs = nodeDirPerClusterStore.get(key);
log.info("Deleting data for successful pushes to " + clusterUrl
+ " and store " + storeName);
// NOTE(review): nodeDirs is assumed to be ordered by node id starting
// at 0 — confirm invokeFetch guarantees this ordering.
int nodeId = 0;
for(String nodeDir: nodeDirs) {
try {
log.info("Deleting data ( " + nodeDir
+ " ) for successful pushes to '" + clusterUrl
+ "' and store '" + storeName + "' and node " + nodeId);
adminClient.readonlyOps.failedFetchStore(nodeId,
storeName,
nodeDir);
log.info("Successfully deleted data for successful pushes to '"
+ clusterUrl + "' and store '" + storeName
+ "' and node " + nodeId);
} catch(Exception e) {
// Best-effort cleanup: keep going on the remaining nodes.
// Fix: pass the exception to the logger so the stack trace
// is not silently dropped (the original logged only the
// message context).
log.error("Failure while deleting data on node " + nodeId
+ " for store '" + storeName + "' and url '"
+ clusterUrl + "'", e);
}
nodeId++;
}
}
}
} finally {
if(adminClient != null) {
adminClient.close();
}
}
}
// Report every recorded build/push failure, with its cause, before aborting.
int errorNo = 1;
for(Pair<String, String> key: exceptions.keySet()) {
log.error("Error no " + errorNo + "] Error pushing for cluster '" + key.getFirst()
+ "' and store '" + key.getSecond() + "' :", exceptions.get(key));
errorNo++;
}
throw new VoldemortException("Exception during build + push");
}
// Delete the temporary build output directory now that all pushes succeeded, unless
// the user explicitly asked to keep it via "build.output.keep".
if(!props.getBoolean("build.output.keep", false)) {
JobConf jobConf = new JobConf();
// Propagate the Hadoop user/group info when configured, so the delete runs
// with the same identity as the build.
if(props.containsKey("hadoop.job.ugi")) {
jobConf.set("hadoop.job.ugi", props.getString("hadoop.job.ugi"));
}
log.info("Deleting output directory since we have finished the pushes " + outputDir);
HadoopUtils.deletePathIfExists(jobConf, outputDir.toString());
// Fix: the original message was missing the space before the directory name,
// producing "...pushes<outputDir>" in the log.
log.info("Successfully deleted output directory since we have finished the pushes "
+ outputDir);
}
// ====== Time to swap the stores one node at a time ========
// Every fetch succeeded, so atomically swap each node over to the newly pushed data.
// The previous directory returned by each swap is remembered so a later failure can
// roll all completed swaps back.
try {
for(int index = 0; index < clusterUrls.size(); index++) {
String url = clusterUrls.get(index);
Cluster cluster = urlToCluster.get(url);
AdminClient adminClient = new AdminClient(cluster,
new AdminClientConfig(),
new ClientConfig());
log.info("Swapping all stores on cluster " + url);
try {
// Go over every node and swap
for(Node node: cluster.getNodes()) {
log.info("Swapping all stores on cluster " + url + " and node "
+ node.getId());
// Go over every store and swap
for(String storeName: storeNames) {
Pair<String, String> key = Pair.create(url, storeName);
// NOTE(review): node.getId() is used as an index into the fetched
// node-dir list — this assumes node ids are dense and 0-based;
// confirm against how invokeFetch orders its result.
log.info("Swapping '" + storeName + "' store on cluster " + url
+ " and node " + node.getId() + " - "
+ nodeDirPerClusterStore.get(key).get(node.getId()));
// swapStore returns the directory that was previously live on this
// node; keep it keyed by (url, store) for possible rollback.
previousNodeDirPerClusterStore.put(key,
Pair.create(node.getId(),
adminClient.readonlyOps.swapStore(node.getId(),
storeName,
nodeDirPerClusterStore.get(key)
.get(node.getId()))));
log.info("Successfully swapped '" + storeName + "' store on cluster "
+ url + " and node " + node.getId());
}
}
} finally {
adminClient.close();
}
}
} catch(Exception e) {
// A swap failed part-way: restore every node that was already swapped back to the
// directory it was serving before, then rethrow the original failure.
log.error("Got an exception during swaps. Rolling back data already pushed on successful nodes");
for(Pair<String, String> clusterStoreTuple: previousNodeDirPerClusterStore.keySet()) {
Collection<Pair<Integer, String>> nodeToPreviousDirs = previousNodeDirPerClusterStore.get(clusterStoreTuple);
String url = clusterStoreTuple.getFirst();
Cluster cluster = urlToCluster.get(url);
log.info("Rolling back for cluster " + url + " and store "
+ clusterStoreTuple.getSecond());
AdminClient adminClient = new AdminClient(cluster,
new AdminClientConfig(),
new ClientConfig());
try {
for(Pair<Integer, String> nodeToPreviousDir: nodeToPreviousDirs) {
log.info("Rolling back for cluster " + url + " and store "
+ clusterStoreTuple.getSecond() + " and node "
+ nodeToPreviousDir.getFirst() + " to dir "
+ nodeToPreviousDir.getSecond());
// The version id to roll back to is parsed from the previous
// directory's name.
adminClient.readonlyOps.rollbackStore(nodeToPreviousDir.getFirst(),
nodeToPreviousDir.getSecond(),
ReadOnlyUtils.getVersionId(new File(nodeToPreviousDir.getSecond())));
log.info("Successfully rolled back for cluster " + url + " and store "
+ clusterStoreTuple.getSecond() + " and node "
+ nodeToPreviousDir.getFirst() + " to dir "
+ nodeToPreviousDir.getSecond());
}
} finally {
adminClient.close();
}
}
throw e;
}
}