// We now query for the alive DNodes and build deployRequests accordingly
DeployRequest[] deployRequests = new DeployRequest[deployments.size()];
log.info("Querying Splout QNode for list of DNodes...");
SploutClient client = new SploutClient(qnode);
List<String> dnodes = client.dNodeList();
if (dnodes == null || dnodes.isEmpty()) {
throw new IOException("No available DNodes in Splout cluster.");
}
int tIndex = 0;
for (TablespaceDepSpec tablespace : deployments) {
Path tablespaceOut = new Path(tablespace.getSourcePath());
// Define a DeployRequest for this Tablespace
deployRequests[tIndex] = new DeployRequest();
// Splout only accepts absolute URIs
FileSystem sourceFs = tablespaceOut.getFileSystem(conf);
if (!sourceFs.exists(tablespaceOut)) {
throw new IllegalArgumentException("Folder doesn't exist: " + tablespaceOut);
}
Path absoluteOutPath = tablespaceOut.makeQualified(sourceFs);
Path partitionMapPath = new Path(tablespaceOut, TablespaceGenerator.OUT_PARTITION_MAP);
if (!sourceFs.exists(partitionMapPath)) {
throw new IllegalArgumentException("Invalid tablespace folder: " + tablespaceOut + " doesn't contain a partition-map file.");
}
// Load the partition map
PartitionMap partitionMap = JSONSerDe.deSer(
HadoopUtils.fileToString(sourceFs, partitionMapPath), PartitionMap.class);
// Load the init statements, if they exist
ArrayList<String> initStatements = new ArrayList<String>();
Path initStatementsPath = new Path(tablespaceOut, TablespaceGenerator.OUT_INIT_STATEMENTS);
if (sourceFs.exists(initStatementsPath)) {
initStatements.addAll(JSONSerDe.deSer(HadoopUtils.fileToString(sourceFs, initStatementsPath), ArrayList.class));
}
// Add any additional initStatements provided in the deployment spec
if (tablespace.getInitStatements() != null) {
initStatements.addAll(tablespace.getInitStatements());
}
String engine = DefaultEngine.class.getName();
// Load the engine id written by the generation tool, if it exists (optional, kept for backwards compatibility)
Path engineId = new Path(tablespaceOut, TablespaceGenerator.OUT_ENGINE);
if (sourceFs.exists(engineId)) {
engine = HadoopUtils.fileToString(sourceFs, engineId);
log.info("Using generated engine id: " + engine);
}
// Finally, populate the rest of the DeployRequest
deployRequests[tIndex].setInitStatements(initStatements);
deployRequests[tIndex].setEngine(engine);
deployRequests[tIndex].setTablespace(tablespace.getTablespace());
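// The data to deploy lives under the "store" subfolder of the generated output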
deployRequests[tIndex].setData_uri(new Path(absoluteOutPath, "store").toUri().toString());
deployRequests[tIndex].setPartitionMap(partitionMap.getPartitionEntries());
// If the replication factor is greater than the number of DNodes, that level of replication can't be achieved
int repFactor = tablespace.getReplication();
if (dnodes.size() < repFactor) {
log.warn("WARNING: Replication factor " + repFactor + " for tablespace " + tablespace.getTablespace() + " is bigger than the number of serving DNodes. Adjusting replication factor to " + dnodes.size());
repFactor = dnodes.size();
}
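// Assign each partition to repFactor DNodes in a round-robin fashion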
deployRequests[tIndex].setReplicationMap(ReplicationMap.roundRobinMap(partitionMap.getPartitionEntries().size(), repFactor,
dnodes.toArray(new String[0])).getReplicationEntries());
tIndex++;
}
// Finally we send the deploy request
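// The deploy is asynchronous: the QNode returns a DeployInfo with the new version while the deployment proceeds in the background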
DeployInfo dInfo = client.deploy(deployRequests);
log.info("Deploy request of [" + deployments.size() + "] tablespaces performed. Deploy on [" + qnode + "] with version [" + dInfo.getVersion() + "] in progress.");
}