// Tail of a try/catch opened above this chunk: best-effort wipe of the ZooKeeper
// log directory. A failed delete is logged and ignored so startup can proceed.
FileUtils.deleteDirectory(new File(logDir));
} catch (IOException e) {
LOG.error(e);
}
// Launch an embedded ZooKeeper server backed by the (freshly cleaned) data/log dirs.
final ZkServer server = new ZkServer(dataDir, logDir, defaultNameSpace);
server.start();
// start Generic AppMaster that interacts with Yarn RM
AppMasterConfig appMasterConfig = new AppMasterConfig();
// Recover this container's identity from the environment-backed config and derive
// the application attempt id YARN assigned to this AM instance.
String containerIdStr = appMasterConfig.getContainerId();
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
ApplicationAttemptId appAttemptID = containerId.getApplicationAttemptId();
// Path of the YAML application spec file (resolved from the APP_SPEC_FILE env key)
// and the factory class name used to parse it.
String configFile = AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString();
String className = appMasterConfig.getApplicationSpecFactory();
GenericApplicationMaster genericApplicationMaster = new GenericApplicationMaster(appAttemptID);
try {
genericApplicationMaster.start();
} catch (Exception e) {
// NOTE(review): failure to start the AM is only logged — execution continues and
// later steps assume a running AM. Consider aborting here; confirm intended behavior.
LOG.error("Unable to start application master: ", e);
}
ApplicationSpecFactory factory = HelixYarnUtil.createInstance(className);
// TODO: Avoid setting static variable.
// The provisioner reads these statics when YARN asks it to allocate containers.
YarnProvisioner.applicationMaster = genericApplicationMaster;
YarnProvisioner.applicationMasterConfig = appMasterConfig;
// NOTE(review): this FileInputStream is never closed — wrap in try-with-resources.
ApplicationSpec applicationSpec = factory.fromYaml(new FileInputStream(configFile));
YarnProvisioner.applicationSpec = applicationSpec;
String zkAddress = appMasterConfig.getZKAddress();
String clusterName = appMasterConfig.getAppName();
// CREATE CLUSTER and setup the resources
// connect
ZkHelixConnection connection = new ZkHelixConnection(zkAddress);
connection.connect();
// create the cluster
ClusterId clusterId = ClusterId.from(clusterName);
ClusterAccessor clusterAccessor = connection.createClusterAccessor(clusterId);
// Register the two state models every service/task resource below will use.
StateModelDefinition statelessService =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForStatelessService());
StateModelDefinition taskStateModel =
new StateModelDefinition(StateModelConfigGenerator.generateConfigForTaskStateModel());
clusterAccessor.createCluster(new ClusterConfig.Builder(clusterId)
.addStateModelDefinition(statelessService).addStateModelDefinition(taskStateModel).build());
// One Helix resource per declared service: attach a YARN provisioner config with the
// requested container count and a single-partition auto-rebalance ideal state.
for (String service : applicationSpec.getServices()) {
String resourceName = service;
// add the resource with the local provisioner
ResourceId resourceId = ResourceId.from(resourceName);
ServiceConfig serviceConfig = applicationSpec.getServiceConfig(resourceName);
serviceConfig.setSimpleField("service_name", service);
// Defaults to 1 container when "num_containers" is absent from the service config.
int numContainers = serviceConfig.getIntField("num_containers", 1);
YarnProvisionerConfig provisionerConfig = new YarnProvisionerConfig(resourceId);
provisionerConfig.setNumContainers(numContainers);
AutoRebalanceModeISBuilder idealStateBuilder = new AutoRebalanceModeISBuilder(resourceId);
idealStateBuilder.setStateModelDefId(statelessService.getStateModelDefId());
// Single partition "0" with one replica — replica placement across the
// numContainers instances is left to the rebalancer.
idealStateBuilder.add(PartitionId.from(resourceId, "0"));
idealStateBuilder.setNumReplica(1);
ResourceConfig.Builder resourceConfigBuilder =
new ResourceConfig.Builder(ResourceId.from(resourceName));
ResourceConfig resourceConfig =
resourceConfigBuilder.provisionerConfig(provisionerConfig)
.idealState(idealStateBuilder.build()) //
.build();
clusterAccessor.addResource(resourceConfig);
}
// start controller
// Single controller instance ("controller1") drives rebalancing for the cluster.
ControllerId controllerId = ControllerId.from("controller1");
HelixController controller = connection.createController(clusterId, controllerId);
controller.start();
// Start any pre-specified jobs
List<TaskConfig> taskConfigs = applicationSpec.getTaskConfigs();
if (taskConfigs != null) {
YarnConfiguration conf = new YarnConfiguration();
FileSystem fs;
fs = FileSystem.get(conf);
for (TaskConfig taskConfig : taskConfigs) {
URI yamlUri = taskConfig.getYamlURI();
// Only tasks that ship both a YAML workflow URI and a name are submitted.
if (yamlUri != null && taskConfig.name != null) {
// NOTE(review): this stream is never closed after parsing — confirm whether
// Workflow.parse closes it, otherwise use try-with-resources.
InputStream is =
readFromHDFS(fs, taskConfig.name, yamlUri, applicationSpec,
appAttemptID.getApplicationId());
Workflow workflow = Workflow.parse(is);
TaskDriver taskDriver = new TaskDriver(new ZKHelixManager(controller));
taskDriver.start(workflow);
}
}
}
// Ensure the embedded ZooKeeper server is stopped when the JVM exits.
Thread shutdownhook = new Thread(new Runnable() {
@Override
public void run() {
server.shutdown();
}
});
Runtime.getRuntime().addShutdownHook(shutdownhook);
// NOTE(review): fixed 10s sleep — presumably gives the controller/provisioner time
// to settle before this method returns; the magic number deserves a named constant.
Thread.sleep(10000);