});
injector.getInstance(GuiceJpaInitializer.class);
try {
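// Obtain the management controller, cluster registry and a Gson instance from the injector.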
AmbariManagementController amc = injector.getInstance(AmbariManagementController.class);
Clusters clusters = injector.getInstance(Clusters.class);
Gson gson = new Gson();
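// Register host1, host2 and host3 and persist each one with the centos5 OS type.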
clusters.addHost("host1");
clusters.addHost("host2");
clusters.addHost("host3");
Host host = clusters.getHost("host1");
host.setOsType("centos5");
host.persist();
host = clusters.getHost("host2");
host.setOsType("centos5");
host.persist();
host = clusters.getHost("host3");
host.setOsType("centos5");
host.persist();
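// Create cluster c1 on the HDP-1.2.0 stack.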
ClusterRequest clusterRequest = new ClusterRequest(null, "c1", "HDP-1.2.0", null);
amc.createCluster(clusterRequest);
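// Add the HDFS service to the cluster.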
Set<ServiceRequest> serviceRequests = new HashSet<ServiceRequest>();
serviceRequests.add(new ServiceRequest("c1", "HDFS", null));
ServiceResourceProviderTest.createServices(amc, serviceRequests);
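// Create "version1" of the core-site, hdfs-site and global configurations for the cluster.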
Type confType = new TypeToken<Map<String, String>>() {}.getType();
ConfigurationRequest configurationRequest = new ConfigurationRequest("c1", "core-site", "version1",
gson.<Map<String, String>>fromJson("{ \"fs.default.name\" : \"localhost:8020\"}", confType)
);
amc.createConfiguration(configurationRequest);
configurationRequest = new ConfigurationRequest("c1", "hdfs-site", "version1",
gson.<Map<String, String>>fromJson("{ \"dfs.datanode.data.dir.perm\" : \"750\"}", confType)
);
amc.createConfiguration(configurationRequest);
configurationRequest = new ConfigurationRequest("c1", "global", "version1",
gson.<Map<String, String>>fromJson("{ \"hbase_hdfs_root_dir\" : \"/apps/hbase/\"}", confType)
);
amc.createConfiguration(configurationRequest);
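// Update the HDFS service without changing its desired state.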
serviceRequests.clear();
serviceRequests.add(new ServiceRequest("c1", "HDFS", null));
ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
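// Create the HDFS components: NAMENODE, SECONDARY_NAMENODE, DATANODE and HDFS_CLIENT.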
Set<ServiceComponentRequest> serviceComponentRequests = new HashSet<ServiceComponentRequest>();
serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "NAMENODE", null));
serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "SECONDARY_NAMENODE", null));
serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "DATANODE", null));
serviceComponentRequests.add(new ServiceComponentRequest("c1", "HDFS", "HDFS_CLIENT", null));
ComponentResourceProviderTest.createComponents(amc, serviceComponentRequests);
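// Add the three hosts to cluster c1.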
Set<HostRequest> hostRequests = new HashSet<HostRequest>();
hostRequests.add(new HostRequest("host1", "c1", null));
hostRequests.add(new HostRequest("host2", "c1", null));
hostRequests.add(new HostRequest("host3", "c1", null));
HostResourceProviderTest.createHosts(amc, hostRequests);
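// Assign host components: NAMENODE and SECONDARY_NAMENODE on host1, DATANODE on all three hosts.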
Set<ServiceComponentHostRequest> componentHostRequests = new HashSet<ServiceComponentHostRequest>();
componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host1", null));
componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", null));
componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "SECONDARY_NAMENODE", "host1", null));
componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host2", null));
componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "DATANODE", "host3", null));
amc.createHostComponents(componentHostRequests);
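// Request the INSTALLED state for the HDFS service.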
serviceRequests.clear();
serviceRequests.add(new ServiceRequest("c1", "HDFS", "INSTALLED"));
ServiceResourceProviderTest.updateServices(amc, serviceRequests, mapRequestProps, true, false);
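// Verify that exactly one NAMENODE host component was created and look it up on host1.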
Cluster cluster = clusters.getCluster("c1");
Map<String, ServiceComponentHost> namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
org.junit.Assert.assertEquals(1, namenodes.size());
ServiceComponentHost componentHost = namenodes.get("host1");