final SubMonitor ticker = SubMonitor.convert(monitor, workload); // TODO: scaling
PPDiagnosticsRunner ppRunner = new PPDiagnosticsRunner();
RubyHelper rubyHelper = new RubyHelper();
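// the PPDiagnosticsRunner parses, links and validates pp code, while the RubyHelper
// provides ruby syntax checking and Rakefile introspection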
try {
IValidationAdvisor.ComplianceLevel complianceLevel = options.getComplianceLevel();
if(complianceLevel == null)
complianceLevel = IValidationAdvisor.ComplianceLevel.PUPPET_2_7;
IPotentialProblemsAdvisor problemsAdvisor = options.getProblemsAdvisor();
if(problemsAdvisor == null)
problemsAdvisor = new DefaultPotentialProblemsAdvisor();
ppRunner.setUp(complianceLevel, problemsAdvisor);
rubyHelper.setUp();
}
catch(Exception e) {
addExceptionDiagnostic(diagnostics, "Internal Error: Exception while setting up diagnostics.", e);
return new BuildResult(rubyHelper.isRubyServicesAvailable()); // give up
}
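// configure encoding and the module search path before any parsing takes place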
ppRunner.configureEncoding(options.getEncodingProvider());
ppRunner.configureSearchPath(root, options.getSearchPath(), options.getEnvironment());
// get the configured search path
final PPSearchPath searchPath = ppRunner.getDefaultSearchPath();
// Modulefile processing
// Modulefiles must be processed first in order to figure out containers and container visibility.
final IPath rootPath = new Path(root.getAbsolutePath());
final IPath nodeRootPath = rootPath.append(NAME_OF_DIR_WITH_RESTRICTED_SCOPE);
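// files under the node root represent NODEs (with restricted visibility) as opposed
// to regular MODULEs - this is recorded in the MetadataInfo created below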
// collect info in a structure
Multimap<ModuleName, MetadataInfo> moduleData = ArrayListMultimap.create();
for(File mdRoot : mdRoots) {
// load metadata from each module directory and remember all that loaded ok
File[] mdProvider = new File[1];
Metadata m;
try {
m = forge.createFromModuleDirectory(mdRoot, true, null, mdProvider, diagnostics);
}
catch(IOException e) {
addFileError(
diagnostics, mdProvider[0], mdRoot, "Cannot parse file: " + e.getMessage(),
IValidationConstants.ISSUE__MODULEFILE_PARSE_ERROR);
m = null;
}
if(m == null)
worked(ticker, 1);
else {
File f = mdProvider[0];
ModuleName moduleName = m.getName();
if(options.isCheckModuleSemantics() && isOnPath(pathToFile(f.getAbsolutePath(), root), searchPath)) {
// remember the metadata and where it came from
// and if it represents a NODE as opposed to a regular MODULE
moduleData.put(
moduleName, new MetadataInfo(m, f, nodeRootPath.isPrefixOf(new Path(f.getAbsolutePath()))));
}
if(isValidationWanted(examinedFiles, f)) {
validateModuleMetadata(m, diagnostics, f, root, options, ticker.newChild(1));
}
else
worked(ticker, 1);
}
}
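// Module semantics: check for redefinitions and resolve every declared dependency
// against the collected metadata.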
if(options.isCheckModuleSemantics()) {
for(ModuleName key : moduleData.keySet()) {
// check there is only one version of each module
Collection<MetadataInfo> versions = moduleData.get(key);
boolean redeclared = versions.size() > 1;
for(MetadataInfo info : versions) {
// Process dependencies for one version of a Modulefile (in case of errors,
// there may not be as many ticks as originally requested).
// The tick is taken before the fact (but there is "one for the pot" left at the end),
// as this makes it easier to just do "continue" below.
worked(ticker, 1);
// only report diagnostics if validation is wanted for this file
final boolean shouldDiagnosticBeReported = isValidationWanted(examinedFiles, info.getFile());
if(redeclared && shouldDiagnosticBeReported) {
addFileError(
diagnostics, info.getFile(), root, "Redefinition - an equally named module already exists",
IValidationConstants.ISSUE__MODULEFILE_REDEFINITION);
}
// Resolve all dependencies
for(Dependency d : info.getMetadata().getDependencies()) {
// check dependency name and version requirement
final ModuleName requiredName = d.getName();
if(requiredName == null) {
if(shouldDiagnosticBeReported)
addFileError(
diagnostics, info.getFile(), root, "Dependency without name",
IValidationConstants.ISSUE__MODULEFILE_DEPENDENCY_ERROR);
continue; // not meaningful to resolve this dependency
}
// Find the best candidate. Ignore the fact that there should be just one version
// of each module - there may be several, and one of them may match.
// Modules without a version are allowed; they can only be matched by a
// dependency that has no version requirement.
//
Collection<MetadataInfo> candidates = moduleData.get(requiredName);
List<Version> candidateVersions = Lists.newArrayList();
List<MetadataInfo> unversioned = Lists.newArrayList();
if(candidates != null)
for(MetadataInfo mi : candidates) {
Version cv = mi.getMetadata().getVersion();
if(cv == null) {
unversioned.add(mi);
continue; // a (possibly) broken version is reported elsewhere
}
candidateVersions.add(cv);
}
// if the dependency has no version requirement use ">=0"
final VersionRange versionRequirement = d.getVersionRequirement();
if(versionRequirement == null) {
// find best match for >= 0 if there are candidates with versions
// the best will always win over unversioned.
if(candidateVersions.size() > 0) {
Collections.sort(candidateVersions);
Version best = candidateVersions.get(candidateVersions.size() - 1);
// get the matching MetadataInfo as the resolution of the dependency
// and remember it
for(MetadataInfo mi : candidates) {
// compare via best to avoid an NPE on unversioned candidates
if(best.equals(mi.getMetadata().getVersion()))
info.addResolvedDependency(d, mi);
}
}
// or there must be unversioned candidates
else if(unversioned.size() == 0) {
if(shouldDiagnosticBeReported)
addFileDiagnostic(
diagnostics, (candidates.size() > 0
? Diagnostic.WARNING
: Diagnostic.ERROR), info.getFile(), root,
"Unresolved Dependency to: " + d.getName() + " (unversioned).",
IValidationConstants.ISSUE__MODULEFILE_UNSATISFIED_DEPENDENCY);
}
else {
// pick the first as resolution - ambiguity is dealt with elsewhere
info.addResolvedDependency(d, unversioned.get(0));
}
}
else {
// there was a version requirement; it must match something with a version
Version best = versionRequirement.findBestMatch(candidateVersions);
if(best == null) {
info.addUnresolvedDependency(d);
if(shouldDiagnosticBeReported)
addFileDiagnostic(
diagnostics,
(candidates.size() > 0
? Diagnostic.WARNING
: Diagnostic.ERROR),
info.getFile(),
root,
"Unresolved Dependency to: " + d.getName() + " version: " +
d.getVersionRequirement(),
IValidationConstants.ISSUE__MODULEFILE_UNSATISFIED_DEPENDENCY);
}
else {
// get the matching MetadataInfo as the resolution of the dependency
// and remember it
for(MetadataInfo mi : candidates) {
// compare via best to avoid an NPE on unversioned candidates
if(best.equals(mi.getMetadata().getVersion()))
info.addResolvedDependency(d, mi);
}
}
}
}
}
}
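// with all dependencies resolved, check for circular module dependencies when the
// problems advisor wants them reported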
IPotentialProblemsAdvisor advisor = options.getProblemsAdvisor();
if(advisor != null && advisor.circularDependencyPreference().isWarningOrError()) {
ValidationPreference preference = advisor.circularDependencyPreference();
checkCircularDependencies(moduleData, diagnostics, root);
for(MetadataInfo mi : moduleData.values()) {
if(isValidationWanted(examinedFiles, mi.getFile())) {
for(String circularity : mi.getCircularityMessages())
addFileDiagnostic(
diagnostics, preference.isError()
? Diagnostic.ERROR
: Diagnostic.WARNING, mi.getFile(), root, circularity,
IPPDiagnostics.ISSUE__CIRCULAR_MODULE_DEPENDENCY);
}
}
}
}
// TODO: Wasteful to calculate the URLs more than once.
// Could be done once per pp and rb (to separate the processing), or have all in one
// pile and let the processing look at the extension.
// Calculate containers
// sets up an iterable over all files, including the pptp
boolean useContainers = true;
URI uri = options.getPlatformURI();
if(useContainers) {
List<URI> pptpURIs = Lists.newArrayList(uri != null
? uri
: PuppetTarget.getDefault().getPlatformURI());
ppRunner.configureContainers(root, moduleData.values(), //
Iterables.concat(Iterables.transform(Iterables.concat(ppFiles, rbFiles), new Function<File, URI>() {
@Override
public URI apply(File from) {
return URI.createFileURI(from.getPath());
}
}), pptpURIs));
}
// Load pptp
if(options.isCheckReferences()) {
try {
URI platformURI = options.getPlatformURI();
ppRunner.loadResource(platformURI != null
? platformURI
: PuppetTarget.getDefault().getPlatformURI());
}
catch(IOException e) {
addExceptionDiagnostic(diagnostics, "Internal Error: Could not load pptp.", e);
return new BuildResult(rubyHelper.isRubyServicesAvailable()); // give up
}
}
worked(ticker, 1);
// Load all ruby files
for(File f : rbFiles) {
try {
// Skip "Rakefile.rb" or they will be processed twice (but still tick x2
// onece for validate and once for load - as this is included in work-count)
if(f.getName().toLowerCase().equals("rakefile.rb")) {
worked(ticker, 2);
continue;
}
// Syntax check ruby file
// consumes one rb tick
if(isValidationWanted(examinedFiles, f))
validateRubyFile(rubyHelper, diagnostics, f, root, ticker.newChild(1));
else
worked(ticker, 1);
// Load ruby file with pptp contribution
// consumes one rb tick
if(options.isCheckReferences()) {
Resource r = ppRunner.loadResource(new FileInputStream(f), URI.createFileURI(f.getPath()));
if(r != null)
rememberRootInResource(root, r);
}
worked(ticker, 1);
}
catch(Exception e) {
addExceptionDiagnostic(diagnostics, "Internal Error: Exception while processing file: " + f.getName() +
": " + e, e);
e.printStackTrace();
}
}
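// Process all Rakefiles: syntax check them (when validation is wanted) and collect
// information about their tasks.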
RakefileInfo rakefileInfo = new RakefileInfo();
for(File f : rakeFiles) {
// Syntax check ruby file
// consumes one rakefile tick
if(isValidationWanted(examinedFiles, f))
validateRubyFile(rubyHelper, diagnostics, f, root, ticker.newChild(1));
else
worked(ticker, 1);
// parsing adds one rakefile work tick
rakefileInfo.addRakefile(getRakefileInformation(rubyHelper, f, root, ticker.newChild(1)));
}
// Load all pp
// crosslink and validate all
Map<File, Resource> ppResources = Maps.newHashMapWithExpectedSize(ppFiles.size());
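// parse each pp file into a resource; I/O and internal errors become diagnostics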
for(File f : ppFiles) {
try {
ppResources.put(f, ppRunner.loadResource(new FileInputStream(f), URI.createFileURI(f.getPath())));
}
catch(IOException e) {
addExceptionDiagnostic(diagnostics, "I/O Error: Exception while processing file: " + f.toString(), e);
}
catch(Exception e) {
addExceptionDiagnostic(
diagnostics, "Internal Error: Exception while processing file: " + f.toString(), e);
}
// consume one pp tick
worked(ticker, 1);
}
// Must set the root in all resources to allow cross reference error reports to contain
// relative paths
for(Resource r : ppResources.values())
rememberRootInResource(root, r);
IResourceValidator validator = ppRunner.getPPResourceValidator();
long maxLinkTime = 0;
// Turn on for debugging particular files
// File slowCandidate = new File("/Users/henrik/gitrepos/forge-modules/jeffmccune-mockbuild/manifests/init.pp");
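// resolve cross references and validate each loaded pp resource, tracking the
// slowest link time (useful when debugging pathologically slow files)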
for(Entry<File, Resource> r : ppResources.entrySet()) {
File f = r.getKey();
if(!isValidationWanted(examinedFiles, f))
continue;
long beforeTime = System.currentTimeMillis();
boolean profileThis = false; // set to f.equals(slowCandidate) to profile a particular file
if(options.isCheckReferences())
ppRunner.resolveCrossReferences(r.getValue(), profileThis, ticker);
long afterTime = System.currentTimeMillis();
if(afterTime - beforeTime > maxLinkTime) {
maxLinkTime = afterTime - beforeTime;
}
final CancelIndicator cancelMonitor = new CancelIndicator() {
@Override
public boolean isCanceled() {
return ticker.isCanceled();
}
};
List<Issue> issues = validator.validate(r.getValue(), CheckMode.ALL, cancelMonitor);
for(Issue issue : issues) {
addIssueDiagnostic(diagnostics, issue, f, root);
}
}
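// collect the cross-module export/reference information computed during linking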
AllModuleReferences all = ppRunner.getAllModulesState();
// set the root to allow relative lookup of module exports
all.setRoot(root);
ppRunner.tearDown();
boolean rubyServicesAvailable = rubyHelper.isRubyServicesAvailable();
rubyHelper.tearDown();
// make sure everything is consumed
ticker.setWorkRemaining(0);
BuildResult buildResult = new BuildResult(rubyServicesAvailable);
buildResult.setAllModuleReferences(all);