Fetcher fetcher = new Fetcher(conf);
ParseSegment parseSegment = new ParseSegment(conf);
CrawlDb crawlDbTool = new CrawlDb(conf);
LinkDb linkDbTool = new LinkDb(conf);
Indexer indexer = new Indexer(conf);
DeleteDuplicates dedup = new DeleteDuplicates(conf);
IndexMerger merger = new IndexMerger(conf);
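
// Note: this is essentially the one-step crawl driver logic from
// org.apache.nutch.crawl.Crawl (Nutch 1.x). The conf, injector, generator,
// FileSystem fs, and the remaining variables used below (crawlDb, segments,
// linkDb, indexes, index, tmpDir, rootUrlDir, threads, depth, topN) are
// assumed to have been set up earlier in the same method.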
// initialize crawlDb
injector.inject(crawlDb, rootUrlDir);
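
// Main crawl loop: each depth iteration generates a fetch list from the crawl
// db, fetches it into a fresh segment, parses it (unless parsing happens during
// the fetch), and updates the crawl db with the newly discovered links.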
int i;
for (i = 0; i < depth; i++) { // generate new segment
  // -1 = use the default number of fetch lists; topN caps the URLs per segment
  Path segment = generator.generate(crawlDb, segments, -1, topN,
      System.currentTimeMillis());
  if (segment == null) {
    LOG.info("Stopping at depth=" + i + " - no more URLs to fetch.");
    break;
  }
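  // Fetcher.isParsing(conf) reflects the fetcher.parse setting: if true, pages
  // are parsed while they are fetched; otherwise the ParseSegment job below
  // does the parsing as a separate step.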
  fetcher.fetch(segment, threads, org.apache.nutch.fetcher.Fetcher.isParsing(conf)); // fetch it
  if (!Fetcher.isParsing(conf)) {
    parseSegment.parse(segment); // parse it, if needed
  }
  crawlDbTool.update(crawlDb, new Path[]{segment}, true, true); // update crawldb
}
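
// i counts the iterations that actually produced a segment; if it is still
// zero, the generator never found any URLs to fetch, so skip link inversion
// and indexing altogether.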
if (i > 0) {
  linkDbTool.invert(linkDb, segments, true, true, false); // invert links

  if (indexes != null) {
    // Delete old indexes
    if (fs.exists(indexes)) {
      LOG.info("Deleting old indexes: " + indexes);
      fs.delete(indexes, true);
    }

    // Delete old index
    if (fs.exists(index)) {
      LOG.info("Deleting old merged index: " + index);
      fs.delete(index, true);
    }
  }
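
  // Indexing phase: Indexer writes Lucene part-indexes under `indexes`,
  // DeleteDuplicates then drops documents with duplicate URLs or identical
  // content hashes, and IndexMerger combines the parts into the single merged
  // `index` directory, using tmpDir as scratch space.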
  // index, dedup & merge
  FileStatus[] fstats = fs.listStatus(segments, HadoopFSUtil.getPassDirectoriesFilter(fs));
  indexer.index(indexes, crawlDb, linkDb, Arrays.asList(HadoopFSUtil.getPaths(fstats)));
  if (indexes != null) {
    dedup.dedup(new Path[] { indexes });
    fstats = fs.listStatus(indexes, HadoopFSUtil.getPassDirectoriesFilter(fs));
    merger.merge(HadoopFSUtil.getPaths(fstats), index, tmpDir);
  }
} else {
  LOG.warn("No URLs to fetch - check your seed list and URL filters.");