prop.put("remoteCrawlState", "");
prop.put("list-remote", 0);
prop.put("forwardToCrawlStart", "0");
// get segment
Segment indexSegment = null;
if (post != null && post.containsKey("segment")) {
final String segmentName = post.get("segment");
if (sb.indexSegments.segmentExist(segmentName)) {
indexSegment = sb.indexSegments.segment(segmentName);
}
}
if (indexSegment == null) {
// take the default segment (also if an unknown segment name was submitted)
indexSegment = sb.indexSegments.segment(Segments.Process.PUBLIC);
}
prop.put("info", "0");
if (post != null && post.containsKey("continue")) {
// continue queue
final String queue = post.get("continue", "");
if ("localcrawler".equals(queue)) {
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
} else if ("remotecrawler".equals(queue)) {
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
}
}
if (post != null && post.containsKey("pause")) {
// pause queue
final String queue = post.get("pause", "");
if ("localcrawler".equals(queue)) {
sb.pauseCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
} else if ("remotecrawler".equals(queue)) {
sb.pauseCrawlJob(SwitchboardConstants.CRAWLJOB_REMOTE_TRIGGERED_CRAWL);
}
}
if (post != null && post.containsKey("crawlingstart")) {
// init crawl
if (sb.peers == null) {
prop.put("info", "3");
} else {
String crawlingStart = post.get("crawlingURL","").trim(); // the crawljob start url
// add the prefix http:// if necessary
int pos = crawlingStart.indexOf("://",0);
if (pos == -1) {
if (crawlingStart.startsWith("www")) crawlingStart = "http://" + crawlingStart;
if (crawlingStart.startsWith("ftp")) crawlingStart = "ftp://" + crawlingStart;
}
// remove crawlingFileContent before we record the call
final String crawlingFileName = post.get("crawlingFile");
final File crawlingFile = (crawlingFileName != null && crawlingFileName.length() > 0) ? new File(crawlingFileName) : null;
if (crawlingFile != null && crawlingFile.exists()) {
post.remove("crawlingFile$file");
}
// normalize URL
DigestURI crawlingStartURL = null;
if (crawlingFile == null) try {crawlingStartURL = new DigestURI(crawlingStart);} catch (final MalformedURLException e1) {Log.logException(e1);}
crawlingStart = (crawlingStartURL == null) ? null : crawlingStartURL.toNormalform(true, true);
// set new properties
final boolean fullDomain = "domain".equals(post.get("range", "wide")); // special property in simple crawl start
final boolean subPath = "subpath".equals(post.get("range", "wide")); // special property in simple crawl start
// set the crawl filter
String newcrawlingMustMatch = post.get("mustmatch", CrawlProfile.MATCH_ALL_STRING);
final String newcrawlingMustNotMatch = post.get("mustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
if (newcrawlingMustMatch.length() < 2) newcrawlingMustMatch = CrawlProfile.MATCH_ALL_STRING; // avoid that all urls are filtered out if bad value was submitted
String ipMustMatch = post.get("ipMustmatch", CrawlProfile.MATCH_ALL_STRING);
final String ipMustNotMatch = post.get("ipMustnotmatch", CrawlProfile.MATCH_NEVER_STRING);
if (ipMustMatch.length() < 2) ipMustMatch = CrawlProfile.MATCH_ALL_STRING;
final String countryMustMatch = post.getBoolean("countryMustMatchSwitch", false) ? post.get("countryMustMatchList", "") : "";
sb.setConfig("crawlingIPMustMatch", ipMustMatch);
sb.setConfig("crawlingIPMustNotMatch", ipMustNotMatch);
if (countryMustMatch.length() > 0) sb.setConfig("crawlingCountryMustMatch", countryMustMatch);
// special cases:
if (crawlingStartURL != null && fullDomain) {
newcrawlingMustMatch = CrawlProfile.mustMatchFilterFullDomain(crawlingStartURL);
if (subPath) newcrawlingMustMatch = newcrawlingMustMatch.substring(0, newcrawlingMustMatch.length() - 2) + crawlingStartURL.getPath() + ".*";
}
if (crawlingStart != null && subPath && (pos = crawlingStart.lastIndexOf('/')) > 0) {
newcrawlingMustMatch = crawlingStart.substring(0, pos + 1) + ".*";
}
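// remote crawl order (delegation to other peers) and crawl depth; the depth is capped at 8 when crawl order is enabled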
final boolean crawlOrder = post.get("crawlOrder", "off").equals("on");
env.setConfig("crawlOrder", crawlOrder);
int newcrawlingdepth = post.getInt("crawlingDepth", 8);
env.setConfig("crawlingDepth", Integer.toString(newcrawlingdepth));
if ((crawlOrder) && (newcrawlingdepth > 8)) newcrawlingdepth = 8;
final boolean directDocByURL = "on".equals(post.get("directDocByURL", "off"));
env.setConfig("crawlingDirectDocByURL", directDocByURL);
// recrawl
final String recrawl = post.get("recrawl", "nodoubles"); // nodoubles, reload, scheduler
boolean crawlingIfOlderCheck = "on".equals(post.get("crawlingIfOlderCheck", "off"));
int crawlingIfOlderNumber = post.getInt("crawlingIfOlderNumber", -1);
String crawlingIfOlderUnit = post.get("crawlingIfOlderUnit","year"); // year, month, day, hour
int repeat_time = post.getInt("repeat_time", -1);
final String repeat_unit = post.get("repeat_unit", "seldays"); // selminutes, selhours, seldays
if ("scheduler".equals(recrawl) && repeat_time > 0) {
// set crawlingIfOlder attributes that are appropriate for scheduled crawling
crawlingIfOlderCheck = true;
crawlingIfOlderNumber = "selminutes".equals(repeat_unit) ? 1 : "selhours".equals(repeat_unit) ? repeat_time / 2 : repeat_time * 12;
crawlingIfOlderUnit = "hour";
} else if ("reload".equals(recrawl)) {
repeat_time = -1;
crawlingIfOlderCheck = true;
} else if ("nodoubles".equals(recrawl)) {
repeat_time = -1;
crawlingIfOlderCheck = false;
}
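// collapse the recrawl settings into the single crawlingIfOlder value used by the crawl profiles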
final long crawlingIfOlder = recrawlIfOlderC(crawlingIfOlderCheck, crawlingIfOlderNumber, crawlingIfOlderUnit);
env.setConfig("crawlingIfOlder", crawlingIfOlder);
// store this call as api call
if (repeat_time > 0) {
// store as scheduled api call
sb.tables.recordAPICall(post, "Crawler_p.html", WorkTables.TABLE_API_TYPE_CRAWLER, "crawl start for " + ((crawlingStart == null) ? post.get("crawlingFile", "") : crawlingStart), repeat_time, repeat_unit.substring(3));
} else {
// store just a protocol
sb.tables.recordAPICall(post, "Crawler_p.html", WorkTables.TABLE_API_TYPE_CRAWLER, "crawl start for " + ((crawlingStart == null) ? post.get("crawlingFile", "") : crawlingStart));
}
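// per-domain page limit (-1 disables the limit) and handling of URLs with query strings (crawlingQ)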
final boolean crawlingDomMaxCheck = "on".equals(post.get("crawlingDomMaxCheck", "off"));
final int crawlingDomMaxPages = (crawlingDomMaxCheck) ? post.getInt("crawlingDomMaxPages", -1) : -1;
env.setConfig("crawlingDomMaxPages", Integer.toString(crawlingDomMaxPages));
final boolean crawlingQ = "on".equals(post.get("crawlingQ", "off"));
env.setConfig("crawlingQ", crawlingQ);
final boolean indexText = "on".equals(post.get("indexText", "on"));
env.setConfig("indexText", indexText);
final boolean indexMedia = "on".equals(post.get("indexMedia", "on"));
env.setConfig("indexMedia", indexMedia);
boolean storeHTCache = "on".equals(post.get("storeHTCache", "on"));
if (crawlingStartURL != null && (crawlingStartURL.isFile() || crawlingStartURL.isSMB())) storeHTCache = false;
env.setConfig("storeHTCache", storeHTCache);
CacheStrategy cachePolicy = CacheStrategy.parse(post.get("cachePolicy", "iffresh"));
if (cachePolicy == null) cachePolicy = CacheStrategy.IFFRESH;
final boolean xsstopw = "on".equals(post.get("xsstopw", "off"));
env.setConfig("xsstopw", xsstopw);
final boolean xdstopw = "on".equals(post.get("xdstopw", "off"));
env.setConfig("xdstopw", xdstopw);
final boolean xpstopw = "on".equals(post.get("xpstopw", "off"));
env.setConfig("xpstopw", xpstopw);
final String crawlingMode = post.get("crawlingMode","url");
if (crawlingStart != null && crawlingStart.startsWith("ftp")) {
try {
// check if the crawl filter works correctly
Pattern.compile(newcrawlingMustMatch);
final CrawlProfile profile = new CrawlProfile(
crawlingStart,
crawlingStartURL,
newcrawlingMustMatch,
newcrawlingMustNotMatch,
ipMustMatch,
ipMustNotMatch,
countryMustMatch,
newcrawlingdepth,
directDocByURL,
crawlingIfOlder,
crawlingDomMaxPages,
crawlingQ,
indexText,
indexMedia,
storeHTCache,
crawlOrder,
xsstopw,
xdstopw,
xpstopw,
cachePolicy);
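// activate the profile and pause the local crawler while the FTP listing is stacked; the crawler is resumed again after the try/catch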
sb.crawler.putActive(profile.handle().getBytes(), profile);
sb.pauseCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
final DigestURI url = crawlingStartURL;
sb.crawlStacker.enqueueEntriesFTP(sb.peers.mySeed().hash.getBytes(), profile.handle(), url.getHost(), url.getPort(), false);
} catch (final PatternSyntaxException e) {
prop.put("info", "4"); // crawlfilter does not match url
prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
prop.putHTML("info_error", e.getMessage());
} catch (final Exception e) {
// mist
prop.put("info", "7"); // Error with file
prop.putHTML("info_crawlingStart", crawlingStart);
prop.putHTML("info_error", e.getMessage());
Log.logException(e);
}
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
} else if ("url".equals(crawlingMode)) {
// check if pattern matches
if ((crawlingStart == null || crawlingStartURL == null) /* || (!(crawlingStart.matches(newcrawlingfilter))) */) {
// print error message
prop.put("info", "4"); //crawlfilter does not match url
prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
prop.putHTML("info_crawlingStart", crawlingStart);
} else try {
// check if the crawl filter works correctly
Pattern.compile(newcrawlingMustMatch);
// stack request
// first delete old entry, if exists
final DigestURI url = new DigestURI(crawlingStart);
final byte[] urlhash = url.hash();
indexSegment.urlMetadata().remove(urlhash);
sb.crawlQueues.noticeURL.removeByURLHash(urlhash);
sb.crawlQueues.errorURL.remove(urlhash);
// get a scraper to get the title
final ContentScraper scraper = sb.loader.parseResource(url, CacheStrategy.IFFRESH);
final String title = scraper == null ? url.toNormalform(true, true) : scraper.getTitle();
final String description = scraper == null ? "" : scraper.getDescription();
// stack url
sb.crawler.removePassive(crawlingStartURL.hash()); // if there is an old entry, delete it
final CrawlProfile pe = new CrawlProfile(
(crawlingStartURL.getHost() == null) ? crawlingStartURL.toNormalform(true, false) : crawlingStartURL.getHost(),
crawlingStartURL,
newcrawlingMustMatch,
newcrawlingMustNotMatch,
ipMustMatch,
ipMustNotMatch,
countryMustMatch,
newcrawlingdepth,
directDocByURL,
crawlingIfOlder,
crawlingDomMaxPages,
crawlingQ,
indexText,
indexMedia,
storeHTCache,
crawlOrder,
xsstopw,
xdstopw,
xpstopw,
cachePolicy);
sb.crawler.putActive(pe.handle().getBytes(), pe);
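// stack the start URL as a root request with depth 0; stackCrawl returns null on success and a reason string when the URL was rejected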
final String reasonString = sb.crawlStacker.stackCrawl(new Request(
sb.peers.mySeed().hash.getBytes(),
url,
null,
"CRAWLING-ROOT",
new Date(),
pe.handle(),
0,
0,
0,
0
));
if (reasonString == null) {
// create a bookmark from crawl start url
//final Set<String> tags=ListManager.string2set(BookmarkHelper.cleanTagsString(post.get("bookmarkFolder","/crawlStart")));
final Set<String> tags=ListManager.string2set(BookmarkHelper.cleanTagsString("/crawlStart"));
tags.add("crawlStart");
final String[] keywords = scraper == null ? null : scraper.getKeywords();
if (keywords != null) {
for (final String k: keywords) {
final String kk = BookmarkHelper.cleanTagsString(k);
if (kk.length() > 0) tags.add(kk);
}
}
String tagStr = tags.toString();
if (tagStr.length() > 2 && tagStr.startsWith("[") && tagStr.endsWith("]")) tagStr = tagStr.substring(1, tagStr.length() - 2);
// we will create always a bookmark to use this to track crawled hosts
final BookmarksDB.Bookmark bookmark = sb.bookmarksDB.createBookmark(crawlingStart, "admin");
if (bookmark != null) {
bookmark.setProperty(BookmarksDB.Bookmark.BOOKMARK_TITLE, title);
bookmark.setProperty(BookmarksDB.Bookmark.BOOKMARK_DESCRIPTION, description);
bookmark.setOwner("admin");
bookmark.setPublic(false);
bookmark.setTags(tags, true);
sb.bookmarksDB.saveBookmark(bookmark);
}
// do the same for ymarks
// TODO: could a non admin user add crawls?
sb.tables.bookmarks.createBookmark(sb.loader, url, YMarkTables.USER_ADMIN, true, "crawlStart", "/Crawl Start");
// liftoff!
prop.put("info", "8");//start msg
prop.putHTML("info_crawlingURL", post.get("crawlingURL"));
// generate a YaCyNews if the global flag was set
if (!sb.isRobinsonMode() && crawlOrder) {
final Map<String, String> m = new HashMap<String, String>(pe); // must be cloned
m.remove("specificDepth");
m.remove("indexText");
m.remove("indexMedia");
m.remove("remoteIndexing");
m.remove("xsstopw");
m.remove("xpstopw");
m.remove("xdstopw");
m.remove("storeTXCache");
m.remove("storeHTCache");
m.remove("generalFilter");
m.remove("specificFilter");
m.put("intention", post.get("intention", "").replace(',', '/'));
sb.peers.newsPool.publishMyNews(sb.peers.mySeed(), NewsPool.CATEGORY_CRAWL_START, m);
}
} else {
prop.put("info", "5"); //Crawling failed
prop.putHTML("info_crawlingURL", (post.get("crawlingURL")));
prop.putHTML("info_reasonString", reasonString);
sb.crawlQueues.errorURL.push(
new Request(
sb.peers.mySeed().hash.getBytes(),
crawlingStartURL,
null,
"",
new Date(),
pe.handle(),
0,
0,
0,
0),
sb.peers.mySeed().hash.getBytes(),
new Date(),
1,
FailCategory.FINAL_LOAD_CONTEXT,
reasonString, -1);
}
} catch (final PatternSyntaxException e) {
prop.put("info", "4"); // crawlfilter does not match url
prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
prop.putHTML("info_error", e.getMessage());
} catch (final Exception e) {
// mist
prop.put("info", "6"); // Error with url
prop.putHTML("info_crawlingStart", crawlingStart);
prop.putHTML("info_error", e.getMessage());
Log.logException(e);
}
} else if ("file".equals(crawlingMode)) {
if (post.containsKey("crawlingFile")) {
final String crawlingFileContent = post.get("crawlingFile$file", "");
try {
// check if the crawl filter works correctly
Pattern.compile(newcrawlingMustMatch);
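// note: the scraper below assumes a local file path was submitted; if only uploaded content is present, crawlingFile is null and the DigestURI construction fails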
final ContentScraper scraper = new ContentScraper(new DigestURI(crawlingFile));
final Writer writer = new TransformerWriter(null, null, scraper, null, false);
if (crawlingFile != null && crawlingFile.exists()) {
FileUtils.copy(new FileInputStream(crawlingFile), writer);
} else {
FileUtils.copy(crawlingFileContent, writer);
}
writer.close();
// get links and generate filter
final Map<MultiProtocolURI, Properties> hyperlinks = scraper.getAnchors();
if (fullDomain && newcrawlingdepth > 0) newcrawlingMustMatch = siteFilter(hyperlinks.keySet());
final DigestURI crawlURL = new DigestURI("file://" + crawlingFile.toString());
final CrawlProfile profile = new CrawlProfile(
crawlingFileName,
crawlURL,
newcrawlingMustMatch,
CrawlProfile.MATCH_NEVER_STRING,
ipMustMatch,
ipMustNotMatch,
countryMustMatch,
newcrawlingdepth,
false,
crawlingIfOlder,
crawlingDomMaxPages,
crawlingQ,
indexText,
indexMedia,
storeHTCache,
crawlOrder,
xsstopw,
xdstopw,
xpstopw,
cachePolicy);
sb.crawler.putActive(profile.handle().getBytes(), profile);
sb.pauseCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
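// stack all links extracted from the file asynchronously; the local crawler is resumed again after the try/catch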
sb.crawlStacker.enqueueEntriesAsynchronous(sb.peers.mySeed().hash.getBytes(), profile.handle(), hyperlinks, true);
} catch (final PatternSyntaxException e) {
prop.put("info", "4"); // crawlfilter does not match url
prop.putHTML("info_newcrawlingfilter", newcrawlingMustMatch);
prop.putHTML("info_error", e.getMessage());
} catch (final Exception e) {
// mist
prop.put("info", "7"); // Error with file
prop.putHTML("info_crawlingStart", crawlingFileName);
prop.putHTML("info_error", e.getMessage());
Log.logException(e);
}
sb.continueCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
}
} else if ("sitemap".equals(crawlingMode)) {
final String sitemapURLStr = post.get("sitemapURL","");
try {
final DigestURI sitemapURL = new DigestURI(sitemapURLStr);
final CrawlProfile pe = new CrawlProfile(
sitemapURLStr,
sitemapURL,
CrawlProfile.MATCH_ALL_STRING,
CrawlProfile.MATCH_NEVER_STRING,
ipMustMatch,
ipMustNotMatch,
countryMustMatch,
0,
false,
crawlingIfOlder,
crawlingDomMaxPages,
true,
indexText,
indexMedia,
storeHTCache,
crawlOrder,
xsstopw,
xdstopw,
xpstopw,
cachePolicy);
sb.crawler.putActive(pe.handle().getBytes(), pe);
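// the importer runs asynchronously and stacks the sitemap entries onto the crawl queue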
final SitemapImporter importer = new SitemapImporter(sb, sitemapURL, pe);
importer.start();
} catch (final Exception e) {
// mist
prop.put("info", "6");//Error with url
prop.putHTML("info_crawlingStart", sitemapURLStr);
prop.putHTML("info_error", e.getMessage());
Log.logException(e);
}
} else if ("sitelist".equals(crawlingMode)) {
try {
final DigestURI sitelistURL = new DigestURI(crawlingStart);
// download document
final ContentScraper scraper = sb.loader.parseResource(sitelistURL, CacheStrategy.IFFRESH);
// String title = scraper.getTitle();
// String description = scraper.getDescription();
// get links and generate filter
final Map<MultiProtocolURI, Properties> hyperlinks = scraper.getAnchors();
if (fullDomain && newcrawlingdepth > 0) newcrawlingMustMatch = siteFilter(hyperlinks.keySet());
// put links onto crawl queue
final CrawlProfile profile = new CrawlProfile(
sitelistURL.getHost(),
sitelistURL,
newcrawlingMustMatch,
CrawlProfile.MATCH_NEVER_STRING,
ipMustMatch,
ipMustNotMatch,
countryMustMatch,
newcrawlingdepth,
directDocByURL,
crawlingIfOlder,
crawlingDomMaxPages,
crawlingQ,
indexText,
indexMedia,
storeHTCache,
crawlOrder,
xsstopw,
xdstopw,
xpstopw,
cachePolicy);
sb.crawler.putActive(profile.handle().getBytes(), profile);
sb.pauseCrawlJob(SwitchboardConstants.CRAWLJOB_LOCAL_CRAWL);
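// re-stack every link from the site list: remove stale index and queue entries first, then enqueue a fresh root request for each URL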
final Iterator<Map.Entry<MultiProtocolURI, Properties>> linkiterator = hyperlinks.entrySet().iterator();
DigestURI nexturl;
while (linkiterator.hasNext()) {
final Map.Entry<MultiProtocolURI, Properties> e = linkiterator.next();
if (e.getKey() == null) continue;
nexturl = new DigestURI(e.getKey());
// remove the url from the database to be prepared to crawl them again
final byte[] urlhash = nexturl.hash();
indexSegment.urlMetadata().remove(urlhash);
sb.crawlQueues.noticeURL.removeByURLHash(urlhash);
sb.crawlQueues.errorURL.remove(urlhash);
sb.crawlStacker.enqueueEntry(new Request(
sb.peers.mySeed().hash.getBytes(),
nexturl,