try {
// if the URL already includes a query component then just append our
// params
HashWrapper one_of_the_hashes = null;
TRTrackerScraperResponseImpl one_of_the_responses = null;
char first_separator = scrapeURL.indexOf('?') == -1 ? '?' : '&';
String info_hash = "";
String flags = "";
List hashesForUDP = new ArrayList();
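// build the combined scrape query (one info_hash parameter per torrent) and
// collect a capped subset of the hashes in case the scrape goes out over UDP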
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses.get(i);
HashWrapper hash = response.getHash();
if (Logger.isEnabled())
Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
"TrackerStatus: scraping, single_hash_scrapes = "
+ bSingleHashScrapes));
if (!scraper.isNetworkEnabled(hash, tracker_url)) {
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
.getString(SS + "networkdisabled"));
scraper.scrapeReceived(response);
} else if ( !force && (
disable_all_scrapes ||
(disable_stopped_scrapes && !scraper.isTorrentRunning(hash)))){
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
.getString(SS + "disabled"));
scraper.scrapeReceived(response);
} else {
response.setStatus(TRTrackerScraperResponse.ST_SCRAPING,
MessageText.getString(SS + "scraping"));
// technically haven't received a scrape yet, but we need
// to notify listeners (the ones that display status)
scraper.scrapeReceived(response);
// the client-id stuff RELIES on info_hash being the FIRST
// parameter we add to the URL, so don't change it!
info_hash += ((one_of_the_hashes != null) ? '&' : first_separator)
+ "info_hash=";
info_hash += URLEncoder.encode(
new String(hash.getBytes(), Constants.BYTE_ENCODING),
Constants.BYTE_ENCODING).replaceAll("\\+", "%20");
Object[] extensions = scraper.getExtensions(hash);
if ( extensions != null ){
if ( extensions[0] != null ){
info_hash += (String)extensions[0];
}
flags += (Character)extensions[1];
}else{
flags += TRTrackerScraperClientResolver.FL_NONE;
}
one_of_the_responses = response;
one_of_the_hashes = hash;
// 28 (IP/UDP headers) + 16 (UDP scrape request header) + 70*20 (info hashes)
// -> IPv4/UDP packet size of 1444 bytes, which should pass through most links unfragmented
if(hashesForUDP.size() < 70)
hashesForUDP.add(hash);
}
} // for responses
if (one_of_the_hashes == null)
return;
String request = scrapeURL + info_hash;
if ( az_tracker ){
String port_details = TRTrackerUtils.getPortsForURL();
request += port_details;
request += "&azsf=" + flags + "&azver=" + TRTrackerAnnouncer.AZ_TRACKER_VERSION_CURRENT;
}
URL reqUrl = new URL( request );
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID,
"Accessing scrape interface using url : " + reqUrl));
ByteArrayOutputStream message = new ByteArrayOutputStream();
long scrapeStartTime = SystemTime.getCurrentTime();
URL redirect_url = null;
String protocol = reqUrl.getProtocol();
URL udpScrapeURL = null;
boolean auto_probe = false;
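// decide whether to scrape over UDP: udp:// trackers always use UDP (if enabled);
// plain http trackers are periodically auto-probed for UDP support every
// 'autoUDPscrapeEvery' scrapes, falling back to HTTP if the probe fails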
if (protocol.equalsIgnoreCase("udp")){
if ( udpScrapeEnabled ){
udpScrapeURL = reqUrl;
}else{
throw( new IOException( "UDP Tracker protocol disabled" ));
}
}else if ( protocol.equalsIgnoreCase("http") &&
!az_tracker &&
scrapeCount % autoUDPscrapeEvery == 0 &&
udpProbeEnabled && udpScrapeEnabled ){
udpScrapeURL = new URL(reqUrl.toString().replaceFirst("^http", "udp"));
auto_probe = true;
}
try{
// set context in case authentication dialog is required
TorrentUtils.setTLSTorrentHash(one_of_the_hashes);
if ( udpScrapeURL != null){
boolean success = scrapeUDP( reqUrl, message, hashesForUDP, !auto_probe );
if((!success || message.size() == 0) && !protocol.equalsIgnoreCase("udp"))
{ // automatic UDP probe failed, use HTTP again
udpScrapeURL = null;
message.reset();
if(autoUDPscrapeEvery < 16)
autoUDPscrapeEvery <<= 1;
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape ["+scrapeURL+"] to udp failed, will retry in "+autoUDPscrapeEvery+" scrapes"));
} else if(success && !protocol.equalsIgnoreCase("udp"))
{
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, LogEvent.LT_INFORMATION, "redirection of http scrape ["+scrapeURL+"] to udp successful"));
autoUDPscrapeEvery = 1;
TRTrackerUtils.setUDPProbeResult( reqUrl, true );
}
}
scrapeCount++;
if(udpScrapeURL == null)
redirect_url = scrapeHTTP(reqUrl, message);
}finally{
TorrentUtils.setTLSTorrentHash( null );
}
scrape_reply = message.toByteArray();
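// the scrape reply is a bencoded dictionary: "files" maps each raw 20-byte
// info hash to its stats, "flags" may carry min_request_interval, and
// "failure reason" reports errors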
Map map = BDecoder.decode( scrape_reply );
boolean this_is_az_tracker = map.get( "aztracker" ) != null;
if ( az_tracker != this_is_az_tracker ){
az_tracker = this_is_az_tracker;
TRTrackerUtils.setAZTracker( tracker_url, az_tracker );
}
Map mapFiles = (Map) map.get("files");
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, "Response from scrape interface "
+ scrapeURL + ": "
+ ((mapFiles == null) ? "null" : "" + mapFiles.size())
+ " returned"));
int iMinRequestInterval = 0;
if (map != null) {
/* "The spec":
* files
* infohash
* complete
* incomplete
* downloaded
* name
* flags
* min_request_interval
* failure reason
*/
Map mapFlags = (Map) map.get("flags");
if (mapFlags != null) {
Long longScrapeValue = (Long) mapFlags
.get("min_request_interval");
if (longScrapeValue != null)
iMinRequestInterval = longScrapeValue.intValue();
// Tracker owners want this log entry
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID,
"Received min_request_interval of " + iMinRequestInterval));
}
}
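// an empty "files" map means either an explicit failure ("failure reason"),
// a tracker that doesn't support multi-hash scrapes, or an unknown hash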
if (mapFiles == null || mapFiles.size() == 0) {
// azureus extension here to handle "failure reason" returned for
// scrapes
byte[] failure_reason_bytes = map == null ? null : (byte[]) map
.get("failure reason");
if (failure_reason_bytes != null) {
long nextScrapeTime = SystemTime.getCurrentTime()
+ ((iMinRequestInterval == 0) ? FAULTY_SCRAPE_RETRY_INTERVAL
: iMinRequestInterval * 1000);
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
response.setNextScrapeStartTime(nextScrapeTime);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ new String(failure_reason_bytes,
Constants.DEFAULT_ENCODING));
// notify listeners
scraper.scrapeReceived(response);
}
} else {
if (responses.size() > 1) {
// multiple hashes were requested, 0 returned. Therefore, multi-hash
// scrapes not supported
bSingleHashScrapes = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL
+ " doesn't properly support " + "multi-hash scrapes"));
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "invalid"));
// notify listeners
scraper.scrapeReceived(response);
}
} else {
long nextScrapeTime = SystemTime.getCurrentTime()
+ ((iMinRequestInterval == 0) ? NOHASH_RETRY_INTERVAL
: iMinRequestInterval * 1000);
// 1 was requested, 0 returned. Therefore, hash not found.
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(0);
response.setNextScrapeStartTime(nextScrapeTime);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "nohash"));
// notify listeners
scraper.scrapeReceived(response);
}
}
return;
}
/*
 * If we requested multiple hashes but only one was returned, revert
 * to single-hash scrapes, but continue on to process the one hash that
 * was returned (it may be a random one from the list)
 */
if (!bSingleHashScrapes && responses.size() > 1
&& mapFiles.size() == 1) {
bSingleHashScrapes = true;
if (Logger.isEnabled())
Logger.log(new LogEvent(LOGID, LogEvent.LT_WARNING, scrapeURL
+ " only returned " + mapFiles.size()
+ " hash scrape(s), but we asked for " + responses.size()));
}
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
// LGLogger.log( "decoding response #" +i+ ": " +
// ByteFormatter.nicePrint( response.getHash(), true ) );
// retrieve the scrape data for the relevant infohash
Map scrapeMap = (Map) mapFiles.get(new String(response.getHash().getBytes(),
Constants.BYTE_ENCODING));
if (scrapeMap == null) {
// some trackers that return only 1 hash return a random one!
if (responses.size() == 1 || mapFiles.size() != 1) {
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ NOHASH_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "nohash"));
// notify listeners
scraper.scrapeReceived(response);
} else if (!disable_stopped_scrapes || scraper.isTorrentRunning(response.getHash())) {
// This tracker doesn't support multiple hash requests.
// revert status to what it was
response.revertStatus();
if (response.getStatus() == TRTrackerScraperResponse.ST_SCRAPING) {
// System.out.println("Hash " +
// ByteFormatter.nicePrint(response.getHash(), true) + "
// mysteriously reverted to ST_SCRAPING!");
// response.setStatus(TRTrackerScraperResponse.ST_ONLINE, "");
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "invalid"));
} else {
// force single-hash scrapes here
bSingleHashScrapes = true;
// only set the faulty retry interval if we were already doing
// single-hash scrapes; on the first single-hash failure leave the
// retry time alone
if (original_bSingleHashScrapes) {
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
}
}
// notify listeners
scraper.scrapeReceived(response);
// if this was the first scrape request in the list, TrackerChecker
// will attempt to scrape again because we didn't reset the
// nextScrapeStartTime. But the next time, bSingleHashScrapes
// will be true and only 1 hash will be requested, so there
// will not be infinite looping
}
// System.out.println("scrape: hash missing from reply");
} else {
// retrieve values
int seeds = ((Long) scrapeMap.get("complete")).intValue();
int peers = ((Long) scrapeMap.get("incomplete")).intValue();
Long comp = (Long) scrapeMap.get("downloaded");
int completed = comp == null ? -1 : comp.intValue();
// make sure we don't use invalid replies
if (seeds < 0 || peers < 0 || completed < -1) {
if (Logger.isEnabled()) {
HashWrapper hash = response.getHash();
Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash),
LOGID, "Invalid scrape response from '" + reqUrl
+ "': map = " + scrapeMap));
}
// We requested multiple hashes, but tracker didn't support
// multiple hashes and returned 1 hash. However, that hash is
// invalid because seeds or peers was < 0. So, exit. Scrape
// manager will run scrapes for each individual hash.
if (responses.size() > 1 && bSingleHashScrapes) {
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "invalid"));
scraper.scrapeReceived(response);
continue;
}
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR,
MessageText.getString(SS + "error")
+ MessageText.getString(SSErr + "invalid")
+ " "
+ (seeds < 0 ? MessageText
.getString("MyTorrentsView.seeds")
+ " == " + seeds + ". " : "")
+ (peers < 0 ? MessageText
.getString("MyTorrentsView.peers")
+ " == " + peers + ". " : "")
+ (completed < 0 ? MessageText
.getString("MyTorrentsView.completed")
+ " == " + completed + ". " : ""));
scraper.scrapeReceived(response);
continue;
}
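// the next scrape time honours the tracker's min_request_interval; the
// interval calculation also takes the current seed count into account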
int scrapeInterval = TRTrackerScraperResponseImpl
.calcScrapeIntervalSecs(iMinRequestInterval, seeds);
long nextScrapeTime = SystemTime.getCurrentTime()
+ (scrapeInterval * 1000);
response.setNextScrapeStartTime(nextScrapeTime);
// populate the response
response.setScrapeStartTime(scrapeStartTime);
response.setSeeds(seeds);
response.setPeers(peers);
response.setCompleted(completed);
response.setStatus(TRTrackerScraperResponse.ST_ONLINE,
MessageText.getString(SS + "ok"));
// notify listeners
scraper.scrapeReceived(response);
try{
if ( responses.size() == 1 && redirect_url != null ){
// we only deal with redirects for single URLs - if the tracker wants to
// redirect one of a group it has to force single-hash scrapes anyway
String redirect_str = redirect_url.toString();
int s_pos = redirect_str.indexOf( "/scrape" );
if ( s_pos != -1 ){
URL new_url = new URL( redirect_str.substring(0,s_pos) +
"/announce" + redirect_str.substring(s_pos+7));
if ( scraper.redirectTrackerUrl( response.getHash(), tracker_url, new_url )){
removeHash( response.getHash());
}
}
}
}catch( Throwable e ){
Debug.printStackTrace(e);
}
}
} // for responses
} catch (NoClassDefFoundError ignoreSSL) { // javax/net/ssl/SSLSocket
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
.getString(SS + "error")
+ ignoreSSL.getMessage());
// notify listeners
scraper.scrapeReceived(response);
}
} catch (FileNotFoundException e) {
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
.getString(SS + "error")
+ MessageText.getString("DownloadManager.error.filenotfound"));
// notify listeners
scraper.scrapeReceived(response);
}
} catch (SocketException e) {
setAllError(e);
} catch (SocketTimeoutException e) {
setAllError(e);
} catch (UnknownHostException e) {
setAllError(e);
} catch (PRUDPPacketHandlerException e) {
setAllError(e);
} catch (BEncodingException e) {
setAllError(e);
} catch (Exception e) {
// for Apache we can get error 414 - URL too long. The simplest
// solution for this is to fall back to single-hash scraping
String error_message = e.getMessage();
if (error_message != null) {
if (error_message.indexOf(" 500 ") >= 0
|| error_message.indexOf(" 400 ") >= 0
|| error_message.indexOf(" 403 ") >= 0
|| error_message.indexOf(" 404 ") >= 0
|| error_message.indexOf(" 501 ") >= 0) {
// various errors that have a 99% chance of happening on
// any other scrape request
setAllError(e);
return;
}
if (error_message.indexOf("414") != -1
&& !bSingleHashScrapes) {
bSingleHashScrapes = true;
// Skip setting up the response. We want to scrape again
return;
}
}
String msg = Debug.getNestedExceptionMessage(e);
if ( scrape_reply != null ){
String trace_data;
if ( scrape_reply.length <= 150 ){
trace_data = new String(scrape_reply);
}else{
trace_data = new String(scrape_reply,0,150) + "...";
}
msg += " [" + trace_data + "]";
}
for (int i = 0; i < responses.size(); i++) {
TRTrackerScraperResponseImpl response = (TRTrackerScraperResponseImpl) responses
.get(i);
if (Logger.isEnabled()) {
HashWrapper hash = response.getHash();
Logger.log(new LogEvent(TorrentUtils.getDownloadManager(hash), LOGID,
LogEvent.LT_ERROR, "Error from scrape interface " + scrapeURL
+ " : " + msg + " (" + e.getClass() + ")"));
}
response.setNextScrapeStartTime(SystemTime.getCurrentTime()
+ FAULTY_SCRAPE_RETRY_INTERVAL);
response.setStatus(TRTrackerScraperResponse.ST_ERROR, MessageText
.getString(SS + "error")
+ msg);
// notify listeners
scraper.scrapeReceived(response);
}