OutputStream output = null;
DecompressorThreadManager decompressorManager = null;
ClientGetWorkerThread worker = null;
Bucket finalResult = null;
FetchResult result = null;
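// Decide the maximum length of data we are willing to handle.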
long maxLen = -1;
synchronized(this) {
if(expectedSize > 0) {
maxLen = expectedSize;
}
}
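// Allow headroom for the content filter, which may expand the data.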
if(ctx.filterData && maxLen >= 0) {
// Use the maxLen snapshot taken under the lock rather than re-reading expectedSize unsynchronized.
maxLen = maxLen * 2 + 1024;
}
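// No expected size known: fall back to the configured length limits.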
if(maxLen == -1) {
maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
}
FetchException ex = null; // set on failure
try {
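// Write the final data into the caller's bucket if one was supplied, otherwise into a fresh temporary bucket.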
if(returnBucket == null) finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
else finalResult = returnBucket;
if(logMINOR) Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
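// Wire up the pipe: bytes written to dataOutput become readable from dataInput.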
dataOutput.connect(dataInput);
result = new FetchResult(clientMetadata, finalResult);
// Decompress: chain the decompressors onto the pipe if the data is compressed.
if(decompressors != null) {
if(logMINOR) Logger.minor(this, "Decompressing...");
decompressorManager = new DecompressorThreadManager(dataInput, decompressors, maxLen);
dataInput = decompressorManager.execute();
}
output = finalResult.getOutputStream();
if(ctx.overrideMIME != null) mimeType = ctx.overrideMIME;
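// The worker thread reads from the pipe, hashing, content-filtering and writing the data to the output bucket.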
worker = new ClientGetWorkerThread(new BufferedInputStream(dataInput), output, uri, mimeType, hashes, ctx.filterData, ctx.charset, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
worker.start();
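// Feed the fetched data into the pipe; the worker consumes it concurrently from the other end.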
try {
streamGenerator.writeTo(dataOutput, context);
} catch(IOException e) {
// getError() rethrows any exception the worker thread caught
worker.getError();
// If the worker saw no error, propagate the original IOException
throw e;
}
// An error will propagate backwards, so wait for the worker first.
if(logMINOR) Logger.minor(this, "Waiting for hashing, filtration, and writing to finish");
worker.waitFinished();
if(decompressorManager != null) {
if(logMINOR) Logger.minor(this, "Waiting for decompression to finalize");
decompressorManager.waitFinished();
}
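// Content filtering may have produced more precise metadata (e.g. the detected MIME type); use it if so.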
if(worker.getClientMetadata() != null) {
clientMetadata = worker.getClientMetadata();
result = new FetchResult(clientMetadata, finalResult);
}
// These must be updated so ClientGet reports the correct MIME type and size.
synchronized(this) {
this.expectedMIME = result.getMimeType();
this.expectedSize = result.size();
}
} catch(UnsafeContentTypeException e) {
Logger.normal(this, "Error filtering content: will not validate", e);
ex = e.createFetchException(ctx.overrideMIME != null ? ctx.overrideMIME : expectedMIME, expectedSize);
/*Not really the state's fault*/
} catch(URISyntaxException e) {
//Impossible
Logger.error(this, "URISyntaxException converting a FreenetURI to a URI!: "+e, e);
ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
/*Not really the state's fault*/
} catch(CompressionOutputSizeException e) {
Logger.error(this, "Caught "+e, e);
ex = new FetchException(FetchExceptionMode.TOO_BIG, e);
} catch (InsufficientDiskSpaceException e) {
ex = new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
} catch(IOException e) {
Logger.error(this, "Caught "+e, e);
ex = new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
} catch(FetchException e) {
Logger.error(this, "Caught "+e, e);
ex = e;
} catch(Throwable t) {
Logger.error(this, "Caught "+t, t);
ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, t);
} finally {
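// Close the streams in all cases; Closer.close() tolerates nulls and logs close() failures rather than throwing.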
Closer.close(dataInput);
Closer.close(dataOutput);
Closer.close(output);
}
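// On failure: report the error, then free any bucket we allocated ourselves.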
if(ex != null) {
onFailure(ex, state, context, true);
if(finalResult != null && finalResult != returnBucket) {
finalResult.free();
}
if(result != null) {
Bucket data = result.asBucket();
data.free();
}
return;
}
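// On success, ask the job runner to write a checkpoint soon so progress is persisted promptly.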
context.getJobRunner(persistent()).setCheckpointASAP();