// Fetching the container is essentially a full success, we should update the latest known good.
context.uskManager.checkUSK(uri, persistent(), false);
// Finalize the binary blob (if we are writing one and finalization wasn't deferred).
// Either failure mode aborts the whole fetch as a BUCKET_ERROR.
try {
	if (binaryBlobWriter != null && !dontFinalizeBlobWriter) binaryBlobWriter.finalizeBucket();
} catch (IOException ioe) {
	onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream: "+ioe), null, context);
	return;
} catch (BinaryBlobAlreadyClosedException e) {
	onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream, already closed: "+e, e), null, context);
	return;
}
String mimeType = clientMetadata == null ? null : clientMetadata.getMIMEType();
// When content filtering is on and the caller demanded a specific file extension,
// the detected MIME type must exist and be compatible with that extension.
if(forceCompatibleExtension != null && ctx.filterData) {
	if(mimeType == null) {
		onFailure(new FetchException(FetchExceptionMode.MIME_INCOMPATIBLE_WITH_EXTENSION, "No MIME type but need specific extension \""+forceCompatibleExtension+"\""), null, context);
		return;
	}
	try {
		checkCompatibleExtension(mimeType);
	} catch (FetchException e) {
		onFailure(e, null, context);
		return;
	}
}
// Mark the fetch as complete and drop the current state under the lock.
synchronized(this) {
	finished = true;
	currentState = null;
	expectedMIME = mimeType;
}
// Rest of method does not need to be synchronized.
// Variables will be updated on exit of method, and the only thing that is
// set is the returnBucket and the result. Not locking not only prevents
// nested locking resulting in deadlocks, it also prevents long locks due to
// doing massive encrypted I/Os while holding a lock.
// Pipe: we write the fetched stream into dataOutput; the worker thread reads
// from dataInput (possibly via a decompressor chain inserted below).
PipedOutputStream dataOutput = new PipedOutputStream();
PipedInputStream dataInput = new PipedInputStream();
OutputStream output = null;
DecompressorThreadManager decompressorManager = null;
ClientGetWorkerThread worker = null;
Bucket finalResult = null;
FetchResult result = null;
// Compute the maximum byte length we will accept for the output.
long maxLen = -1;
synchronized(this) {
	if(expectedSize > 0) {
		maxLen = expectedSize;
	}
}
// If filtering, allow the filter some expansion headroom over the expected size.
// NOTE(review): expectedSize is read here outside the synchronized block that
// guarded it just above — presumably benign (it was just copied into maxLen),
// but confirm no concurrent writer can change it between the two reads.
if(ctx.filterData && maxLen >= 0) {
	maxLen = expectedSize * 2 + 1024;
}
// No expected size known: fall back to the configured limits.
if(maxLen == -1) {
	maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
}
FetchException ex = null; // set on failure
try {
	// Write into the caller-supplied bucket if given, otherwise allocate one.
	if(returnBucket == null) finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
	else finalResult = returnBucket;
	if(logMINOR) Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
	dataOutput .connect(dataInput);
	result = new FetchResult(clientMetadata, finalResult);
	// Decompress
	if(decompressors != null) {
		if(logMINOR) Logger.minor(this, "Decompressing...");
		// Insert the decompressor chain between the pipe and the worker;
		// dataInput is replaced by the tail of the chain.
		decompressorManager = new DecompressorThreadManager(dataInput, decompressors, maxLen);
		dataInput = decompressorManager.execute();
	}
	output = finalResult.getOutputStream();
	if(ctx.overrideMIME != null) mimeType = ctx.overrideMIME;
	// The worker hashes, optionally content-filters, and copies the (decompressed)
	// stream into the final bucket while we feed the pipe below.
	worker = new ClientGetWorkerThread(new BufferedInputStream(dataInput), output, uri, mimeType, hashes, ctx.filterData, ctx.charset, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
	worker.start();
	try {
		streamGenerator.writeTo(dataOutput, context);
	} catch(IOException e) {
		//Check if the worker thread caught an exception
		worker.getError();
		//If not, throw the original error
		throw e;
	}
	// An error will propagate backwards, so wait for the worker first.
	if(logMINOR) Logger.minor(this, "Waiting for hashing, filtration, and writing to finish");
	worker.waitFinished();
	if(decompressorManager != null) {
		if(logMINOR) Logger.minor(this, "Waiting for decompression to finalize");
		decompressorManager.waitFinished();
	}
	// The filter may have refined the metadata (e.g. detected MIME type);
	// rebuild the result around the updated metadata if so.
	if(worker.getClientMetadata() != null) {
		clientMetadata = worker.getClientMetadata();
		result = new FetchResult(clientMetadata, finalResult);
	}
	// These must be updated for ClientGet.
	synchronized(this) {
		this.expectedMIME = result.getMimeType();
		this.expectedSize = result.size();
	}
} catch(UnsafeContentTypeException e) {
	// Content filter rejected the data; convert to the appropriate FetchException.
	Logger.normal(this, "Error filtering content: will not validate", e);
	ex = e.createFetchException(ctx.overrideMIME != null ? ctx.overrideMIME : expectedMIME, expectedSize);
	/*Not really the state's fault*/
} catch(URISyntaxException e) {
	//Impossible
	Logger.error(this, "URISyntaxException converting a FreenetURI to a URI!: "+e, e);
	ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
	/*Not really the state's fault*/
} catch(CompressionOutputSizeException e) {
	// Decompressed data exceeded maxLen.
	Logger.error(this, "Caught "+e, e);
	ex = new FetchException(FetchExceptionMode.TOO_BIG, e);
} catch (InsufficientDiskSpaceException e) {
	ex = new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
} catch(IOException e) {
	Logger.error(this, "Caught "+e, e);
	ex = new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
} catch(FetchException e) {
	Logger.error(this, "Caught "+e, e);
	ex = e;
} catch(Throwable t) {
	// Catch-all so any failure is reported via ex rather than escaping;
	// presumably ex is handled after this finally block (not visible here).
	Logger.error(this, "Caught "+t, t);
	ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, t);
} finally {
	// Always release the pipe and output streams, success or failure.
	Closer.close(dataInput);
	Closer.close(dataOutput);
	Closer.close(output);
}