if(logMINOR) Logger.minor(this, "Next meta-string: "+name+(name == null ? "" : " length "+name.length())+" for "+this);
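// A null meta-string selects the manifest's default document.
// Persistent requests use the grab*() lookups, which detach the selected document from the parent manifest; transient requests use the plain get*() lookups.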
if(name == null) {
if(!persistent) {
metadata = metadata.getDefaultDocument();
} else {
Metadata newMeta = metadata.grabDefaultDocument();
metadata = newMeta;
}
if(metadata == null)
throw new FetchException(FetchExceptionMode.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null, uri.addMetaStrings(new String[] { "" }));
} else {
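// Look up the named sub-document. A symbolic shortlink aliases another entry in the same manifest, so follow it once and reject self-referential loops.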
if(!persistent) {
Metadata origMd = metadata;
metadata = origMd.getDocument(name);
if (metadata != null && metadata.isSymbolicShortlink()) {
String oldName = name;
name = metadata.getSymbolicShortlinkTargetName();
if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
metadata = origMd.getDocument(name);
}
thisKey = thisKey.pushMetaString(name);
} else {
Metadata newMeta = metadata.grabDocument(name);
if (newMeta != null && newMeta.isSymbolicShortlink()) {
String oldName = name;
name = newMeta.getSymbolicShortlinkTargetName();
if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
newMeta = metadata.grabDocument(name);
}
metadata = newMeta;
thisKey = thisKey.pushMetaString(name);
}
if(metadata == null)
throw new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE, "can't find "+name);
}
continue; // loop
} else if(metadata.isArchiveManifest()) {
if(logMINOR) Logger.minor(this, "Is archive manifest (type="+metadata.getArchiveType()+" codec="+metadata.getCompressionCodec()+')');
if(metaStrings.isEmpty() && ctx.returnZIPManifests) {
// Just return the archive, whole.
metadata.setSimpleRedirect();
continue;
}
// First we need the archive metadata.
// Then parse it. Then we may need to fetch something from inside the archive.
// It's more efficient to keep the existing ah if we can, and it is vital in
// the case of binary blobs.
if(ah == null || !ah.getKey().equals(thisKey)) {
// Do loop detection on the archive that we are about to fetch.
actx.doLoopDetection(thisKey);
ah = context.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), metadata.getCompressionCodec(),
parent instanceof ClientGetter && ((ClientGetter)parent).collectingBinaryBlob(), persistent);
}
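// Keep a reference to the manifest metadata: it is needed if the archive itself has to be fetched below.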
archiveMetadata = metadata;
metadata = null; // Handed off to archiveMetadata above; cleared so it can be replaced by the metadata extracted from the archive.
// ah is set. This means we are currently handling an archive.
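// Ask the archive handler for the archive's own metadata; a null return means it is not available locally and the archive must be fetched first.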
Bucket metadataBucket = ah.getMetadata(actx, context.archiveManager);
if(metadataBucket != null) {
try {
metadata = Metadata.construct(metadataBucket);
metadataBucket.free();
} catch (InsufficientDiskSpaceException e) {
throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
} catch (IOException e) {
// Bucket error?
throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
}
} else {
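// Not available locally: fetch the whole archive, extract its metadata entry, and let the callback re-enter metadata handling asynchronously.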
final boolean persistent = this.persistent;
fetchArchive(false, archiveMetadata, ArchiveManager.METADATA_NAME, new ArchiveExtractCallback() {
private static final long serialVersionUID = 1L;
@Override
public void gotBucket(Bucket data, ClientContext context) {
if(logMINOR) Logger.minor(this, "gotBucket on "+SingleFileFetcher.this+" persistent="+persistent);
try {
metadata = Metadata.construct(data);
data.free();
innerWrapHandleMetadata(true, context);
} catch (MetadataParseException e) {
// Invalid metadata
onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
return;
} catch (IOException e) {
// Bucket error?
onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
return;
}
}
@Override
public void notInArchive(ClientContext context) {
onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), false, context);
}
@Override
public void onFailed(ArchiveRestartException e, ClientContext context) {
SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
}
@Override
public void onFailed(ArchiveFailureException e, ClientContext context) {
SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
}
}, context); // will result in this function being called again
return;
}
continue;
} else if(metadata.isArchiveMetadataRedirect()) {
if(logMINOR) Logger.minor(this, "Is archive-metadata");
// Fetch it from the archive
if(ah == null)
throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA, "Archive redirect not in an archive manifest");
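// The archive-internal name identifies the entry inside the container that holds the next layer of metadata.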
String filename = metadata.getArchiveInternalName();
if(logMINOR) Logger.minor(this, "Fetching "+filename);
Bucket dataBucket = ah.get(filename, actx, context.archiveManager);
if(dataBucket != null) {
if(logMINOR) Logger.minor(this, "Returning data");
final Metadata newMetadata;
try {
newMetadata = Metadata.construct(dataBucket);
dataBucket.free();
} catch (InsufficientDiskSpaceException e) {
throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
} catch (IOException e) {
throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
}
synchronized(this) {
metadata = newMetadata;
}
continue;
} else {
if(logMINOR) Logger.minor(this, "Fetching archive (thisKey="+thisKey+ ')');
// Metadata cannot contain pointers to files which don't exist.
// We enforce this in ArchiveHandler.
// Therefore, the archive needs to be fetched.
final boolean persistent = this.persistent;
fetchArchive(true, archiveMetadata, filename, new ArchiveExtractCallback() {
private static final long serialVersionUID = 1L;
@Override
public void gotBucket(Bucket data, ClientContext context) {
if(logMINOR) Logger.minor(this, "Returning data");
final Metadata newMetadata;
try {
newMetadata = Metadata.construct(data);
synchronized(SingleFileFetcher.this) {
metadata = newMetadata;
}