Package freenet.client

Examples of freenet.client.FetchException
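
The snippets below come from Freenet's client layer (chiefly SingleFileFetcher and related fetch code). They show the two ways FetchException is used: thrown directly out of metadata handling, or delivered through an onFailure() callback. Every instance carries a FetchExceptionMode, optionally followed by a detail message, a cause, or expected size/MIME/redirect-URI information. Below is a minimal sketch of the constructor overloads as they appear in these snippets; the surrounding checkAndRead() method, its arguments, and the metadata field are hypothetical.

  // Hypothetical method illustrating the FetchException overloads used in the snippets below.
  private void checkAndRead(Bucket data, long maxLength, String mimeType) throws FetchException {
    // Mode only.
    if(data == null) throw new FetchException(FetchExceptionMode.INVALID_METADATA);
    // Mode + expected size + finalized-size flag + MIME type.
    if(data.size() > maxLength)
      throw new FetchException(FetchExceptionMode.TOO_BIG, data.size(), true, mimeType);
    try {
      metadata = Metadata.construct(data);
    } catch (IOException e) {
      // Mode + cause: local storage problems surface as BUCKET_ERROR.
      throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    } catch (MetadataParseException e) {
      // Mode + cause: unparseable metadata.
      throw new FetchException(FetchExceptionMode.INVALID_METADATA, e);
    }
  }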


      Logger.minor(this, "Adding key "+block.getClientKey().getURI()+" to "+this, new Exception("debug"));
    try {
      binaryBlobWriter.addKey(block, context);
    } catch (IOException e) {
      Logger.error(this, "Failed to write key to binary blob stream: "+e, e);
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to write key to binary blob stream: "+e), null, context);
    } catch (BinaryBlobAlreadyClosedException e) {
      Logger.error(this, "Failed to write key to binary blob stream (already closed??): "+e, e);
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to write key to binary blob stream (already closed??): "+e), null, context);
    }
  }
View Full Code Here


    FilterMIMEType type = ContentFilter.getMIMEType(mimeType);
    if(type == null)
      // Not our problem, will be picked up elsewhere.
      return;
    if(!DefaultMIMETypes.isValidExt(mimeType, forceCompatibleExtension))
      throw new FetchException(FetchExceptionMode.MIME_INCOMPATIBLE_WITH_EXTENSION);
  }
View Full Code Here

      output.close();
      if(hashes != null) {
        HashResult[] results = hashStream.getResults();
        if(!HashResult.strictEquals(results, hashes)) {
          Logger.error(this, "Hashes failed verification (length read is "+hashStream.getReadBytes()+") "+" for "+uri);
          throw new FetchException(FetchExceptionMode.CONTENT_HASH_FAILED);
        }
      }

      onFinish();
    } catch(Throwable t) {
View Full Code Here

    if(origURI == null) throw new NullPointerException();
    this.uri = persistent ? origURI.clone() : origURI;
    this.actx = actx;
    this.recursionLevel = recursionLevel + 1;
    if(recursionLevel > ctx.maxRecursionLevel)
      throw new FetchException(FetchExceptionMode.TOO_MUCH_RECURSION, "Too much recursion: "+recursionLevel+" > "+ctx.maxRecursionLevel);
    this.decompressors = new LinkedList<COMPRESSOR_TYPE>();
    this.topDontCompress = topDontCompress;
    this.topCompatibilityMode = topCompatibilityMode;
    if(parent instanceof ClientGetter) {
      metaSnoop = ((ClientGetter)parent).getMetaSnoop();
View Full Code Here

    this.metadata = newMeta;
    this.metaStrings = new ArrayList<String>();
    this.addedMetaStrings = 0;
    this.recursionLevel = fetcher.recursionLevel + 1;
    if(recursionLevel > ctx.maxRecursionLevel)
      throw new FetchException(FetchExceptionMode.TOO_MUCH_RECURSION);
    this.thisKey = fetcher.thisKey;
    // Do not copy the decompressors. Whether the metadata/container is compressed
    // is independent of whether the final data is; when we find the data we will
    // call back into the original fetcher.
    this.decompressors = new LinkedList<COMPRESSOR_TYPE>();
View Full Code Here

    handleMetadata(data, context);
  }
 
  private void handleMetadata(Bucket data, ClientContext context) {
    if(!ctx.followRedirects) {
      onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, "Told me not to follow redirects (splitfile block??)"), false, context);
      data.free();
      return;
    }
    if(parent.isCancelled()) {
      onFailure(new FetchException(FetchExceptionMode.CANCELLED), false, context);
      data.free();
      return;
    }
    if(data.size() > ctx.maxMetadataSize) {
      onFailure(new FetchException(FetchExceptionMode.TOO_BIG_METADATA), false, context);
      data.free();
      return;
    }
    // Parse metadata
    try {
      metadata = Metadata.construct(data);
      data.free();
      data = null;
      innerWrapHandleMetadata(false, context);
    } catch (MetadataParseException e) {
      onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
    } catch (EOFException e) {
      // This is a metadata error too.
      onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
    } catch (InsufficientDiskSpaceException e) {
      onFailure(new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE), false, context);
    } catch (IOException e) {
      // Bucket error?
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
    } finally {
      if(data != null) data.free();
    }
  }
View Full Code Here

    }
    if(parent.isCancelled()) {
      if(logMINOR)
        Logger.minor(this, "Parent is cancelled");
      result.asBucket().free();
      onFailure(new FetchException(FetchExceptionMode.CANCELLED), false, context);
      return;
    }
    if((!ctx.ignoreTooManyPathComponents) && (!metaStrings.isEmpty()) && isFinal) {
      // Some meta-strings left
      if(addedMetaStrings > 0) {
        // Should this be an error? Being able to fetch the data would be useful,
        // but such inserts could give unpredictable results. It might be better
        // to redirect to the key we actually fetched.
        rcb.onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this, context);
      } else {
        // TOO_MANY_PATH_COMPONENTS
        // report to user
        if(logMINOR) {
          Logger.minor(this, "Too many path components: for "+uri+" meta="+metaStrings.toString());
        }
        FreenetURI tryURI = uri;
        tryURI = tryURI.dropLastMetaStrings(metaStrings.size());
        rcb.onFailure(new FetchException(FetchExceptionMode.TOO_MANY_PATH_COMPONENTS, result.size(), (rcb == parent), result.getMimeType(), tryURI), this, context);
      }
      result.asBucket().free();
      return;
    } else if(result.size() > ctx.maxOutputLength) {
      rcb.onFailure(new FetchException(FetchExceptionMode.TOO_BIG, result.size(), (rcb == parent), result.getMimeType()), this, context);
      result.asBucket().free();
    } else {
      // Break locks; don't run filtering on the FEC thread, etc.
      context.getJobRunner(persistent()).queueInternal(new PersistentJob() {
           
View Full Code Here

        if(metadata.hasTopData()) {
          if((metadata.topSize > ctx.maxOutputLength) ||
              (metadata.topCompressedSize > ctx.maxTempLength)) {
            // Just in case...
            if(metadata.isSimpleRedirect() || metadata.isSplitfile()) clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even splitfiles can have mime types!
            throw new FetchException(FetchExceptionMode.TOO_BIG, metadata.topSize, true, clientMetadata.getMIMEType());
          }
          rcb.onExpectedTopSize(metadata.topSize, metadata.topCompressedSize, metadata.topBlocksRequired, metadata.topBlocksTotal, context);
          topCompatibilityMode = metadata.getTopCompatibilityCode();
          topDontCompress = metadata.getTopDontCompress();
        }
        HashResult[] hashes = metadata.getHashes();
        if(hashes != null) {
          rcb.onHashes(hashes, context);
        }
      }
      if(metadata.isSimpleManifest()) {
        if(logMINOR) Logger.minor(this, "Is simple manifest");
        String name;
        if(metadata.countDocuments() == 1 && metadata.getDocument("") != null && metadata.getDocument("").isSimpleManifest()) {
          Logger.error(this, "Manifest is called \"\" for "+this, new Exception("error"));
          name = "";
        } else if(metaStrings.isEmpty()) {
          FreenetURI u = uri;
          String last = u.lastMetaString();
          if(last == null || !last.equals(""))
            u = u.addMetaStrings(new String[] { "" });
          else
            u = null;
          throw new FetchException(FetchExceptionMode.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null, u);
        }
        else name = removeMetaString();
        // Since metadata is a document, we just replace metadata here
        if(logMINOR) Logger.minor(this, "Next meta-string: "+name+" length "+name.length()+" for "+this);
        if(name == null) {
          if(!persistent) {
            metadata = metadata.getDefaultDocument();
          } else {
            Metadata newMeta = metadata.grabDefaultDocument();
            metadata = newMeta;
          }
          if(metadata == null)
            throw new FetchException(FetchExceptionMode.NOT_ENOUGH_PATH_COMPONENTS, -1, false, null, uri.addMetaStrings(new String[] { "" }));
        } else {
          if(!persistent) {
            Metadata origMd = metadata;
            metadata = origMd.getDocument(name);
            if (metadata != null && metadata.isSymbolicShortlink()) {
              String oldName = name;
              name = metadata.getSymbolicShortlinkTargetName();
              if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
              metadata = origMd.getDocument(name);
            }
            thisKey = thisKey.pushMetaString(name);
          } else {
            Metadata newMeta = metadata.grabDocument(name);
            if (newMeta != null && newMeta.isSymbolicShortlink()) {
              String oldName = name;
              name = newMeta.getSymbolicShortlinkTargetName();
              if (oldName.equals(name)) throw new FetchException(FetchExceptionMode.INVALID_METADATA, "redirect loop: "+name);
              newMeta = metadata.getDocument(name);
            }
            metadata = newMeta;
            FreenetURI oldThisKey = thisKey;
            thisKey = thisKey.pushMetaString(name);
          }
          if(metadata == null)
            throw new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE, "can't find "+name);
        }
        continue; // loop
      } else if(metadata.isArchiveManifest()) {
        if(logMINOR) Logger.minor(this, "Is archive manifest (type="+metadata.getArchiveType()+" codec="+metadata.getCompressionCodec()+')');
        if(metaStrings.isEmpty() && ctx.returnZIPManifests) {
          // Just return the archive, whole.
          metadata.setSimpleRedirect();
          continue;
        }
        // First we need the archive metadata.
        // Then parse it. Then we may need to fetch something from inside the archive.
        // It's more efficient to keep the existing ah if we can, and it is vital in
        // the case of binary blobs.
        if(ah == null || !ah.getKey().equals(thisKey)) {
          // Do loop detection on the archive that we are about to fetch.
          actx.doLoopDetection(thisKey);
          ah = context.archiveManager.makeHandler(thisKey, metadata.getArchiveType(), metadata.getCompressionCodec(),
              (parent instanceof ClientGetter ? ((ClientGetter)parent).collectingBinaryBlob() : false), persistent);
        }
        archiveMetadata = metadata;
        metadata = null; // Now held as archiveMetadata; clear our reference so it can be collected
        // ah is set. This means we are currently handling an archive.
        Bucket metadataBucket = ah.getMetadata(actx, context.archiveManager);
        if(metadataBucket != null) {
          try {
            metadata = Metadata.construct(metadataBucket);
            metadataBucket.free();
          } catch (InsufficientDiskSpaceException e) {
              throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
          } catch (IOException e) {
            // Bucket error?
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
          }
        } else {
          final boolean persistent = this.persistent;
          fetchArchive(false, archiveMetadata, ArchiveManager.METADATA_NAME, new ArchiveExtractCallback() {
                        private static final long serialVersionUID = 1L;
                        @Override
            public void gotBucket(Bucket data, ClientContext context) {
              if(logMINOR) Logger.minor(this, "gotBucket on "+SingleFileFetcher.this+" persistent="+persistent);
              try {
                metadata = Metadata.construct(data);
                data.free();
                innerWrapHandleMetadata(true, context);
              } catch (MetadataParseException e) {
                // Invalid metadata
                onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
                return;
              } catch (IOException e) {
                // Bucket error?
                onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
                return;
              }
            }
            @Override
            public void notInArchive(ClientContext context) {
              onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "No metadata in container! Cannot happen as ArchiveManager should synthesise some!"), false, context);
            }
            @Override
            public void onFailed(ArchiveRestartException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
            @Override
            public void onFailed(ArchiveFailureException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
          }, context); // will result in this function being called again
          return;
        }
        continue; // metadataBucket was already freed above after parsing
      } else if(metadata.isArchiveMetadataRedirect()) {
        if(logMINOR) Logger.minor(this, "Is archive-metadata");
        // Fetch it from the archive
        if(ah == null)
          throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA, "Archive redirect not in an archive manifest");
        String filename = metadata.getArchiveInternalName();
        if(logMINOR) Logger.minor(this, "Fetching "+filename);
        Bucket dataBucket = ah.get(filename, actx, context.archiveManager);
        if(dataBucket != null) {
          if(logMINOR) Logger.minor(this, "Returning data");
          final Metadata newMetadata;
          try {
            newMetadata = Metadata.construct(dataBucket);
            dataBucket.free();
          } catch (InsufficientDiskSpaceException e) {
              throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
          } catch (IOException e) {
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
          }
          synchronized(this) {
            metadata = newMetadata;
          }
          continue;
        } else {
          if(logMINOR) Logger.minor(this, "Fetching archive (thisKey="+thisKey+ ')');
          // Metadata cannot contain pointers to files which don't exist.
          // We enforce this in ArchiveHandler.
          // Therefore, the archive needs to be fetched.
          final boolean persistent = this.persistent;
          fetchArchive(true, archiveMetadata, filename, new ArchiveExtractCallback() {
                        private static final long serialVersionUID = 1L;
                        @Override
            public void gotBucket(Bucket data, ClientContext context) {
              if(logMINOR) Logger.minor(this, "Returning data");
              final Metadata newMetadata;
              try {
                newMetadata = Metadata.construct(data);
                synchronized(SingleFileFetcher.this) {
                  metadata = newMetadata;
                }
                innerWrapHandleMetadata(true, context);
              } catch (IOException e) {
                onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), false, context);
              } catch (MetadataParseException e) {
                onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
              } finally {
                data.free();
              }
            }
            @Override
            public void notInArchive(ClientContext context) {
              onFailure(new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE), false, context);
            }
            @Override
            public void onFailed(ArchiveRestartException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
            @Override
            public void onFailed(ArchiveFailureException e,  ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
          }, context);
          // Will call back into this function when it has been fetched.
          return;
        }
      } else if(metadata.isArchiveInternalRedirect()) {
        if(logMINOR) Logger.minor(this, "Is archive-internal redirect");
        clientMetadata.mergeNoOverwrite(metadata.getClientMetadata());
        String mime = clientMetadata.getMIMEType();
        if(mime != null) rcb.onExpectedMIME(clientMetadata, context);
        if(metaStrings.isEmpty() && isFinal && clientMetadata.getMIMETypeNoParams() != null && ctx.allowedMIMETypes != null &&
            !ctx.allowedMIMETypes.contains(clientMetadata.getMIMETypeNoParams())) {
          throw new FetchException(FetchExceptionMode.WRONG_MIME_TYPE, -1, false, clientMetadata.getMIMEType());
        }
        // Fetch it from the archive
        if(ah == null)
          throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA, "Archive redirect not in an archive manifest");
        String filename = metadata.getArchiveInternalName();
        if(logMINOR) Logger.minor(this, "Fetching "+filename);
        Bucket dataBucket = ah.get(filename, actx, context.archiveManager);
        if(dataBucket != null) {
          if(logMINOR) Logger.minor(this, "Returning data");
          final Bucket out;
          try {
            // Data will not be freed until client is finished with it.
            if(persistent) {
              out = context.persistentBucketFactory.makeBucket(dataBucket.size());
              BucketTools.copy(dataBucket, out);
              dataBucket.free();
            } else {
              out = dataBucket;
            }
          } catch (InsufficientDiskSpaceException e) {
              throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
          } catch (IOException e) {
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
          }
          // Return the data
          onSuccess(new FetchResult(clientMetadata, out), context);
         
          return;
        } else {
          if(logMINOR) Logger.minor(this, "Fetching archive (thisKey="+thisKey+ ')');
          // Metadata cannot contain pointers to files which don't exist.
          // We enforce this in ArchiveHandler.
          // Therefore, the archive needs to be fetched.
          final boolean persistent = this.persistent;
          fetchArchive(true, archiveMetadata, filename, new ArchiveExtractCallback() {
                        private static final long serialVersionUID = 1L;
                        @Override
            public void gotBucket(Bucket data, ClientContext context) {
              if(logMINOR) Logger.minor(this, "Returning data");
              // Because this will be processed immediately, and because the callback uses a StreamGenerator,
              // we can simply pass in the output bucket, even if it is not persistent.
              // If we ever change it so a StreamGenerator can be saved, we'll have to copy here.
              // Transient buckets should throw if attempted to store.
              onSuccess(new FetchResult(clientMetadata, data), context);
            }
            @Override
            public void notInArchive(ClientContext context) {
              onFailure(new FetchException(FetchExceptionMode.NOT_IN_ARCHIVE), false, context);
            }
            @Override
            public void onFailed(ArchiveRestartException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
            @Override
            public void onFailed(ArchiveFailureException e, ClientContext context) {
              SingleFileFetcher.this.onFailure(new FetchException(e), false, context);
            }
          }, context);
          // Will call back into this function when it has been fetched.
          return;
        }
      } else if(metadata.isMultiLevelMetadata()) {
        if(logMINOR) Logger.minor(this, "Is multi-level metadata");
        // Fetch on a second SingleFileFetcher, like with archives.
        metadata.setSimpleRedirect();
        final SingleFileFetcher f = new SingleFileFetcher(this, persistent, false, metadata, new MultiLevelMetadataCallback(), ctx, context);
        // Clear our own metadata so it can be garbage collected, it will be replaced by whatever is fetched.
        // The new fetcher has our metadata so we don't need to removeMetadata().
        this.metadata = null;
        // We must transition to the sub-fetcher so that if the request is cancelled, it will get deleted.
        parent.onTransition(this, f, context);
       
        // Break locks. Must not call onFailure(), etc, from within SFF lock.
        context.getJobRunner(persistent).queueInternal(new PersistentJob() {
           
            @Override
            public boolean run(ClientContext context) {
                f.innerWrapHandleMetadata(true, context);
                return true;
            }
           
        });
        return;
      } else if(metadata.isSingleFileRedirect()) {
        if(logMINOR) Logger.minor(this, "Is single-file redirect");
        clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even splitfiles can have mime types!
        if(clientMetadata != null && !clientMetadata.isTrivial()) {
          rcb.onExpectedMIME(clientMetadata, context);
          if(logMINOR) Logger.minor(this, "MIME type is "+clientMetadata);
        }

        String mimeType = clientMetadata.getMIMETypeNoParams();
        if(mimeType != null && ArchiveManager.ARCHIVE_TYPE.isUsableArchiveType(mimeType) && metaStrings.size() > 0) {
          // Looks like an implicit archive, handle as such
          metadata.setArchiveManifest();
          // Pick up MIME type from inside archive
          clientMetadata.clear();
          if(logMINOR) Logger.minor(this, "Handling implicit container... (redirect)");
          continue;
        }
       
        if(metaStrings.isEmpty() && isFinal && mimeType != null && ctx.allowedMIMETypes != null &&
            !ctx.allowedMIMETypes.contains(mimeType)) {
          throw new FetchException(FetchExceptionMode.WRONG_MIME_TYPE, -1, false, clientMetadata.getMIMEType());
        }
       
        // Simple redirect: create a new SingleFileFetcher, which will fetch
        // the target URI and (hopefully!) report success through rcb.
        FreenetURI newURI = metadata.getSingleTarget();
        if(logMINOR) Logger.minor(this, "Redirecting to "+newURI);
        ClientKey redirectedKey;
        try {
          BaseClientKey k = BaseClientKey.getBaseKey(newURI);
          if(k instanceof ClientKey)
            redirectedKey = (ClientKey) k;
          else
            // FIXME do we want to allow redirects to USKs?
            // Without redirects to USKs, all SSK and CHKs are static.
            // This may be a desirable property.
            throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA, "Redirect to a USK");
        } catch (MalformedURLException e) {
          throw new FetchException(FetchExceptionMode.INVALID_URI, e);
        }
        ArrayList<String> newMetaStrings = newURI.listMetaStrings();
       
        // Move any new meta strings to beginning of our list of remaining meta strings
        while(!newMetaStrings.isEmpty()) {
          String o = newMetaStrings.remove(newMetaStrings.size()-1);
          metaStrings.add(0, o);
          addedMetaStrings++;
        }

        final SingleFileFetcher f = new SingleFileFetcher(parent, rcb, clientMetadata, redirectedKey, metaStrings, this.uri, addedMetaStrings, ctx, deleteFetchContext, realTimeFlag, actx, ah, archiveMetadata, maxRetries, recursionLevel, false, token, true, isFinal, topDontCompress, topCompatibilityMode, context, false);
        this.deleteFetchContext = false;
        if((redirectedKey instanceof ClientCHK) && !((ClientCHK)redirectedKey).isMetadata()) {
          rcb.onBlockSetFinished(this, context);
          byte [] redirectedCryptoKey = ((ClientCHK)redirectedKey).getCryptoKey();
          if (key instanceof ClientCHK && !Arrays.equals(
              ((ClientCHK)key).getCryptoKey(),
              redirectedCryptoKey))
            redirectedCryptoKey = null;
          // not splitfile, synthesize CompatibilityMode event
          rcb.onSplitfileCompatibilityMode(
              metadata.getMinCompatMode(),
              metadata.getMaxCompatMode(),
              redirectedCryptoKey,
              !((ClientCHK)redirectedKey).isCompressed(),
              true, true,
              context);
        }
        if(metadata.isCompressed()) {
          COMPRESSOR_TYPE codec = metadata.getCompressionCodec();
          f.addDecompressor(codec);
        }
        parent.onTransition(this, f, context);
        f.schedule(context);
        // All done! No longer our problem!
        archiveMetadata = null; // passed on
        return;
      } else if(metadata.isSplitfile()) {
        if(logMINOR) Logger.minor(this, "Fetching splitfile");
       
        clientMetadata.mergeNoOverwrite(metadata.getClientMetadata()); // even splitfiles can have mime types!
       
        String mimeType = clientMetadata.getMIMETypeNoParams();
        if(mimeType != null && ArchiveManager.ARCHIVE_TYPE.isUsableArchiveType(mimeType) && metaStrings.size() > 0) {
          // Looks like an implicit archive, handle as such
          metadata.setArchiveManifest();
          // Pick up MIME type from inside archive
          clientMetadata.clear();
          if(logMINOR) Logger.minor(this, "Handling implicit container... (splitfile)");
          continue;
        } else {
          if(clientMetadata != null && !clientMetadata.isTrivial())
            rcb.onExpectedMIME(clientMetadata, context);
        }
       
        if(metaStrings.isEmpty() && isFinal && mimeType != null && ctx.allowedMIMETypes != null &&
            !ctx.allowedMIMETypes.contains(mimeType)) {
          // Just in case...
          long len = metadata.uncompressedDataLength();
          throw new FetchException(FetchExceptionMode.WRONG_MIME_TYPE, len, false, clientMetadata.getMIMEType());
        }
       
        // Splitfile (possibly compressed)
       
        if(metadata.isCompressed()) {
          COMPRESSOR_TYPE codec = metadata.getCompressionCodec();
          addDecompressor(codec);
        }
       
        if(isFinal && !ctx.ignoreTooManyPathComponents) {
          if(!metaStrings.isEmpty()) {
            // Some meta-strings left
            if(addedMetaStrings > 0) {
              // Should this be an error? Being able to fetch the data would be useful,
              // but such inserts could give unpredictable results. It might be better
              // to redirect to the key we actually fetched.
              rcb.onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, "Invalid metadata: too many path components in redirects", thisKey), this, context);
            } else {
              // TOO_MANY_PATH_COMPONENTS
              // report to user
              FreenetURI tryURI = uri;
              tryURI = tryURI.dropLastMetaStrings(metaStrings.size());
              rcb.onFailure(new FetchException(FetchExceptionMode.TOO_MANY_PATH_COMPONENTS, metadata.uncompressedDataLength(), (rcb == parent), clientMetadata.getMIMEType(), tryURI), this, context);
            }
            // Just in case...
            return;
          }
        } else
          if(logMINOR) Logger.minor(this, "Not finished: rcb="+rcb+" for "+this);
       
        final long len = metadata.dataLength();
        final long uncompressedLen = metadata.isCompressed() ? metadata.uncompressedDataLength() : len;
       
        if((uncompressedLen > ctx.maxOutputLength) ||
            (len > ctx.maxTempLength)) {
          // Just in case...
          boolean compressed = metadata.isCompressed();
          throw new FetchException(FetchExceptionMode.TOO_BIG, uncompressedLen, isFinal && decompressors.size() <= (compressed ? 1 : 0), clientMetadata.getMIMEType());
        }
       
        ClientGetState sf;
        boolean reallyFinal = isFinal;
        if(isFinal && !parent.isCurrentState(this)) {
            Logger.error(this, "isFinal but not the current state for "+this,
                    new Exception("error"));
            reallyFinal = false;
        }
        sf = new SplitFileFetcher(metadata, rcb, parent, ctx, realTimeFlag,
                decompressors, clientMetadata, token, topDontCompress,
                topCompatibilityMode, persistent, thisKey, reallyFinal, context);
        this.deleteFetchContext = false;
        parent.onTransition(this, sf, context);
        try {
          sf.schedule(context);
        } catch (KeyListenerConstructionException e) {
          onFailure(e.getFetchException(), false, context);
          return;
        }
        rcb.onBlockSetFinished(this, context);
        // Clear our own metadata, we won't need it any more.
        // Note that SplitFileFetcher() above will have used the keys from the metadata,
        // and will have removed them from it so they don't get removed here.
        // Lack of garbage collection in db4o is a PITA!
        // For multi-level metadata etc see above.
        return;
      } else {
        Logger.error(this, "Don't know what to do with metadata: "+metadata);
        throw new FetchException(FetchExceptionMode.UNKNOWN_METADATA);
      }
    }
  }
View Full Code Here
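
For callers, almost every outcome of the big handleMetadata() loop above surfaces as a FetchException with a specific mode, often carrying extra context: TOO_MANY_PATH_COMPONENTS failures include a corrected URI with the surplus meta-strings dropped, and TOO_BIG includes the expected size and MIME type. A sketch of a GetCompletionCallback-style consumer follows; the getMode() accessor, the newURI and expectedSize fields, and the retryWith()/reportTooBig()/reportFailure() helpers are assumptions, inferred from how the exceptions are constructed above.

  @Override
  public void onFailure(FetchException e, ClientGetState state, ClientContext context) {
    switch (e.getMode()) { // accessor assumed; the mode is set by the constructors above
    case TOO_MANY_PATH_COMPONENTS:
      // The exception was built with a URI that drops the unused meta-strings.
      retryWith(e.newURI); // hypothetical helper; newURI field assumed
      break;
    case TOO_BIG:
      // Expected size and MIME type were attached at construction time.
      reportTooBig(e.expectedSize); // hypothetical helper; expectedSize field assumed
      break;
    default:
      reportFailure(e); // hypothetical helper
    }
  }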

  // LOCKING: If transient, DO NOT call this method from within handleMetadata.
  protected void innerWrapHandleMetadata(boolean notFinalizedSize, ClientContext context) {
    try {
      handleMetadata(context);
    } catch (MetadataParseException e) {
      onFailure(new FetchException(FetchExceptionMode.INVALID_METADATA, e), false, context);
    } catch (FetchException e) {
      if(notFinalizedSize)
        e.setNotFinalizedSize();
      onFailure(e, false, context);
    } catch (ArchiveFailureException e) {
      onFailure(new FetchException(e), false, context);
    } catch (ArchiveRestartException e) {
      onFailure(new FetchException(e), false, context);
    }
  }
View Full Code Here
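
The LOCKING note above matters in practice: for transient requests, calling innerWrapHandleMetadata() from inside handleMetadata() would re-enter the fetcher while its lock may be held, and failure callbacks could then fire under the lock. The multi-level-metadata branch earlier on this page shows the safe pattern; condensed from that snippet:

    // Defer the metadata handling to the job runner instead of calling it
    // inline, so no SingleFileFetcher lock is held when callbacks fire.
    context.getJobRunner(persistent).queueInternal(new PersistentJob() {
        @Override
        public boolean run(ClientContext context) {
            f.innerWrapHandleMetadata(true, context);
            return true;
        }
    });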

      if(edition <= usk.suggestedEdition) {
        context.uskManager.startTemporaryBackgroundFetcher(usk, context, ctx, true, realTimeFlag);
        edition = context.uskManager.lookupKnownGood(usk);
        if(edition > usk.suggestedEdition) {
          if(logMINOR) Logger.minor(SingleFileFetcher.class, "Redirecting to edition "+edition);
          cb.onFailure(new FetchException(FetchExceptionMode.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, context);
          return null;
        } else if(edition == -1 &&
            context.uskManager.lookupLatestSlot(usk) == -1) { // We do not want to be going round and round here!
          // Check the datastore first.
          USKFetcherTag tag =
            context.uskManager.getFetcher(usk.copy(usk.suggestedEdition), ctx, false, requester.persistent(),
                realTimeFlag, new MyUSKFetcherCallback(requester, cb, usk, metaStrings, ctx, actx, realTimeFlag, maxRetries, recursionLevel, dontTellClientGet, l, requester.persistent(), true), false, context, true);
          if(isEssential)
            requester.addMustSucceedBlocks(1);
          return tag;
         
        } else {
          // Transition to SingleFileFetcher
          GetCompletionCallback myCB =
            new USKProxyCompletionCallback(usk, cb, requester.persistent());
          // Want to update the latest known good iff the fetch succeeds.
          SingleFileFetcher sf =
            new SingleFileFetcher(requester, myCB, null, usk.getSSK(), metaStrings,
                usk.getURI().addMetaStrings(metaStrings), 0, ctx, false, realTimeFlag, actx, null, null, maxRetries, recursionLevel,
                dontTellClientGet, l, isEssential, isFinal, false, (short)0, context, false);
          return sf;
        }
      } else {
        cb.onFailure(new FetchException(FetchExceptionMode.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, context);
        return null;
      }
    } else {
      // Do a thorough, blocking search
      USKFetcherTag tag =
View Full Code Here
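
In the USK branch above, discovering a newer edition does not silently fetch it: the request fails with PERMANENT_REDIRECT, whose URI points at the new edition (usk.copy(edition).getURI().addMetaStrings(metaStrings)). A minimal sketch of a client loop that follows such redirects; fetch(), the redirect bound, and the getMode()/newURI members are assumptions.

    FreenetURI target = initialURI; // hypothetical starting point
    for (int hops = 0; hops < 10; hops++) { // arbitrary redirect bound
      try {
        return fetch(target); // hypothetical blocking fetch helper
      } catch (FetchException e) {
        if (e.getMode() == FetchExceptionMode.PERMANENT_REDIRECT && e.newURI != null) {
          target = e.newURI; // follow the edition redirect
          continue;
        }
        throw e;
      }
    }
    throw new FetchException(FetchExceptionMode.TOO_MUCH_RECURSION, "Too many USK redirects");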
