Package freenet.client

Examples of freenet.client.FetchException
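FetchException is the checked exception that freenet.client fetch code throws, or hands to callbacks, when a download fails. Each instance carries a FetchExceptionMode naming the failure class (BUCKET_ERROR, TOO_BIG, CANCELLED, INTERNAL_ERROR and so on), plus optional detail text, a cause, an expected size and a MIME type. The excerpts below show the recurring patterns. As a minimal orienting sketch (not taken from the excerpts; readBlock() is a hypothetical helper):

    try {
        readBlock(); // hypothetical local I/O step
    } catch (IOException e) {
        // Classify the low-level failure with an appropriate mode.
        throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    }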


                // checking, and might have non-obvious complications if we e.g. have data loss in
                // FEC decoding.
                Logger.warning(this, "Ignoring last block");
                return false;
            } else {
                parent.fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, "Splitfile block is too short"));
                return false;
            }
        }
        SplitFileFetcherCrossSegmentStorage callback = null;
        // Clearer to do duplicate handling here, plus we only need to decode once.
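A short block anywhere but the end of the splitfile is unrecoverable, so the whole parent fetch fails with SPLITFILE_ERROR; only a short final block is tolerated and merely logged. The shape of the check, as a sketch (blockTooShort and isLastBlock are hypothetical names):

    if(blockTooShort) {
        if(isLastBlock) {
            Logger.warning(this, "Ignoring last block"); // tolerable anomaly
            return false;
        }
        parent.fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, "Splitfile block is too short"));
        return false;
    }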


      }
    } catch (PersistenceDisabledException e) {
      // Impossible
      Logger.error(this, "Failed to start: "+e);
      synchronized(this) {
        this.failed = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
        this.finished = true;
      }
    }
  }
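Even a supposedly impossible failure is logged and recorded under the lock, so any thread watching this object observes a terminal state rather than hanging. A hypothetical waiter over the same two fields (a sketch; it assumes the enclosing method declares InterruptedException and FetchException):

    synchronized(this) {
        while(!finished) wait();
        if(failed != null) throw failed;
    }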

    mimeType = ContentFilter.stripMIMEType(mimeType);
    FilterMIMEType type = ContentFilter.getMIMEType(mimeType);
    if(type == null || ((!type.safeToRead) && type.readFilter == null)) {
      UnknownContentTypeException e = new UnknownContentTypeException(mimeType);
      data.free();
      onFailure(new FetchException(e.getFetchErrorCode(), data.size(), e, mimeType), null);
      return true;
    } else if(type.safeToRead) {
      tracker.removeFetcher(this);
      onSuccess(new FetchResult(new ClientMetadata(mimeType), data), null);
      return true;
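The guard above covers three outcomes: an unknown or unfilterable MIME type frees the data and fails with the error code carried by UnknownContentTypeException (the five-argument constructor also records the data size and the offending type), while a type that is safe to read completes immediately. On the calling side, a FetchException is normally routed into the failure callback rather than propagated; a minimal sketch, where startFetch() is a hypothetical entry point:

    try {
        startFetch(); // hypothetical step that may throw FetchException
    } catch (FetchException e) {
        onFailure(e, null, context); // same routing the later excerpts use
        return;
    }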

        } else {
            wantBinaryBlob = false;
        }
        blockFetchContext = new FetchContext(fetchContext, FetchContext.SPLITFILE_DEFAULT_BLOCK_MASK, true, null);
        if(parent.isCancelled())
            throw new FetchException(FetchExceptionMode.CANCELLED);
       
        try {
            // Completion via truncation.
            if(isFinalFetch && cb instanceof FileGetCompletionCallback &&
                    (decompressors == null || decompressors.size() == 0) &&
                    !fetchContext.filterData) {
                FileGetCompletionCallback fileCallback = ((FileGetCompletionCallback)cb);
                File targetFile = fileCallback.getCompletionFile();
                if(targetFile != null) {
                    callbackCompleteViaTruncation = fileCallback;
                    fileCompleteViaTruncation = File.createTempFile(targetFile.getName(), ".freenet-tmp", targetFile.getParentFile());
                    // Storage must actually create the RAF since it knows the length.
                } else {
                    callbackCompleteViaTruncation = null;
                    fileCompleteViaTruncation = null;
                }
            } else {
                callbackCompleteViaTruncation = null;
                fileCompleteViaTruncation = null;
            }
            // Construct the storage.
            ChecksumChecker checker = new CRCChecksumChecker();
            storage = new SplitFileFetcherStorage(metadata, this, decompressors, clientMetadata,
                    topDontCompress, topCompatibilityMode, fetchContext, realTimeFlag, getSalter(),
                    thisKey, parent.getURI(), isFinalFetch, parent.getClientDetail(checker),
                    context.random, context.tempBucketFactory,
                    persistent ? context.persistentRAFFactory : context.tempRAFFactory,
                    persistent ? context.jobRunner : context.dummyJobRunner,
                    context.ticker, context.memoryLimitedJobRunner, checker, persistent,
                    fileCompleteViaTruncation, context.getFileRandomAccessBufferFactory(persistent),
                    context.getChkFetchScheduler(realTimeFlag).fetchingKeys());
        } catch (InsufficientDiskSpaceException e) {
            throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
        } catch (IOException e) {
            Logger.error(this, "Failed to start splitfile fetcher because of disk I/O error?: "+e, e);
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
        }
        long eventualLength = Math.max(storage.decompressedLength, metadata.uncompressedDataLength());
        cb.onExpectedSize(eventualLength, context);
        if(metadata.uncompressedDataLength() > 0)
            cb.onFinalizedMetadata();
        if(eventualLength > 0 && fetchContext.maxOutputLength > 0 && eventualLength > fetchContext.maxOutputLength)
            throw new FetchException(FetchExceptionMode.TOO_BIG, eventualLength, true, clientMetadata.getMIMEType());
        getter = new SplitFileFetcherGet(this, storage);
        raf = storage.getRAF();
        if(logMINOR)
            Logger.minor(this, "Created "+(persistent?"persistent" : "transient")+" download for "+
                    thisKey+" on "+raf+" for "+this);
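The constructor above concentrates its FetchException logic in two places: storage failures are translated into mode-specific exceptions (InsufficientDiskSpaceException becomes NOT_ENOUGH_DISK_SPACE, any other IOException becomes BUCKET_ERROR), and the expected output size is checked against the context's limit before any blocks are fetched. Distilled, with createStorage() standing in hypothetically for the long SplitFileFetcherStorage call:

    try {
        storage = createStorage(); // hypothetical stand-in for the constructor call
    } catch (InsufficientDiskSpaceException e) {
        throw new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
    } catch (IOException e) {
        throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    }
    if(eventualLength > 0 && maxOutputLength > 0 && eventualLength > maxOutputLength)
        throw new FetchException(FetchExceptionMode.TOO_BIG, eventualLength, true, mimeType);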

    /** Fail the whole splitfile request when the on-disk storage throws.
     * @param e The IOException, generated when accessing the on-disk storage.
     */
    @Override
    public void failOnDiskError(IOException e) {
        fail(new FetchException(FetchExceptionMode.BUCKET_ERROR));
    }

    /** Fail the whole splitfile request when we get unrecoverable data corruption, e.g. can't
     * read the keys. FIXME ROBUSTNESS in some cases this could actually be recovered by
     * restarting from the metadata or the original URI. */
    @Override
    public void failOnDiskError(ChecksumFailedException e) {
        fail(new FetchException(FetchExceptionMode.BUCKET_ERROR));
    }
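Both failOnDiskError overloads above collapse to BUCKET_ERROR and drop the triggering exception. A variant sketch that preserves the cause for diagnostics, using the (mode, Throwable) constructor seen in the other excerpts:

    @Override
    public void failOnDiskError(IOException e) {
        // Variant: keep the original exception attached as the cause.
        fail(new FetchException(FetchExceptionMode.BUCKET_ERROR, e));
    }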

        cb.onFailure(e, this, context);
    }

    @Override
    public void cancel(ClientContext context) {
        fail(new FetchException(FetchExceptionMode.CANCELLED));
    }
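cancel() above reuses the ordinary failure path, so a cancelled fetch surfaces to callbacks as a FetchException with mode CANCELLED and needs no separate notification channel. The fourth excerpt's start path checks for cancellation the same way before doing any work:

    if(parent.isCancelled())
        throw new FetchException(FetchExceptionMode.CANCELLED);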

                    context.jobRunner.newSalt(), salter, resumed,
                    callbackCompleteViaTruncation != null);
        } catch (ResumeFailedException e) {
            raf.free();
            Logger.error(this, "Failed to resume storage file: "+e+" for "+raf, e);
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
        } catch (IOException e) {
            raf.free();
            Logger.error(this, "Failed to resume due to I/O error: "+e+" raf = "+raf, e);
            throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
        } catch (StorageFormatException e) {
            raf.free();
            Logger.error(this, "Failed to resume due to storage error: "+e+" raf = "+raf, e);
            throw new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Resume failed: "+e, e);
        } catch (FetchException e) {
            raf.free();
            throw e;
        }
        synchronized(this) {
            lastNotifiedStoreFetch = System.currentTimeMillis();
        }
        getter = new SplitFileFetcherGet(this, storage);
        try {
            if(storage.start(resumed))
                getter.schedule(context, storage.hasCheckedStore());
        } catch (KeyListenerConstructionException e) {
            Logger.error(this, "Key listener construction failed during resume: "+e, e);
            fail(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Resume failed: "+e, e));
            return;
        }
    }
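The resume path above applies one rule consistently: free the backing RAF before reporting any failure, then pick the mode by failure class (resume and I/O problems map to BUCKET_ERROR, format corruption to INTERNAL_ERROR, and an already-classified FetchException is rethrown unchanged). A condensed sketch, with resumeStorage() as a hypothetical stand-in for the real call:

    try {
        storage = resumeStorage(raf); // hypothetical stand-in
    } catch (ResumeFailedException | IOException e) {
        raf.free(); // always release the file before failing
        throw new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    } catch (StorageFormatException e) {
        raf.free();
        throw new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Resume failed: "+e, e);
    } catch (FetchException e) {
        raf.free();
        throw e; // already classified; just clean up and rethrow
    }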

        } else
          currentState.schedule(context);
      }
      if(cancelled) cancel();
    } catch (MalformedURLException e) {
      throw new FetchException(FetchExceptionMode.INVALID_URI, e);
    } catch (KeyListenerConstructionException e) {
      onFailure(e.getFetchException(), currentState, context);
    }
    return true;
  }
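Two wrapping styles appear above: MalformedURLException has no FetchException of its own, so it is wrapped as INVALID_URI, whereas KeyListenerConstructionException already carries a classified FetchException and is forwarded via getFetchException() instead of being re-wrapped. Forwarding keeps the original mode intact:

    } catch (KeyListenerConstructionException e) {
        // Forward the embedded FetchException; re-wrapping (e.g. as
        // INTERNAL_ERROR) would lose the original failure mode.
        onFailure(e.getFetchException(), currentState, context);
    }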

    // Fetching the container is essentially a full success, so we should update the latest known good.
    context.uskManager.checkUSK(uri, persistent(), false);
    try {
      if (binaryBlobWriter != null && !dontFinalizeBlobWriter) binaryBlobWriter.finalizeBucket();
    } catch (IOException ioe) {
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream: "+ioe), null, context);
      return;
    } catch (BinaryBlobAlreadyClosedException e) {
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream, already closed: "+e, e), null, context);
      return;
    }
    String mimeType = clientMetadata == null ? null : clientMetadata.getMIMEType();
   
    if(forceCompatibleExtension != null && ctx.filterData) {
      if(mimeType == null) {
        onFailure(new FetchException(FetchExceptionMode.MIME_INCOMPATIBLE_WITH_EXTENSION, "No MIME type but need specific extension \""+forceCompatibleExtension+"\""), null, context);
        return;
      }
      try {
        checkCompatibleExtension(mimeType);
      } catch (FetchException e) {
        onFailure(e, null, context);
        return;
      }
    }

    synchronized(this) {
      finished = true;
      currentState = null;
      expectedMIME = mimeType;
       
    }
    // The rest of the method does not need to be synchronized: the only
    // things set are the returnBucket and the result, and they are updated
    // on exit from the method. Not locking both avoids deadlocks from
    // nested locking and avoids holding a lock while doing massive
    // encrypted I/O.

    PipedOutputStream dataOutput = new PipedOutputStream();
    PipedInputStream dataInput = new PipedInputStream();
    OutputStream output = null;

    DecompressorThreadManager decompressorManager = null;
    ClientGetWorkerThread worker = null;
    Bucket finalResult = null;
    FetchResult result = null;

    long maxLen = -1;
    synchronized(this) {
      if(expectedSize > 0) {
        maxLen = expectedSize;
      }
    }
    if(ctx.filterData && maxLen >= 0) {
      maxLen = expectedSize * 2 + 1024;
    }
    if(maxLen == -1) {
      maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
    }
       
    FetchException ex = null; // set on failure
    try {
      if(returnBucket == null) finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
      else finalResult = returnBucket;
      if(logMINOR) Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
      dataOutput.connect(dataInput);
      result = new FetchResult(clientMetadata, finalResult);

      // Decompress
      if(decompressors != null) {
        if(logMINOR) Logger.minor(this, "Decompressing...");
        decompressorManager = new DecompressorThreadManager(dataInput, decompressors, maxLen);
        dataInput = decompressorManager.execute();
      }

      output = finalResult.getOutputStream();
      if(ctx.overrideMIME != null) mimeType = ctx.overrideMIME;
      worker = new ClientGetWorkerThread(new BufferedInputStream(dataInput), output, uri, mimeType, hashes, ctx.filterData, ctx.charset, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
      worker.start();
      try {
        streamGenerator.writeTo(dataOutput, context);
      } catch(IOException e) {
        //Check if the worker thread caught an exception
        worker.getError();
        //If not, throw the original error
        throw e;
      }

      // An error will propagate backwards, so wait for the worker first.
     
      if(logMINOR) Logger.minor(this, "Waiting for hashing, filtration, and writing to finish");
      worker.waitFinished();

      if(decompressorManager != null) {
        if(logMINOR) Logger.minor(this, "Waiting for decompression to finalize");
        decompressorManager.waitFinished();
      }

      if(worker.getClientMetadata() != null) {
        clientMetadata = worker.getClientMetadata();
        result = new FetchResult(clientMetadata, finalResult);
      }
      // These must be updated for ClientGet.
      synchronized(this) {
          this.expectedMIME = result.getMimeType();
          this.expectedSize = result.size();
      }
    } catch(UnsafeContentTypeException e) {
      Logger.normal(this, "Error filtering content: will not validate", e);
      ex = e.createFetchException(ctx.overrideMIME != null ? ctx.overrideMIME : expectedMIME, expectedSize);
      /*Not really the state's fault*/
    } catch(URISyntaxException e) {
      //Impossible
      Logger.error(this, "URISyntaxException converting a FreenetURI to a URI!: "+e, e);
      ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
      /*Not really the state's fault*/
    } catch(CompressionOutputSizeException e) {
      Logger.error(this, "Caught "+e, e);
      ex = new FetchException(FetchExceptionMode.TOO_BIG, e);
    } catch (InsufficientDiskSpaceException e) {
      ex = new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
    } catch(IOException e) {
      Logger.error(this, "Caught "+e, e);
      ex = new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    } catch(FetchException e) {
      Logger.error(this, "Caught "+e, e);
      ex = e;
    } catch(Throwable t) {
      Logger.error(this, "Caught "+t, t);
      ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, t);
    } finally {
      Closer.close(dataInput);
      Closer.close(dataOutput);
      Closer.close(output);
    }
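The cascade above runs from most specific to least: content-filter violations, an impossible URI conversion, oversize output, disk-space exhaustion, generic I/O trouble, pre-classified FetchExceptions (kept as-is), and finally a Throwable backstop wrapped as INTERNAL_ERROR, with every stream closed in the finally block regardless of outcome. Compressed to its shape, with runPipeline() as a hypothetical stand-in for the decompress/filter/write steps:

    FetchException ex = null;
    try {
        runPipeline(); // hypothetical: decompress, filter, hash, write out
    } catch (FetchException e) {
        ex = e; // already classified, keep the original mode
    } catch (IOException e) {
        ex = new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
    } catch (Throwable t) {
        ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, t); // backstop
    } finally {
        Closer.close(output); // release streams on success and failure alike
    }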
