Package freenet.support.compress

Examples of freenet.support.compress.DecompressorThreadManager
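
All of the snippets below follow the same pattern: the compressed data is written to a
PipedOutputStream, a DecompressorThreadManager reads the matching PipedInputStream and runs
the Compressor chain, its execute() method returns a new stream carrying the decompressed
bytes, and a ClientGetWorkerThread drains that stream into the destination while the caller
waits for both to finish. Below is a minimal sketch of that pattern, assuming streamGenerator,
decompressors, output, maxLen and context are supplied by the surrounding fetch code as they
are in the snippets that follow:

  // Sketch only: pipe wiring, decompression and draining as used in the examples below.
  // streamGenerator, decompressors, output, maxLen and context come from the caller.
  PipedInputStream pipeIn = new PipedInputStream();
  PipedOutputStream pipeOut = new PipedOutputStream(pipeIn);
  DecompressorThreadManager decompressorManager =
      new DecompressorThreadManager(pipeIn, decompressors, maxLen);
  pipeIn = decompressorManager.execute(); // stream now yields decompressed data
  ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn),
      output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
  worker.start();                            // drains the decompressed stream into output
  streamGenerator.writeTo(pipeOut, context); // feed the compressed data into the pipe
  decompressorManager.waitFinished();        // propagates any decompression failure
  worker.waitFinished();                     // propagates any filtering/writing failure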


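  // Fetch-success callback: allocate a temporary Bucket, then pipe the generated stream
  // through DecompressorThreadManager and a ClientGetWorkerThread into it.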
  @Override
  public void onSuccess(StreamGenerator streamGenerator, ClientMetadata clientMetadata, List<? extends Compressor> decompressors, final ClientGetState state, ClientContext context) {
    if(logMINOR)
      Logger.minor(this, "Success on "+this+" from "+state+" : length "+streamGenerator.size()+"mime type "+clientMetadata.getMIMEType());
    DecompressorThreadManager decompressorManager = null;
    OutputStream output = null;
    Bucket finalResult = null;
    long maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
    try {
      finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
    } catch (InsufficientDiskSpaceException e) {
      onFailure(new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE), state, context);
      return;
    } catch (IOException e) {
      Logger.error(this, "Caught "+e, e);
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), state, context);
      return;
    } catch(Throwable t) {
      Logger.error(this, "Caught "+t, t);
      onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, t), state, context);
      return;
    }

    PipedInputStream pipeIn = null;
    PipedOutputStream pipeOut = null;
    try {
      output = finalResult.getOutputStream();
      // Decompress
      if(decompressors != null) {
        if(logMINOR) Logger.minor(this, "Decompressing...");
        pipeIn = new PipedInputStream();
        pipeOut = new PipedOutputStream(pipeIn);
        decompressorManager = new DecompressorThreadManager(pipeIn, decompressors, maxLen);
        pipeIn = decompressorManager.execute();
        ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn), output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
        worker.start();
        streamGenerator.writeTo(pipeOut, context);
        worker.waitFinished();
        // If this throws, we want the whole request to fail.


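    // Variant with content filtering and hash verification: an optional caller-supplied
    // returnBucket becomes the destination, and the worker's ClientMetadata is folded
    // into the FetchResult once filtering has finished.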
    PipedOutputStream dataOutput = new PipedOutputStream();
    PipedInputStream dataInput = new PipedInputStream();
    OutputStream output = null;

    DecompressorThreadManager decompressorManager = null;
    ClientGetWorkerThread worker = null;
    Bucket finalResult = null;
    FetchResult result = null;

    long maxLen = -1;
    synchronized(this) {
      if(expectedSize > 0) {
        maxLen = expectedSize;
      }
    }
    if(ctx.filterData && maxLen >= 0) {
      maxLen = expectedSize * 2 + 1024;
    }
    if(maxLen == -1) {
      maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
    }
       
    FetchException ex = null; // set on failure
    try {
      if(returnBucket == null) finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
      else finalResult = returnBucket;
      if(logMINOR) Logger.minor(this, "Writing final data to "+finalResult+" return bucket is "+returnBucket);
      dataOutput.connect(dataInput);
      result = new FetchResult(clientMetadata, finalResult);

      // Decompress
      if(decompressors != null) {
        if(logMINOR) Logger.minor(this, "Decompressing...");
        decompressorManager = new DecompressorThreadManager(dataInput, decompressors, maxLen);
        dataInput = decompressorManager.execute();
      }

      output = finalResult.getOutputStream();
      if(ctx.overrideMIME != null) mimeType = ctx.overrideMIME;
      worker = new ClientGetWorkerThread(new BufferedInputStream(dataInput), output, uri, mimeType, hashes, ctx.filterData, ctx.charset, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
      worker.start();
      try {
        streamGenerator.writeTo(dataOutput, context);
      } catch(IOException e) {
        //Check if the worker thread caught an exception
        worker.getError();
        //If not, throw the original error
        throw e;
      }

      // An error will propagate backwards, so wait for the worker first.
     
      if(logMINOR) Logger.minor(this, "Waiting for hashing, filtration, and writing to finish");
      worker.waitFinished();

      if(decompressorManager != null) {
        if(logMINOR) Logger.minor(this, "Waiting for decompression to finalize");
        decompressorManager.waitFinished();
      }

      if(worker.getClientMetadata() != null) {
        clientMetadata = worker.getClientMetadata();
        result = new FetchResult(clientMetadata, finalResult);

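            // Hash-verification path: the file is read through a ClientGetWorkerThread that
            // discards the bytes (NullOutputStream); no DecompressorThreadManager is needed.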
                throw new IOException("File is shorter than target length "+length);
            raf.setLength(length);
            InputStream is = new BufferedInputStream(new FileInputStream(raf.getFD()));
            // Check hashes...
           
            DecompressorThreadManager decompressorManager = null;
            ClientGetWorkerThread worker = null;

            worker = new ClientGetWorkerThread(is, new NullOutputStream(), uri, null, hashes, false, null, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
            worker.start();
           

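        // Write the generated stream into a fresh Bucket, decompressing through a pipe when
        // a decompressor chain is present, otherwise streaming straight to the Bucket.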
        data = context.getBucketFactory(persistent).makeBucket(maxLen);
        output = data.getOutputStream();
        if(decompressors != null) {
          if(logMINOR) Logger.minor(this, "decompressing...");
          pipeOut.connect(pipeIn);
          DecompressorThreadManager decompressorManager = new DecompressorThreadManager(pipeIn, decompressors, maxLen);
          pipeIn = decompressorManager.execute();
          ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn), output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
          worker.start();
          streamGenerator.writeTo(pipeOut, context);
          decompressorManager.waitFinished();
          worker.waitFinished();
        } else streamGenerator.writeTo(output, context);
        // We want to see anything thrown when these are closed.
        output.close(); output = null;
        pipeOut.close(); pipeOut = null;

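        // Same pattern with a non-persistent Bucket (getBucketFactory(false)); the streams
        // are closed unconditionally at the end instead of being nulled out after closing.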
        data = context.getBucketFactory(false).makeBucket(maxLen);
        output = data.getOutputStream();
        if(decompressors != null) {
          if(logMINOR) Logger.minor(this, "decompressing...");
          pipeOut.connect(pipeIn);
          DecompressorThreadManager decompressorManager = new DecompressorThreadManager(pipeIn, decompressors, maxLen);
          pipeIn = decompressorManager.execute();
          ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn), output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
          worker.start();
          streamGenerator.writeTo(pipeOut, context);
          decompressorManager.waitFinished();
          worker.waitFinished();
        } else streamGenerator.writeTo(output, context);

        output.close();
        pipeOut.close();

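        // Variant where the ClientGetWorkerThread closes the output stream itself; the
        // caller only closes output on the non-decompressing path.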
        finalData = context.getBucketFactory(persistent).makeBucket(maxLen);
        output = finalData.getOutputStream();
        if(decompressors != null) {
          if(logMINOR) Logger.minor(this, "decompressing...");
          pipeIn.connect(pipeOut);
          DecompressorThreadManager decompressorManager = new DecompressorThreadManager(pipeIn, decompressors, maxLen);
          pipeIn = decompressorManager.execute();
          ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn), output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
          worker.start();
          streamGenerator.writeTo(pipeOut, context);
          decompressorManager.waitFinished();
          worker.waitFinished();
          // ClientGetWorkerThread will close output.
        } else {
            streamGenerator.writeTo(output, context);
            output.close();
