Package freenet.client

Examples of freenet.client.FetchException


            ClientGetState state, ClientContext context) {
        context.uskManager.checkUSK(uri, persistent(), false);
        try {
            if (binaryBlobWriter != null && !dontFinalizeBlobWriter) binaryBlobWriter.finalizeBucket();
        } catch (IOException ioe) {
            onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream: "+ioe), null, context);
            return;
        } catch (BinaryBlobAlreadyClosedException e) {
            onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream, already closed: "+e, e), null, context);
            return;
        }
        File completionFile = getCompletionFile();
        assert(completionFile != null);
        assert(!ctx.filterData);
        Logger.normal(this, "Succeeding via truncation from "+tempFile+" to "+completionFile);
        FetchException ex = null;
        RandomAccessFile raf = null;
        FetchResult result = null;
        try {
            raf = new RandomAccessFile(tempFile, "rw");
            if(raf.length() < length)
                throw new IOException("File is shorter than target length "+length);
            raf.setLength(length);
            InputStream is = new BufferedInputStream(new FileInputStream(raf.getFD()));
            // Check hashes...
           
            DecompressorThreadManager decompressorManager = null;
            ClientGetWorkerThread worker = null;

            worker = new ClientGetWorkerThread(is, new NullOutputStream(), uri, null, hashes, false, null, ctx.prefetchHook, ctx.tagReplacer, context.linkFilterExceptionProvider);
            worker.start();
           
            if(logMINOR) Logger.minor(this, "Waiting for hashing, filtration, and writing to finish");
            worker.waitFinished();
           
            is.close();
            is = null;
            raf = null; // FD is closed.
           
            // We are still here so it worked.
           
            if(!FileUtil.renameTo(tempFile, completionFile))
                throw new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to rename from temp file "+tempFile);
           
            // Success!
           
            synchronized(this) {
                finished = true;
                currentState = null;
                expectedMIME = metadata.getMIMEType();
                expectedSize = length;
            }
           
            result = new FetchResult(metadata, returnBucket);
           
        } catch (IOException e) {
            Logger.error(this, "Failed while completing via truncation: "+e, e);
            ex = new FetchException(FetchExceptionMode.BUCKET_ERROR, e);
        } catch (URISyntaxException e) {
            Logger.error(this, "Impossible failure while completing via truncation: "+e, e);
            ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
        } catch(FetchException e) {
            // Hashes failed.
            Logger.error(this, "Caught "+e, e);
            ex = e;
        } catch (Throwable e) {
            Logger.error(this, "Failed while completing via truncation: "+e, e);
            ex = new FetchException(FetchExceptionMode.INTERNAL_ERROR, e);
        }
        if(ex != null) {
            onFailure(ex, state, context, true);
            if(raf != null)
                try {
View Full Code Here
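
The excerpt above shows the most common construction pattern on this page: a low-level failure (an IOException from finalizeBucket(), the rename, or the hash-checking worker) is wrapped in a FetchException with FetchExceptionMode.BUCKET_ERROR or INTERNAL_ERROR and reported through onFailure(). Below is a minimal, self-contained sketch of that wrapping; the helper class and method names are invented for illustration, and the import path for the FetchExceptionMode enum is assumed.

import java.io.IOException;

import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode; // assumed import path for the mode enum

public class BucketErrorWrapping {

    // Hypothetical helper mirroring the catch blocks above: keep the original
    // exception as the cause and carry a human-readable message.
    static FetchException asBucketError(String action, IOException ioe) {
        return new FetchException(FetchExceptionMode.BUCKET_ERROR,
                "Failed to " + action + ": " + ioe, ioe);
    }

    public static void main(String[] args) {
        FetchException fe = asBucketError("close binary blob stream", new IOException("disk full"));
        // FetchException exposes its mode as a public field, as used in the excerpts.
        System.out.println(fe.mode + ": " + fe.getMessage());
    }
}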


          ar = archiveRestarts;
        }
        if(logMINOR)
          Logger.minor(this, "Archive restart on "+this+" ar="+ar);
        if(ar > ctx.maxArchiveRestarts)
          e = new FetchException(FetchExceptionMode.TOO_MANY_ARCHIVE_RESTARTS);
        else {
          try {
            start(context);
          } catch (FetchException e1) {
            e = e1;
            continue;
          }
          return;
        }
      }
      boolean alreadyFinished = false;
      synchronized(this) {
        if(finished && !force) {
          if(!cancelled)
            Logger.error(this, "Already finished - not calling callbacks on "+this, new Exception("error"));
          alreadyFinished = true;
        }
        finished = true;
        oldState = currentState;
        currentState = null;
        String mime = e.getExpectedMimeType();
        if(mime != null)
            this.expectedMIME = mime;
      }
      if(!alreadyFinished) {
        try {
          if (binaryBlobWriter != null && !dontFinalizeBlobWriter) binaryBlobWriter.finalizeBucket();
        } catch (IOException ioe) {
          // The request has already failed, but fblob creation failed too.
          // The invalid fblob must be reported; that matters more than a valid but incomplete fblob (ADNF, for example).
          if(e.mode != FetchExceptionMode.CANCELLED && !force)
            e = new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream: "+ioe);
        } catch (BinaryBlobAlreadyClosedException ee) {
          if(e.mode != FetchExceptionMode.BUCKET_ERROR && e.mode != FetchExceptionMode.CANCELLED && !force)
            e = new FetchException(FetchExceptionMode.BUCKET_ERROR, "Failed to close binary blob stream, already closed: "+ee, ee);
        }
      }
      if(e.errorCodes != null && e.errorCodes.isOneCodeOnly())
        e = new FetchException(e.errorCodes.getFirstCodeFetch());
      if(e.mode == FetchExceptionMode.DATA_NOT_FOUND && super.successfulBlocks > 0)
        e = new FetchException(e, FetchExceptionMode.ALL_DATA_NOT_FOUND);
      if(logMINOR) Logger.minor(this, "onFailure("+e+", "+state+") on "+this+" for "+uri, e);
      final FetchException e1 = e;
      if(!alreadyFinished)
        clientCallback.onFailure(e1, ClientGetter.this);
      return;
    }
  }
View Full Code Here
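
Before calling clientCallback.onFailure(), the code above normalises the exception: an error-code tracker that only ever recorded one code is collapsed into a plain FetchException for that code, and DATA_NOT_FOUND is promoted to ALL_DATA_NOT_FOUND once some blocks have already been fetched. The sketch below isolates that normalisation step, using only the constructors and public fields visible in the excerpt; the helper itself is hypothetical.

import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode; // assumed import path

public class NormaliseFailure {

    // Hypothetical mirror of the rewrite steps performed before the client callback is invoked.
    static FetchException normalise(FetchException e, int successfulBlocks) {
        // Collapse a tracker that only saw a single error code into that code.
        if (e.errorCodes != null && e.errorCodes.isOneCodeOnly())
            e = new FetchException(e.errorCodes.getFirstCodeFetch());
        // If some blocks did arrive, "not found" really means "not all of the data was found".
        if (e.mode == FetchExceptionMode.DATA_NOT_FOUND && successfulBlocks > 0)
            e = new FetchException(e, FetchExceptionMode.ALL_DATA_NOT_FOUND);
        return e;
    }
}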

    Bucket finalResult = null;
    long maxLen = Math.max(ctx.maxTempLength, ctx.maxOutputLength);
    try {
      finalResult = context.getBucketFactory(persistent()).makeBucket(maxLen);
    } catch (InsufficientDiskSpaceException e) {
            onFailure(new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE), state, context);
            return;
    } catch (IOException e) {
      Logger.error(this, "Caught "+e, e);
      onFailure(new FetchException(FetchExceptionMode.BUCKET_ERROR, e), state, context);
      return;
    } catch(Throwable t) {
      Logger.error(this, "Caught "+t, t);
      onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, t), state, context);
      return;
    }

    PipedInputStream pipeIn = null;
    PipedOutputStream pipeOut = null;
    try {
      output = finalResult.getOutputStream();
      // Decompress
      if(decompressors != null) {
        if(logMINOR) Logger.minor(this, "Decompressing...");
        pipeIn = new PipedInputStream();
        pipeOut = new PipedOutputStream(pipeIn);
        decompressorManager = new DecompressorThreadManager(pipeIn, decompressors, maxLen);
        pipeIn = decompressorManager.execute();
        ClientGetWorkerThread worker = new ClientGetWorkerThread(new BufferedInputStream(pipeIn), output, null, null, null, false, null, null, null, context.linkFilterExceptionProvider);
        worker.start();
        streamGenerator.writeTo(pipeOut, context);
        worker.waitFinished();
        // If this throws, we want the whole request to fail.
        pipeOut.close(); pipeOut = null;
      } else {
          streamGenerator.writeTo(output, context);
          // If this throws, we want the whole request to fail.
          output.close(); output = null;
      }
    } catch(IOException e) {
      Logger.error(this, "Caught "+e, e);
      onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, e), state, context);
    } catch (Throwable t) {
      Logger.error(this, "Caught "+t, t);
      onFailure(new FetchException(FetchExceptionMode.INTERNAL_ERROR, t), state, context);
      return;
    } finally {
      Closer.close(output);
      Closer.close(pipeOut);
    }
View Full Code Here
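
Allocating the output bucket maps three distinct failures to three distinct modes: InsufficientDiskSpaceException becomes NOT_ENOUGH_DISK_SPACE, any other IOException becomes BUCKET_ERROR, and anything else becomes INTERNAL_ERROR. A compact sketch of that catch ladder as a mapping function follows; the helper is illustrative, and the import paths for FetchExceptionMode and InsufficientDiskSpaceException are assumptions.

import java.io.IOException;

import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode;  // assumed import path
import freenet.support.io.InsufficientDiskSpaceException; // assumed import path

public class BucketFailureMapping {

    // Hypothetical helper reproducing the catch ladder above: most specific failure first.
    static FetchException mapBucketFailure(Throwable t) {
        if (t instanceof InsufficientDiskSpaceException)
            return new FetchException(FetchExceptionMode.NOT_ENOUGH_DISK_SPACE);
        if (t instanceof IOException)
            return new FetchException(FetchExceptionMode.BUCKET_ERROR, t);
        return new FetchException(FetchExceptionMode.INTERNAL_ERROR, t);
    }
}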

    }

    @Override
    public void onFailure(LowLevelGetException e, SendableRequestItem token,
            ClientContext context) {
        FetchException fe = translateException(e);
        if(fe.isDefinitelyFatal()) {
            // If the error is definitely-fatal it means there is either a serious local problem
            // or the inserted data was corrupt. So we fail the entire splitfile immediately.
            // We don't track which blocks have fatally failed.
            if(logMINOR) Logger.minor(this, "Fatal failure: "+fe+" for "+token);
            parent.fail(fe);
View Full Code Here
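
Here a LowLevelGetException from the network layer is first translated into a FetchException, and isDefinitelyFatal() decides whether the whole splitfile should fail immediately instead of retrying the block. The sketch below shows that branch in isolation; the two callbacks stand in for parent.fail() and the per-block retry bookkeeping and are not Freenet API.

import freenet.client.FetchException;

public class FatalOrRetry {

    // Hypothetical decision helper: fail everything on definitely-fatal errors, otherwise retry.
    static void handleBlockFailure(FetchException fe, Runnable failWholeFetch, Runnable retryBlock) {
        if (fe.isDefinitelyFatal()) {
            // Serious local problem or corrupt inserted data: retrying other blocks cannot help.
            failWholeFetch.run();
        } else {
            // Transient or per-block failure: let the block be retried.
            retryBlock.run();
        }
    }
}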

    }
    FreenetURI uri = e.newURI;
    if(uri != null) {
      // FIXME what are we doing here anyway? Document!
      uri = usk.turnMySSKIntoUSK(uri);
      e = new FetchException(e, uri);
    }
    cb.onFailure(e, state, context);
  }
View Full Code Here
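
When a failure carries a redirect (e.newURI), the exception is rebuilt around the corrected URI, here after converting an SSK edition into its USK form, so the caller learns where to retry. The sketch below shows only the rewrap itself, using the (FetchException, FreenetURI) constructor from the excerpt; the helper name is invented and the FreenetURI import path is assumed.

import freenet.client.FetchException;
import freenet.keys.FreenetURI; // assumed import path

public class RewrapWithUri {

    // Hypothetical helper: keep the original failure but point it at a corrected URI.
    static FetchException redirectTo(FetchException original, FreenetURI correctedUri) {
        // Mode, message and error details are preserved; only the URI to retry changes.
        return new FetchException(original, correctedUri);
    }
}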

        int segmentCount = metadata.getSegmentCount();
       
        if(splitfileType == SplitfileAlgorithm.NONREDUNDANT) {
            if(splitfileCheckBlocks > 0) {
                Logger.error(this, "Splitfile type is SPLITFILE_NONREDUNDANT yet "+splitfileCheckBlocks+" check blocks found!! : "+this);
                throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Splitfile type is non-redundant yet have "+splitfileCheckBlocks+" check blocks");
            }
        } else if(splitfileType == SplitfileAlgorithm.ONION_STANDARD) {
           
            boolean dontCompress = decompressors.isEmpty();
            if(topCompatibilityMode != 0) {
                // If we have top compatibility mode, then we can give a definitive answer immediately, with the splitfile key, with dontcompress, etc etc.
                if(minCompatMode == CompatibilityMode.COMPAT_UNKNOWN ||
                        !(minCompatMode.ordinal() > topCompatibilityMode || maxCompatMode.ordinal() < topCompatibilityMode)) {
                    minCompatMode = maxCompatMode = CompatibilityMode.values()[topCompatibilityMode];
                    dontCompress = topDontCompress;
                } else
                    throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Top compatibility mode is incompatible with detected compatibility mode");
            }
            // We assume we are the bottom layer.
            // If the top-block stats are passed in then we can safely say the report is definitive.
            fetcher.onSplitfileCompatibilityMode(minCompatMode, maxCompatMode, metadata.getCustomSplitfileKey(), dontCompress, true, topCompatibilityMode != 0);

            if((blocksPerSegment > origFetchContext.maxDataBlocksPerSegment)
                    || (checkBlocksPerSegment > origFetchContext.maxCheckBlocksPerSegment))
                throw new FetchException(FetchExceptionMode.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
           
               
        } else throw new MetadataParseException("Unknown splitfile format: "+splitfileType);

        if(logMINOR)
            Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+
                    ", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount+
                    ", data blocks: "+splitfileDataBlocks+", check blocks: "+splitfileCheckBlocks);
        segments = new SplitFileFetcherSegmentStorage[segmentCount]; // initially null on all entries
       
        long checkLength = 1L * (splitfileDataBlocks - segmentCount * crossCheckBlocks) * CHKBlock.DATA_LENGTH;
        if(checkLength > finalLength) {
            if(checkLength - finalLength > CHKBlock.DATA_LENGTH)
                throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Splitfile is "+checkLength+" bytes long but length is "+finalLength+" bytes");
        }
       
        byte[] localSalt = new byte[32];
        random.nextBytes(localSalt);
       
        keyListener = new SplitFileFetcherKeyListener(fetcher, this, false,
                localSalt, splitfileDataBlocks + totalCrossCheckBlocks + splitfileCheckBlocks, blocksPerSegment +
                checkBlocksPerSegment, segmentCount);

        finalMinCompatMode = minCompatMode;
       
        this.offsetKeyList = storedBlocksLength + storedCrossCheckBlocksLength;
        this.offsetSegmentStatus = offsetKeyList + storedKeysLength;
       
        byte[] generalProgress = encodeGeneralProgress();
       
        if(persistent) {
            offsetGeneralProgress = offsetSegmentStatus + storedSegmentStatusLength;
            this.offsetMainBloomFilter = offsetGeneralProgress + generalProgress.length;
            this.offsetSegmentBloomFilters = offsetMainBloomFilter + keyListener.paddedMainBloomFilterSize();
            this.offsetOriginalMetadata = offsetSegmentBloomFilters +
                keyListener.totalSegmentBloomFiltersSize();
        } else {
            // Don't store anything except the blocks and the key list.
            offsetGeneralProgress = offsetMainBloomFilter = offsetSegmentBloomFilters = offsetOriginalMetadata = offsetSegmentStatus;
        }
           
       
        long dataOffset = 0;
        long crossCheckBlocksOffset = storedBlocksLength; // Only used if completeViaTruncation
        long segmentKeysOffset = offsetKeyList;
        long segmentStatusOffset = offsetSegmentStatus;
       
        for(int i=0;i<segments.length;i++) {
            // splitfile* will be overwritten, which is bad,
            // so copy them
            SplitFileSegmentKeys keys = segmentKeys[i];
            // Segment keys getDataBlocks() includes cross-check blocks
            final int dataBlocks = keys.getDataBlocks() - crossCheckBlocks;
            final int checkBlocks = keys.getCheckBlocks();
            if((dataBlocks > origFetchContext.maxDataBlocksPerSegment)
                    || (checkBlocks > origFetchContext.maxCheckBlocksPerSegment))
                throw new FetchException(FetchExceptionMode.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
            segments[i] = new SplitFileFetcherSegmentStorage(this, i, splitfileType,
                    dataBlocks,
                    checkBlocks, crossCheckBlocks, dataOffset,
                    completeViaTruncation ? crossCheckBlocksOffset : -1, // Put at end if truncating.
                    segmentKeysOffset, segmentStatusOffset,
                    maxRetries != -1, keys, keysFetching);
            dataOffset += dataBlocks * CHKBlock.DATA_LENGTH;
            if(!completeViaTruncation) {
                dataOffset += crossCheckBlocks * CHKBlock.DATA_LENGTH;
            } else {
                crossCheckBlocksOffset += crossCheckBlocks * CHKBlock.DATA_LENGTH;
            }
            segmentKeysOffset +=
                SplitFileFetcherSegmentStorage.storedKeysLength(dataBlocks+crossCheckBlocks, checkBlocks, splitfileSingleCryptoKey != null, checksumLength);
            segmentStatusOffset +=
                SplitFileFetcherSegmentStorage.paddedStoredSegmentStatusLength(dataBlocks, checkBlocks,
                        crossCheckBlocks, maxRetries != -1, checksumLength, persistent);
            for(int j=0;j<(dataBlocks+crossCheckBlocks+checkBlocks);j++) {
                keyListener.addKey(keys.getKey(j, null, false).getNodeKey(false), i, salt);
            }
            if(logDEBUG) Logger.debug(this, "Segment "+i+": data blocks offset "+
                    segments[i].segmentBlockDataOffset+" cross-check blocks offset "+segments[i].segmentCrossCheckBlockDataOffset+" for segment "+i+" of "+this);
        }
        assert(dataOffset == storedBlocksLength);
        if(completeViaTruncation)
            assert(crossCheckBlocksOffset == storedCrossCheckBlocksLength + storedBlocksLength);
        assert(segmentKeysOffset == storedBlocksLength + storedCrossCheckBlocksLength + storedKeysLength);
        assert(segmentStatusOffset == storedBlocksLength + storedCrossCheckBlocksLength + storedKeysLength + storedSegmentStatusLength);
        /* Lie about the required number of blocks. For a cross-segment splitfile, the actual
         * number of blocks needed is somewhere between splitfileDataBlocks and
         * splitfileDataBlocks + totalCrossCheckBlocks depending on what order we fetch them in.
         * Progress over 100% is apparently more annoying than finishing at 98%... */
        fetcher.setSplitfileBlocks(splitfileDataBlocks + totalCrossCheckBlocks, splitfileCheckBlocks);
       
        keyListener.finishedSetup();
       
        if(crossCheckBlocks != 0) {
            Random crossSegmentRandom = new MersenneTwister(Metadata.getCrossSegmentSeed(metadata.getHashes(), metadata.getHashThisLayerOnly()));
            // Cross segment redundancy: Allocate the blocks.
            crossSegments = new SplitFileFetcherCrossSegmentStorage[segments.length];
            int segLen = blocksPerSegment;
            int deductBlocksFromSegments = metadata.getDeductBlocksFromSegments();
            for(int i=0;i<crossSegments.length;i++) {
                Logger.normal(this, "Allocating blocks (on fetch) for cross segment "+i);
                if(segments.length - i == deductBlocksFromSegments) {
                    segLen--;
                }
                SplitFileFetcherCrossSegmentStorage seg =
                    new SplitFileFetcherCrossSegmentStorage(i, segLen, crossCheckBlocks, this, fecCodec);
                crossSegments[i] = seg;
                for(int j=0;j<segLen;j++) {
                    // Allocate random data blocks
                    allocateCrossDataBlock(seg, crossSegmentRandom);
                }
                for(int j=0;j<crossCheckBlocks;j++) {
                    // Allocate check blocks
                    allocateCrossCheckBlock(seg, crossSegmentRandom);
                }
            }
        } else {
            crossSegments = null;
        }
       
        long totalLength;
        Bucket metadataTemp;
        byte[] encodedURI;
        byte[] encodedBasicSettings;
        if(persistent) {
            // Write the metadata to a temporary file to get its exact length.
            metadataTemp = tempBucketFactory.makeBucket(-1);
            OutputStream os = metadataTemp.getOutputStream();
            OutputStream cos = checksumOutputStream(os);
            BufferedOutputStream bos = new BufferedOutputStream(cos);
            try {
                // Need something bigger than a CRC for this...
                MultiHashOutputStream mos = new MultiHashOutputStream(bos, HashType.SHA256.bitmask);
                metadata.writeTo(new DataOutputStream(mos));
                mos.getResults()[0].writeTo(bos);
            } catch (MetadataUnresolvedException e) {
                throw new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Metadata not resolved starting splitfile fetch?!: "+e, e);
            }
            bos.close();
            long metadataLength = metadataTemp.size();
            offsetOriginalDetails = offsetOriginalMetadata + metadataLength;
           
View Full Code Here
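
The splitfile constructor above validates the parsed metadata up front, throwing FetchException with INVALID_METADATA or TOO_MANY_BLOCKS_PER_SEGMENT rather than starting a fetch that cannot succeed. A compact sketch of that guard style follows, using only the (mode, message) constructor; the parameter names and the specific checks are illustrative, not the real validation logic.

import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode; // assumed import path

public class SplitfileMetadataChecks {

    // Hypothetical up-front validation in the spirit of the constructor above.
    static void checkSegmentShape(int dataBlocks, int checkBlocks,
            int maxDataBlocks, int maxCheckBlocks) throws FetchException {
        if (dataBlocks <= 0 || checkBlocks < 0)
            throw new FetchException(FetchExceptionMode.INVALID_METADATA,
                    "Nonsensical block counts: " + dataBlocks + " data, " + checkBlocks + " check");
        if (dataBlocks > maxDataBlocks || checkBlocks > maxCheckBlocks)
            throw new FetchException(FetchExceptionMode.TOO_MANY_BLOCKS_PER_SEGMENT,
                    "Too many blocks per segment: " + dataBlocks + " data, " + checkBlocks + " check");
    }
}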

            try {
                segment.readMetadata();
                if(segment.hasFailed()) {
                    raf.close();
                    raf.free(); // Failed, so free it.
                    throw new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors);
                }
            } catch (ChecksumFailedException e) {
                Logger.error(this, "Progress for segment "+segment.segNo+" on "+this+" corrupted.");
                needsDecode = true;
            }
View Full Code Here

    private void maybeComplete() {
        if(allSucceeded()) {
            callSuccessOffThread();
        } else if(allFinished() && !allSucceeded()) {
            // Some failed.
            fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors));
        }
    }
View Full Code Here

        }
        if(lateCompletion) {
            // We have not called onSuccess() or fail() yet.
            if(allFinished() && !allSucceeded()) {
                // No more blocks will be found, so fail *now*.
                fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors));
            } else {
                if(completeViaTruncation) raf.close();
                maybeComplete();
                return;
            }
View Full Code Here

    /** A segment ran out of retries. We have given up on that segment and therefore on the whole
     * splitfile.
     * @param segment The segment that failed.
     */
    public void failOnSegment(SplitFileFetcherSegmentStorage segment) {
        fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, errors));
    }
View Full Code Here
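
These last excerpts all end the same way: once a segment, or the splitfile as a whole, can no longer succeed, the fetch is failed with FetchExceptionMode.SPLITFILE_ERROR carrying the accumulated per-block error codes. The sketch below reduces that completion check to booleans and a plain message; the real code passes its error-code tracker to the constructor instead, and the callbacks here are placeholders.

import java.util.function.Consumer;

import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode; // assumed import path

public class SplitfileCompletion {

    // Hypothetical reduction of maybeComplete(): report success, fail, or keep waiting.
    static void maybeComplete(boolean allFinished, boolean allSucceeded, String errorSummary,
            Runnable success, Consumer<FetchException> fail) {
        if (allSucceeded) {
            success.run();
        } else if (allFinished) {
            // No more blocks can arrive and some failed: fail the whole splitfile now.
            fail.accept(new FetchException(FetchExceptionMode.SPLITFILE_ERROR,
                    "Splitfile fetch failed: " + errorSummary));
        }
        // Otherwise the fetch is still in progress; do nothing yet.
    }
}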
