Package freenet.support.io

Examples of freenet.support.io.ArrayBucketFactory

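freenet.support.io.ArrayBucketFactory is the trivial BucketFactory of Freenet's support.io package: makeBucket() returns an in-memory ArrayBucket backed by a byte array, which is why the snippets below hand a fresh ArrayBucketFactory to anything that needs a small, short-lived scratch bucket (checksummed readers and writers, the tests' small-bucket factory, the toadlet server). A minimal standalone sketch follows; it only assumes the Bucket/BucketFactory calls already visible on this page (makeBucket(-1), getOutputStream()) plus the standard Bucket accessors getInputStream(), size() and free(). The freenet.support.api import paths and the ArrayBucketFactoryDemo class name are my own additions for illustration.

    import java.io.DataInputStream;
    import java.io.InputStream;
    import java.io.OutputStream;

    // NOTE: import paths for Bucket/BucketFactory are assumed from the usual fred layout.
    import freenet.support.api.Bucket;
    import freenet.support.api.BucketFactory;
    import freenet.support.io.ArrayBucketFactory;

    public class ArrayBucketFactoryDemo {
        public static void main(String[] args) throws Exception {
            // The factory is stateless; the snippets on this page simply do
            // "new ArrayBucketFactory()" at the point of use.
            BucketFactory bf = new ArrayBucketFactory();

            // -1 means the final size is unknown, mirroring bf.makeBucket(-1) in
            // encodeCrossSegmentSettings() further down; the bucket grows in memory.
            Bucket bucket = bf.makeBucket(-1);

            // Write some data through the usual Bucket API...
            OutputStream os = bucket.getOutputStream();
            os.write("hello, bucket".getBytes("UTF-8"));
            os.close();

            // ...and read it back.
            byte[] buf = new byte[(int) bucket.size()];
            InputStream is = bucket.getInputStream();
            new DataInputStream(is).readFully(buf);
            is.close();

            System.out.println(new String(buf, "UTF-8"));
            bucket.free(); // releases the backing array
        }
    }

Because everything stays on the heap, this factory only suits data known to be small; the test setup further down pairs it, as smallBucketFactory, with a TempBucketFactory for bigBucketFactory.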

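        // The largest example on this page appears to be SplitFileInserterStorage's
        // resume-from-disk constructor: each checksummed section of the stored state
        // is opened with checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024),
        // the ArrayBucketFactory presumably supplying the small in-memory buffer the
        // checksum reader needs while verifying up to 1MB of data.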
        try {
            this.checker = ChecksumChecker.create(checksumType);
        } catch (IllegalArgumentException e) {
            throw new StorageFormatException("Bad checksum type");
        }
        InputStream is = checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024);
        dis = new DataInputStream(is);
        int version = dis.readInt();
        if(version != VERSION)
            throw new StorageFormatException("Bad version");
        LockableRandomAccessBuffer rafOrig = BucketTools.restoreRAFFrom(dis, persistentFG, persistentFileTracker, masterKey);
        if(originalData == null) {
            this.originalData = rafOrig;
        } else {
            // Check that it's the same, but use the passed-in one.
            if(!originalData.equals(rafOrig))
                throw new StorageFormatException("Original data restored from different filename! Expected "+originalData+" but restored "+rafOrig);
            this.originalData = originalData;
        }
        this.totalDataBlocks = dis.readInt();
        if(totalDataBlocks <= 0) throw new StorageFormatException("Bad total data blocks "+totalDataBlocks);
        this.totalCheckBlocks = dis.readInt();
        if(totalCheckBlocks <= 0) throw new StorageFormatException("Bad total check blocks "+totalCheckBlocks);
        try {
            this.splitfileType = SplitfileAlgorithm.getByCode(dis.readShort());
        } catch (IllegalArgumentException e) {
            throw new StorageFormatException("Bad splitfile type");
        }
        try {
            this.codec = FECCodec.getInstance(splitfileType);
        } catch (IllegalArgumentException e) {
            throw new StorageFormatException("Bad splitfile codec type");
        }
        this.dataLength = dis.readLong();
        if(dataLength <= 0) throw new StorageFormatException("Bad data length");
        if(dataLength != this.originalData.size())
            throw new ResumeFailedException("Original data size is "+this.originalData.size()+" should be "+dataLength);
        if(((dataLength + CHKBlock.DATA_LENGTH - 1) / CHKBlock.DATA_LENGTH) != totalDataBlocks)
            throw new StorageFormatException("Data blocks "+totalDataBlocks+" not compatible with size "+dataLength);
        decompressedLength = dis.readLong();
        if(decompressedLength <= 0)
            throw new StorageFormatException("Bogus decompressed length");
        isMetadata = dis.readBoolean();
        short atype = dis.readShort();
        if(atype == -1) {
            archiveType = null;
        } else {
            archiveType = ARCHIVE_TYPE.getArchiveType(atype);
            if(archiveType == null) throw new StorageFormatException("Unknown archive type "+atype);
        }
        try {
            clientMetadata = ClientMetadata.construct(dis);
        } catch (MetadataParseException e) {
            throw new StorageFormatException("Failed to read MIME type: "+e);
        }
        short codec = dis.readShort();
        if(codec == (short)-1)
            compressionCodec = null;
        else {
            compressionCodec = COMPRESSOR_TYPE.getCompressorByMetadataID(codec);
            if(compressionCodec == null)
                throw new StorageFormatException("Unknown compression codec ID "+codec);
        }
        int segmentCount = dis.readInt();
        if(segmentCount <= 0) throw new StorageFormatException("Bad segment count");
        this.segmentSize = dis.readInt();
        if(segmentSize <= 0) throw new StorageFormatException("Bad segment size");
        this.checkSegmentSize = dis.readInt();
        if(checkSegmentSize <= 0) throw new StorageFormatException("Bad check segment size");
        this.crossCheckBlocks = dis.readInt();
        if(crossCheckBlocks < 0) throw new StorageFormatException("Bad cross-check block count");
        if(segmentSize + checkSegmentSize + crossCheckBlocks > FECCodec.MAX_TOTAL_BLOCKS_PER_SEGMENT)
            throw new StorageFormatException("Must be no more than "+FECCodec.MAX_TOTAL_BLOCKS_PER_SEGMENT+" blocks per segment");
        this.splitfileCryptoAlgorithm = dis.readByte();
        if(!Metadata.isValidSplitfileCryptoAlgorithm(splitfileCryptoAlgorithm))
            throw new StorageFormatException("Invalid splitfile crypto algorithm "+splitfileCryptoAlgorithm);
        if(dis.readBoolean()) {
            splitfileCryptoKey = new byte[32];
            dis.readFully(splitfileCryptoKey);
        } else {
            splitfileCryptoKey = null;
        }
        this.keyLength = dis.readInt(); // FIXME validate
        if(keyLength < SplitFileInserterSegmentStorage.getKeyLength(this))
            throw new StorageFormatException("Invalid key length "+keyLength+" should be at least "+
                    SplitFileInserterSegmentStorage.getKeyLength(this));
        int compatMode = dis.readInt();
        if(compatMode < 0 || compatMode >= CompatibilityMode.values().length)
            throw new StorageFormatException("Invalid compatibility mode "+compatMode);
        this.cmode = CompatibilityMode.values()[compatMode];
        this.deductBlocksFromSegments = dis.readInt();
        if(deductBlocksFromSegments < 0 || deductBlocksFromSegments > segmentCount)
            throw new StorageFormatException("Bad deductBlocksFromSegments");
        this.maxRetries = dis.readInt();
        if(maxRetries < -1) throw new StorageFormatException("Bad maxRetries");
        this.consecutiveRNFsCountAsSuccess = dis.readInt();
        if(consecutiveRNFsCountAsSuccess < 0)
            throw new StorageFormatException("Bad consecutiveRNFsCountAsSuccess");
        specifySplitfileKeyInMetadata = dis.readBoolean();
        if(dis.readBoolean()) {
            hashThisLayerOnly = new byte[32];
            dis.readFully(hashThisLayerOnly);
        } else {
            hashThisLayerOnly = null;
        }
        topDontCompress = dis.readBoolean();
        topRequiredBlocks = dis.readInt();
        topTotalBlocks = dis.readInt();
        origDataSize = dis.readLong();
        origCompressedDataSize = dis.readLong();
        hashes = HashResult.readHashes(dis);
        dis.close();
        this.hasPaddedLastBlock = (dataLength % CHKBlock.DATA_LENGTH != 0);
        this.segments = new SplitFileInserterSegmentStorage[segmentCount];
        if(crossCheckBlocks != 0)
            this.crossSegments = new SplitFileInserterCrossSegmentStorage[segmentCount];
        else
            crossSegments = null;
        // Read offsets.
        is = checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024);
        dis = new DataInputStream(is);
        if(hasPaddedLastBlock) {
            offsetPaddedLastBlock = readOffset(dis, rafLength, "offsetPaddedLastBlock");
        } else {
            offsetPaddedLastBlock = 0;
        }
        offsetOverallStatus = readOffset(dis, rafLength, "offsetOverallStatus");
        overallStatusLength = dis.readInt();
        if(overallStatusLength < 0) throw new StorageFormatException("Negative overall status length");
        if(overallStatusLength < FailureCodeTracker.getFixedLength(true))
            throw new StorageFormatException("Bad overall status length");
        // Will be read after offsets
        if(crossSegments != null) {
            offsetCrossSegmentBlocks = new long[crossSegments.length];
            for(int i=0;i<crossSegments.length;i++)
                offsetCrossSegmentBlocks[i] = readOffset(dis, rafLength, "cross-segment block offset");
        } else {
            offsetCrossSegmentBlocks = null;
        }
        offsetSegmentCheckBlocks = new long[segmentCount];
        for(int i=0;i<segmentCount;i++)
            offsetSegmentCheckBlocks[i] = readOffset(dis, rafLength, "segment check block offset");
        offsetSegmentStatus = new long[segmentCount];
        for(int i=0;i<segmentCount;i++)
            offsetSegmentStatus[i] = readOffset(dis, rafLength, "segment status offset");
        if(crossSegments != null) {
            offsetCrossSegmentStatus = new long[crossSegments.length];
            for(int i=0;i<crossSegments.length;i++)
                offsetCrossSegmentStatus[i] = readOffset(dis, rafLength, "cross-segment status offset");
        } else {
            offsetCrossSegmentStatus = null;
        }
        offsetSegmentKeys = new long[segmentCount];
        for(int i=0;i<segmentCount;i++)
            offsetSegmentKeys[i] = readOffset(dis, rafLength, "segment keys offset");
        dis.close();
        // Set up segments...
        underlyingOffsetDataSegments = new long[segmentCount];
        is = checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024);
        dis = new DataInputStream(is);
        int blocks = 0;
        for(int i=0;i<segmentCount;i++) {
            segments[i] = new SplitFileInserterSegmentStorage(this, dis, i, keyLength,
                    splitfileCryptoAlgorithm, splitfileCryptoKey, random, maxRetries, consecutiveRNFsCountAsSuccess, keysFetching);
            underlyingOffsetDataSegments[i] = blocks * CHKBlock.DATA_LENGTH;
            blocks += segments[i].dataBlockCount;
            assert(underlyingOffsetDataSegments[i] < dataLength);
        }
        dis.close();
        if(blocks != totalDataBlocks)
            throw new StorageFormatException("Total data blocks should be "+totalDataBlocks+" but is "+blocks);
        if(crossSegments != null) {
            is = checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024);
            dis = new DataInputStream(is);
            for(int i=0;i<crossSegments.length;i++) {
                crossSegments[i] = new SplitFileInserterCrossSegmentStorage(this, dis, i);
            }
            dis.close();
        }
        ois.close();
        ois = new RAFInputStream(raf, offsetOverallStatus, rafLength - offsetOverallStatus);
        dis = new DataInputStream(checker.checksumReaderWithLength(ois, new ArrayBucketFactory(), 1024*1024));
        errors = new FailureCodeTracker(true, dis);
        dis.close();
        for(SplitFileInserterSegmentStorage segment : segments) {
            segment.readStatus();
        }
View Full Code Here


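    // Write side: the failure/status tracker is serialised into a small in-memory
    // ArrayBucket, through a checksummed writer that is again handed an
    // ArrayBucketFactory (presumably for its own temporary storage).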
    private byte[] encodeOverallStatus() {
        ArrayBucket bucket = new ArrayBucket(); // Will be small.
        try {
            OutputStream os = bucket.getOutputStream();
            OutputStream cos = checker.checksumWriterWithLength(os, new ArrayBucketFactory());
            DataOutputStream dos = new DataOutputStream(cos);
            synchronized(this) {
                errors.writeFixedLengthTo(dos);
                overallStatusDirty = false;
            }
View Full Code Here

    private Bucket encodeSegmentSettings() {
        ArrayBucket bucket = new ArrayBucket(); // Will be small.
        try {
            OutputStream os = bucket.getOutputStream();
            OutputStream cos = checker.checksumWriterWithLength(os, new ArrayBucketFactory());
            DataOutputStream dos = new DataOutputStream(cos);
            for (SplitFileInserterSegmentStorage segment : segments) {
                segment.writeFixedSettings(dos);
            }
            dos.close();
View Full Code Here

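    // Note that even when the caller supplies the BucketFactory for the settings
    // bucket itself, the checksummed writer is still given a fresh ArrayBucketFactory,
    // presumably because its temporary data is always small.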
    private Bucket encodeCrossSegmentSettings(BucketFactory bf) throws IOException {
        if (crossSegments == null)
            return new NullBucket();
        Bucket bucket = bf.makeBucket(-1);
        OutputStream os = bucket.getOutputStream();
        OutputStream cos = checker.checksumWriterWithLength(os, new ArrayBucketFactory());
        DataOutputStream dos = new DataOutputStream(cos);
        for (SplitFileInserterCrossSegmentStorage segment : crossSegments) {
            segment.writeFixedSettings(dos);
        }
        dos.close();
View Full Code Here

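        // Counterpart of the header read at the top of the page (the method name is
        // not shown in this excerpt): the magic number and checksum type ID are
        // written in the clear, then the checksummed section starts, again backed
        // by an ArrayBucketFactory.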
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);
        try {
            dos.writeLong(MAGIC);
            dos.writeInt(checker.getChecksumTypeID());
            OutputStream os = checker.checksumWriterWithLength(baos, new ArrayBucketFactory());
            dos = new DataOutputStream(os);
            dos.writeInt(VERSION);
            originalData.storeTo(dos);
            dos.writeInt(totalDataBlocks);
            dos.writeInt(totalCheckBlocks);
View Full Code Here

   
    /** Encode the offsets. */
    private byte[] encodeOffsets() {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try {
            OutputStream os = checker.checksumWriterWithLength(baos, new ArrayBucketFactory());
            DataOutputStream dos = new DataOutputStream(os);
            if(this.hasPaddedLastBlock)
                dos.writeLong(offsetPaddedLastBlock);
            dos.writeLong(offsetOverallStatus);
            dos.writeInt(overallStatusLength);
View Full Code Here

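        // Test fixture setup (apparently from the splitfile storage unit tests):
        // ArrayBucketFactory serves as the "small bucket" factory, while large
        // temporary data goes through TempBucketFactory.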
        ticker = new CheatingTicker(executor);
        RandomSource r = new DummyRandomSource(12345);
        fg = new FilenameGenerator(r, true, dir, "freenet-test");
        persistentFileTracker = new TrivialPersistentFileTracker(dir, fg);
        bigRAFFactory = new PooledFileRandomAccessBufferFactory(fg, r);
        smallBucketFactory = new ArrayBucketFactory();
        bigBucketFactory = new TempBucketFactory(executor, fg, 0, 0, r, false, 0, null);
        baseContext = HighLevelSimpleClientImpl.makeDefaultInsertContext(bigBucketFactory, new SimpleEventProducer());
        cryptoKey = new byte[32];
        r.nextBytes(cryptoKey);
        checker = new CRCChecksumChecker();
View Full Code Here

    }

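    // Node startup: the web interface (SimpleToadletServer) also takes an
    // ArrayBucketFactory, presumably for buffering generated pages in memory.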
    // FProxy config needs to be here too
    SubConfig fproxyConfig = new SubConfig("fproxy", config);
    try {
      toadlets = new SimpleToadletServer(fproxyConfig, new ArrayBucketFactory(), executor, this);
      fproxyConfig.finishedInitialization();
      toadlets.start();
    } catch (IOException e4) {
      Logger.error(this, "Could not start web interface: "+e4, e4);
      System.err.println("Could not start web interface: "+e4);
View Full Code Here
