int segmentCount = metadata.getSegmentCount();
if(splitfileType == SplitfileAlgorithm.NONREDUNDANT) {
if(splitfileCheckBlocks > 0) {
Logger.error(this, "Splitfile type is SPLITFILE_NONREDUNDANT yet "+splitfileCheckBlocks+" check blocks found!! : "+this);
throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Splitfile type is non-redundant yet have "+splitfileCheckBlocks+" check blocks");
}
} else if(splitfileType == SplitfileAlgorithm.ONION_STANDARD) {
boolean dontCompress = decompressors.isEmpty();
if(topCompatibilityMode != 0) {
// If a top compatibility mode was specified, we can give a definitive answer immediately, including the splitfile key and the dontCompress flag.
if(minCompatMode == CompatibilityMode.COMPAT_UNKNOWN ||
(minCompatMode.ordinal() <= topCompatibilityMode && topCompatibilityMode <= maxCompatMode.ordinal())) {
minCompatMode = maxCompatMode = CompatibilityMode.values()[topCompatibilityMode];
dontCompress = topDontCompress;
} else
throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Top compatibility mode is incompatible with detected compatibility mode");
}
// We assume we are the bottom layer.
// If the top-block stats are passed in then we can safely say the report is definitive.
fetcher.onSplitfileCompatibilityMode(minCompatMode, maxCompatMode, metadata.getCustomSplitfileKey(), dontCompress, true, topCompatibilityMode != 0);
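// Enforce the fetch context's per-segment limits before allocating any storage.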
if((blocksPerSegment > origFetchContext.maxDataBlocksPerSegment)
|| (checkBlocksPerSegment > origFetchContext.maxCheckBlocksPerSegment))
throw new FetchException(FetchExceptionMode.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+blocksPerSegment+" data, "+checkBlocksPerSegment+" check");
} else throw new MetadataParseException("Unknown splitfile format: "+splitfileType);
if(logMINOR)
Logger.minor(this, "Algorithm: "+splitfileType+", blocks per segment: "+blocksPerSegment+
", check blocks per segment: "+checkBlocksPerSegment+", segments: "+segmentCount+
", data blocks: "+splitfileDataBlocks+", check blocks: "+splitfileCheckBlocks);
segments = new SplitFileFetcherSegmentStorage[segmentCount]; // initially null on all entries
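// Sanity-check the declared final length against the segments' total data capacity. The 1L factor forces long arithmetic to avoid int overflow.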
long checkLength = 1L * (splitfileDataBlocks - segmentCount * crossCheckBlocks) * CHKBlock.DATA_LENGTH;
// Allow up to one block of slack: the final data block may be padded.
if(checkLength > finalLength && checkLength - finalLength > CHKBlock.DATA_LENGTH)
throw new FetchException(FetchExceptionMode.INVALID_METADATA, "Splitfile is "+checkLength+" bytes long but length is "+finalLength+" bytes");
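// Fresh random salt for this fetch's bloom-filter key listener.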
byte[] localSalt = new byte[32];
random.nextBytes(localSalt);
keyListener = new SplitFileFetcherKeyListener(fetcher, this, false,
localSalt, splitfileDataBlocks + totalCrossCheckBlocks + splitfileCheckBlocks, blocksPerSegment +
checkBlocksPerSegment, segmentCount);
finalMinCompatMode = minCompatMode;
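// On-disk layout from here: [data blocks][cross-check blocks][key list][segment status]
// [general progress][main bloom filter][segment bloom filters][original metadata][original details].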
this.offsetKeyList = storedBlocksLength + storedCrossCheckBlocksLength;
this.offsetSegmentStatus = offsetKeyList + storedKeysLength;
byte[] generalProgress = encodeGeneralProgress();
if(persistent) {
offsetGeneralProgress = offsetSegmentStatus + storedSegmentStatusLength;
this.offsetMainBloomFilter = offsetGeneralProgress + generalProgress.length;
this.offsetSegmentBloomFilters = offsetMainBloomFilter + keyListener.paddedMainBloomFilterSize();
this.offsetOriginalMetadata = offsetSegmentBloomFilters +
keyListener.totalSegmentBloomFiltersSize();
} else {
// Don't store anything except the blocks and the key list.
offsetGeneralProgress = offsetMainBloomFilter = offsetSegmentBloomFilters = offsetOriginalMetadata = offsetSegmentStatus;
}
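// Running offsets into the storage file, advanced as each segment's regions are laid out below.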
long dataOffset = 0;
long crossCheckBlocksOffset = storedBlocksLength; // Only used if completeViaTruncation
long segmentKeysOffset = offsetKeyList;
long segmentStatusOffset = offsetSegmentStatus;
for(int i=0;i<segments.length;i++) {
SplitFileSegmentKeys keys = segmentKeys[i];
// getDataBlocks() includes the cross-check blocks, so subtract them to get the real data block count.
final int dataBlocks = keys.getDataBlocks() - crossCheckBlocks;
final int checkBlocks = keys.getCheckBlocks();
if((dataBlocks > origFetchContext.maxDataBlocksPerSegment)
|| (checkBlocks > origFetchContext.maxCheckBlocksPerSegment))
throw new FetchException(FetchExceptionMode.TOO_MANY_BLOCKS_PER_SEGMENT, "Too many blocks per segment: "+dataBlocks+" data, "+checkBlocks+" check");
segments[i] = new SplitFileFetcherSegmentStorage(this, i, splitfileType,
dataBlocks,
checkBlocks, crossCheckBlocks, dataOffset,
completeViaTruncation ? crossCheckBlocksOffset : -1, // Put at end if truncating.
segmentKeysOffset, segmentStatusOffset,
maxRetries != -1, keys, keysFetching);
dataOffset += dataBlocks * CHKBlock.DATA_LENGTH;
if(!completeViaTruncation) {
dataOffset += crossCheckBlocks * CHKBlock.DATA_LENGTH;
} else {
crossCheckBlocksOffset += crossCheckBlocks * CHKBlock.DATA_LENGTH;
}
segmentKeysOffset +=
SplitFileFetcherSegmentStorage.storedKeysLength(dataBlocks+crossCheckBlocks, checkBlocks, splitfileSingleCryptoKey != null, checksumLength);
segmentStatusOffset +=
SplitFileFetcherSegmentStorage.paddedStoredSegmentStatusLength(dataBlocks, checkBlocks,
crossCheckBlocks, maxRetries != -1, checksumLength, persistent);
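// Register every key in this segment with the key listener's bloom filters, salted with the global salt.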
for(int j=0;j<(dataBlocks+crossCheckBlocks+checkBlocks);j++) {
keyListener.addKey(keys.getKey(j, null, false).getNodeKey(false), i, salt);
}
if(logDEBUG) Logger.debug(this, "Segment "+i+": data blocks offset "+
segments[i].segmentBlockDataOffset+", cross-check blocks offset "+segments[i].segmentCrossCheckBlockDataOffset+" of "+this);
}
assert(dataOffset == storedBlocksLength);
if(completeViaTruncation)
assert(crossCheckBlocksOffset == storedCrossCheckBlocksLength + storedBlocksLength);
assert(segmentKeysOffset == storedBlocksLength + storedCrossCheckBlocksLength + storedKeysLength);
assert(segmentStatusOffset == storedBlocksLength + storedCrossCheckBlocksLength + storedKeysLength + storedSegmentStatusLength);
/* Lie about the required number of blocks. For a cross-segment splitfile, the actual
* number of blocks needed is somewhere between splitfileDataBlocks and
* splitfileDataBlocks + totalCrossCheckBlocks depending on what order we fetch them in.
* Progress over 100% is apparently more annoying than finishing at 98%... */
fetcher.setSplitfileBlocks(splitfileDataBlocks + totalCrossCheckBlocks, splitfileCheckBlocks);
keyListener.finishedSetup();
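// Cross-segment redundancy: each cross segment takes one block from each of several ordinary
// segments, plus its own check blocks. The allocation PRNG is seeded from the metadata hashes,
// so the fetcher reproduces the same allocation the inserter used.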
if(crossCheckBlocks != 0) {
Random crossSegmentRandom = new MersenneTwister(Metadata.getCrossSegmentSeed(metadata.getHashes(), metadata.getHashThisLayerOnly()));
// Cross segment redundancy: Allocate the blocks.
crossSegments = new SplitFileFetcherCrossSegmentStorage[segments.length];
int segLen = blocksPerSegment;
int deductBlocksFromSegments = metadata.getDeductBlocksFromSegments();
for(int i=0;i<crossSegments.length;i++) {
Logger.normal(this, "Allocating blocks (on fetch) for cross segment "+i);
if(segments.length - i == deductBlocksFromSegments) {
segLen--;
}
SplitFileFetcherCrossSegmentStorage seg =
new SplitFileFetcherCrossSegmentStorage(i, segLen, crossCheckBlocks, this, fecCodec);
crossSegments[i] = seg;
for(int j=0;j<segLen;j++) {
// Allocate random data blocks
allocateCrossDataBlock(seg, crossSegmentRandom);
}
for(int j=0;j<crossCheckBlocks;j++) {
// Allocate check blocks
allocateCrossCheckBlock(seg, crossSegmentRandom);
}
}
} else {
crossSegments = null;
}
long totalLength;
Bucket metadataTemp;
byte[] encodedURI;
byte[] encodedBasicSettings;
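// For persistent requests, the original metadata is serialized (with a SHA-256 hash appended) so the fetch can resume from disk after a restart.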
if(persistent) {
// Write the metadata to a temporary file to get its exact length.
metadataTemp = tempBucketFactory.makeBucket(-1);
OutputStream os = metadataTemp.getOutputStream();
OutputStream cos = checksumOutputStream(os);
BufferedOutputStream bos = new BufferedOutputStream(cos);
try {
// Need something bigger than a CRC for this...
MultiHashOutputStream mos = new MultiHashOutputStream(bos, HashType.SHA256.bitmask);
metadata.writeTo(new DataOutputStream(mos));
mos.getResults()[0].writeTo(bos);
} catch (MetadataUnresolvedException e) {
throw new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Metadata not resolved starting splitfile fetch?!: "+e, e);
} finally {
// Close even on failure so the temp bucket's stream is not leaked.
bos.close();
}
long metadataLength = metadataTemp.size();
offsetOriginalDetails = offsetOriginalMetadata + metadataLength;