this.originalData = originalData;
this.callback = callback;
this.persistent = persistent;
dataLength = originalData.size();
if (dataLength > ((long) Integer.MAX_VALUE) * CHKBlock.DATA_LENGTH)
throw new InsertException(InsertExceptionMode.TOO_BIG);
totalDataBlocks = (int) ((dataLength + CHKBlock.DATA_LENGTH - 1) / CHKBlock.DATA_LENGTH);
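// Ceiling division: the number of CHK-sized data blocks (CHKBlock.DATA_LENGTH,
// 32 KiB each) needed to hold dataLength bytes. The check above caps the
// block count at Integer.MAX_VALUE so it fits in an int.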
this.decompressedLength = decompressedLength;
this.compressionCodec = compressionCodec;
this.clientMetadata = meta;
this.checker = checker;
this.memoryLimitedJobRunner = memoryLimitedJobRunner;
this.jobRunner = jobRunner;
this.isMetadata = isMetadata;
this.archiveType = archiveType;
this.hashThisLayerOnly = hashThisLayerOnly;
this.topDontCompress = topDontCompress;
this.origDataSize = origDataSize;
this.origCompressedDataSize = origCompressedDataSize;
this.maxRetries = ctx.maxInsertRetries;
this.errors = new FailureCodeTracker(true);
this.ticker = ticker;
this.random = random;
// Work out how many blocks go in each segment, crypto keys, etc.
// Complicated by backwards compatibility, i.e. the need to be able to
// reinsert old splitfiles.
// FIXME: consider getting rid of support for very old splitfiles.
int segs;
cmode = ctx.getCompatibilityMode();
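// Pre-1255 splitfiles carry neither content hashes nor a per-splitfile
// crypto key in their metadata, so both are dropped for older modes.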
if (cmode.ordinal() < CompatibilityMode.COMPAT_1255.ordinal()) {
this.hashes = null;
splitfileCryptoKey = null;
} else {
this.hashes = hashes;
}
if (cmode == CompatibilityMode.COMPAT_1250_EXACT) {
segs = (totalDataBlocks + 128 - 1) / 128;
segmentSize = 128;
deductBlocksFromSegments = 0;
} else {
if (cmode == CompatibilityMode.COMPAT_1251) {
// Max 131 blocks per segment.
segs = (totalDataBlocks + 131 - 1) / 131;
} else {
// Algorithm from evanbd, see bug #2931.
if (totalDataBlocks > 520) {
segs = (totalDataBlocks + 128 - 1) / 128;
} else if (totalDataBlocks > 393) {
// maxSegSize = 130;
segs = 4;
} else if (totalDataBlocks > 266) {
// maxSegSize = 131;
segs = 3;
} else if (totalDataBlocks > 136) {
// maxSegSize = 133;
segs = 2;
} else {
// maxSegSize = 136;
segs = 1;
}
}
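// Worked example: totalDataBlocks = 300 falls in the (266, 393] band,
// so segs = 3 and segSize below becomes (300 + 2) / 3 = 100, comfortably
// under that band's 131-block cap.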
int segSize = (totalDataBlocks + segs - 1) / segs;
if (ctx.splitfileSegmentDataBlocks < segSize) {
segs = (totalDataBlocks + ctx.splitfileSegmentDataBlocks - 1)
/ ctx.splitfileSegmentDataBlocks;
segSize = (totalDataBlocks + segs - 1) / segs;
}
segmentSize = segSize;
if (cmode == CompatibilityMode.COMPAT_CURRENT
|| cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()) {
// Even with basic even segment splitting, it is possible for
// the last segment to be a lot smaller than the rest.
// So drop a single data block from each of the last
// [segmentSize-lastSegmentSize] segments instead.
// Hence all the segments are within 1 block of segmentSize.
int lastSegmentSize = totalDataBlocks - (segmentSize * (segs - 1));
deductBlocksFromSegments = segmentSize - lastSegmentSize;
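// Worked example: totalDataBlocks = 10, segs = 3, segmentSize = 4 gives
// lastSegmentSize = 10 - 8 = 2 and deductBlocksFromSegments = 2, so the
// last two segments hold 3 blocks each: 4 + 3 + 3 = 10.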
} else {
deductBlocksFromSegments = 0;
}
}
int crossCheckBlocks = 0;
// Cross-segment splitfile redundancy becomes useful at 20 segments.
if (segs >= 20
&& (cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1255
.ordinal())) {
// The optimal number of cross-check blocks per segment (and per
// cross-segment since there are the same number of cross-segments
// as segments) is 3.
crossCheckBlocks = 3;
}
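// Each cross-segment spreads its blocks across many ordinary segments,
// so the extra check blocks are intended to help recovery even when an
// entire segment is badly damaged.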
this.crossCheckBlocks = crossCheckBlocks;
this.splitfileType = ctx.getSplitfileAlgorithm();
this.codec = FECCodec.getInstance(splitfileType);
checkSegmentSize = codec.getCheckBlocks(segmentSize + crossCheckBlocks, cmode);
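// For modern compatibility modes the codec typically returns about as
// many check blocks as data blocks, i.e. roughly 100% FEC redundancy
// per segment.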
this.splitfileCryptoAlgorithm = splitfileCryptoAlgorithm;
if (splitfileCryptoKey != null) {
this.splitfileCryptoKey = splitfileCryptoKey;
specifySplitfileKeyInMetadata = true;
} else if (cmode == CompatibilityMode.COMPAT_CURRENT
|| cmode.ordinal() >= CompatibilityMode.COMPAT_1255.ordinal()) {
if (hashThisLayerOnly != null) {
this.splitfileCryptoKey = Metadata.getCryptoKey(hashThisLayerOnly);
} else {
this.splitfileCryptoKey = Metadata.getCryptoKey(hashes);
}
specifySplitfileKeyInMetadata = false;
} else {
this.splitfileCryptoKey = null;
specifySplitfileKeyInMetadata = false;
}
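// In the derived-key branch above, the key comes deterministically from
// the content hashes, so reinserting identical content produces the same
// splitfile key (and hence the same CHKs) without storing the key in the
// metadata.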
int totalCheckBlocks = 0;
int checkTotalDataBlocks = 0;
underlyingOffsetDataSegments = new long[segs];
keyLength = SplitFileInserterSegmentStorage.getKeyLength(this);
this.consecutiveRNFsCountAsSuccess = ctx.consecutiveRNFsCountAsSuccess;
segments = makeSegments(segmentSize, segs, totalDataBlocks, crossCheckBlocks,
deductBlocksFromSegments, persistent,
cmode, random, keysFetching, consecutiveRNFsCountAsSuccess);
for (SplitFileInserterSegmentStorage segment : segments) {
totalCheckBlocks += segment.checkBlockCount;
checkTotalDataBlocks += segment.dataBlockCount;
}
assert (checkTotalDataBlocks == totalDataBlocks);
this.totalCheckBlocks = totalCheckBlocks;
if (crossCheckBlocks != 0) {
byte[] seed = Metadata.getCrossSegmentSeed(hashes, hashThisLayerOnly);
if (logMINOR)
Logger.minor(this, "Cross-segment seed: " + HexUtil.bytesToHex(seed));
Random xsRandom = new MersenneTwister(seed);
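// The PRNG is seeded from the content hashes, so the cross-segment block
// allocation below is deterministic and reproducible on reinsert.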
// Cross-segment redundancy: allocate the blocks.
crossSegments = new SplitFileInserterCrossSegmentStorage[segs];
int segLen = segmentSize;
for (int i = 0; i < crossSegments.length; i++) {
if (logMINOR)
Logger.minor(this, "Allocating blocks for cross segment " + i);
if (segments.length - i == deductBlocksFromSegments) {
segLen--;
}
SplitFileInserterCrossSegmentStorage seg = new SplitFileInserterCrossSegmentStorage(
this, i, persistent, segLen, crossCheckBlocks);
crossSegments[i] = seg;
for (int j = 0; j < segLen; j++) {
// Allocate random data blocks
allocateCrossDataBlock(seg, xsRandom);
}
for (int j = 0; j < crossCheckBlocks; j++) {
// Allocate check blocks
allocateCrossCheckBlock(seg, xsRandom);
}
}
} else {
crossSegments = null;
}
// Now set up the RAF.
// Set up the offset arrays early so we can compute the length of encodeOffsets().
if (crossSegments != null) {
offsetCrossSegmentBlocks = new long[crossSegments.length];
if (persistent)
offsetCrossSegmentStatus = new long[crossSegments.length];
else
offsetCrossSegmentStatus = null;
} else {
offsetCrossSegmentBlocks = null;
offsetCrossSegmentStatus = null;
}
offsetSegmentCheckBlocks = new long[segments.length];
offsetSegmentKeys = new long[segments.length];
if (persistent) {
offsetSegmentStatus = new long[segments.length];
} else {
offsetSegmentStatus = null;
}
// First comes all the fixed-size data ...
byte[] paddedLastBlock = null;
if (dataLength % CHKBlock.DATA_LENGTH != 0) {
this.hasPaddedLastBlock = true;
long from = (dataLength / CHKBlock.DATA_LENGTH) * CHKBlock.DATA_LENGTH;
byte[] buf = new byte[(int) (dataLength - from)];
this.originalData.pread(from, buf, 0, buf.length);
paddedLastBlock = BucketTools.pad(buf, CHKBlock.DATA_LENGTH, buf.length);
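// Worked example: dataLength = 100000 with 32768-byte blocks gives
// from = 3 * 32768 = 98304; the trailing 1696 bytes are read into buf
// and padded out to a full block.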
} else {
this.hasPaddedLastBlock = false;
}
byte[] header = null;
Bucket segmentSettings = null, crossSegmentSettings = null;
int offsetsLength = 0;
if (persistent) {
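// These on-disk structures are only needed for persistent inserts, where
// the insert state must be recoverable after a node restart.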
header = encodeHeader();
offsetsLength = encodeOffsets().length;
segmentSettings = encodeSegmentSettings(); // Checksummed with length
try {
crossSegmentSettings = encodeCrossSegmentSettings(bf); // Checksummed with length
} catch (IOException e) {
throw new InsertException(InsertExceptionMode.BUCKET_ERROR,
"Failed to write to temporary storage while creating splitfile inserter",
null);
}
}