}
}
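// Insert a buffer of random data with the given compatibility mode, then decode it again
// segment by segment from blocks handed back by the inserter, checking the full round trip.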
private void testRoundTripSimpleRandom(long size, CompatibilityMode cmode) throws Exception {
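// Generate random test data and compute its hashes for the insert.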
RandomSource r = new DummyRandomSource(12123);
LockableRandomAccessBuffer data = generateData(r, size, smallRAFFactory);
Bucket dataBucket = new RAFBucket(data);
HashResult[] hashes = getHashes(data);
MyCallback cb = new MyCallback();
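// Clone the base insert context, force early encoding and apply the requested compatibility mode.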
InsertContext context = baseContext.clone();
context.earlyEncode = true;
context.setCompatibilityMode(cmode);
cmode = context.getCompatibilityMode();
KeysFetchingLocally keys = new MyKeysFetchingLocally();
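// Pre-1255 splitfiles do not get a fixed crypto key; COMPAT_1416 and later use AES-CTR, older modes use AES-PCFB.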
boolean old = cmode.code < CompatibilityMode.COMPAT_1255.code;
byte cryptoAlgorithm;
if(cmode == CompatibilityMode.COMPAT_CURRENT || cmode.ordinal() >= CompatibilityMode.COMPAT_1416.ordinal())
    cryptoAlgorithm = Key.ALGO_AES_CTR_256_SHA256;
else
    cryptoAlgorithm = Key.ALGO_AES_PCFB_256_SHA256;
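// Build the inserter-side storage and start encoding.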
SplitFileInserterStorage storage = new SplitFileInserterStorage(data, size, cb, null,
new ClientMetadata(), false, null, smallRAFFactory, false, context,
cryptoAlgorithm, old ? null : cryptoKey, null, hashes, smallBucketFactory, checker,
r, memoryLimitedJobRunner, jobRunner, ticker, keys, false, 0, 0, 0, 0);
storage.start();
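// Wait for every segment to finish encoding.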
cb.waitForFinishedEncode();
assertEquals(Status.ENCODED, storage.getStatus());
// Encoded. Now try to decode it ...
cb.waitForHasKeys();
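// Encode the top-level metadata for the inserted splitfile.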
Metadata metadata = storage.encodeMetadata();
// Ugly hack: round-trip the metadata through a Bucket because Metadata does not serialize consistently otherwise.
// FIXME make Metadata behave consistently and get rid of this workaround.
Bucket metaBucket = metadata.toBucket(smallBucketFactory);
Metadata m1 = Metadata.construct(metaBucket);
Bucket copyBucket = m1.toBucket(smallBucketFactory);
assertTrue(BucketTools.equalBuckets(metaBucket, copyBucket));
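// Build a fetcher from the round-tripped metadata.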
MyFetchCallback fcb = new MyFetchCallback();
FetchContext fctx = HighLevelSimpleClientImpl.makeDefaultFetchContext(size*2, size*2, smallBucketFactory, new SimpleEventProducer());
SplitFileFetcherStorage fetcherStorage = new SplitFileFetcherStorage(m1, fcb, new ArrayList<COMPRESSOR_TYPE>(),
new ClientMetadata(), false, cmode.code, fctx, false, salt, URI, URI, true, new byte[0],
r, smallBucketFactory, smallRAFFactory, jobRunner, ticker, memoryLimitedJobRunner,
checker, false, null, null, keys);
fetcherStorage.start(false);
// Fully decode one segment at a time, ignore cross-segment.
for(int i=0;i<storage.segments.length;i++) {
SplitFileFetcherSegmentStorage fetcherSegment = fetcherStorage.segments[i];
SplitFileInserterSegmentStorage inserterSegment = storage.segments[i];
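// A segment can be decoded once it has as many blocks as it has data plus cross-check blocks.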
int minBlocks = inserterSegment.dataBlockCount + inserterSegment.crossCheckBlockCount;
int totalBlocks = inserterSegment.totalBlockCount;
boolean[] fetched = new boolean[totalBlocks];
if(i == storage.segments.length-1 && cmode.ordinal() < CompatibilityMode.COMPAT_1255.ordinal())
fetched[inserterSegment.dataBlockCount-1] = true; // We don't use the last block of the last segment for old splitfiles
for(int j=0;j<minBlocks;j++) {
int blockNo;
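// Pick a random block that has not been handed to the fetcher yet.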
do {
blockNo = r.nextInt(totalBlocks);
} while (fetched[blockNo]);
fetched[blockNo] = true;
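// Encode the block on the inserter side and feed it to the fetcher as if it had been fetched from the network.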
ClientCHKBlock block = inserterSegment.encodeBlock(blockNo);
assertFalse(fetcherSegment.hasStartedDecode());
boolean success = fetcherSegment.onGotKey(block.getClientKey().getNodeCHK(), block.getBlock());