package freenet.client.async;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.ref.SoftReference;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;
import freenet.client.FetchException;
import freenet.client.FetchException.FetchExceptionMode;
import freenet.client.Metadata.SplitfileAlgorithm;
import freenet.client.async.PersistentJobRunner.CheckpointLock;
import freenet.crypt.ChecksumFailedException;
import freenet.keys.CHKBlock;
import freenet.keys.CHKDecodeException;
import freenet.keys.CHKEncodeException;
import freenet.keys.CHKVerifyException;
import freenet.keys.ClientCHK;
import freenet.keys.ClientCHKBlock;
import freenet.keys.Key;
import freenet.keys.NodeCHK;
import freenet.node.KeysFetchingLocally;
import freenet.support.Logger;
import freenet.support.MemoryLimitedChunk;
import freenet.support.MemoryLimitedJob;
import freenet.support.api.LockableRandomAccessBuffer.RAFLock;
import freenet.support.io.NativeThread;
import freenet.support.io.StorageFormatException;
/** Represents a single segment, in memory and on disk. Handles storage and decoding. Note that the
* on-disk data, and therefore the read-in metadata, may be inaccurate; we check everything
* opportunistically. Hence we are very robust (but not completely immune) to disk corruption.
* @see SplitFileFetcherStorage */
public class SplitFileFetcherSegmentStorage {
// Set this to false to turn off checking the CHKs on blocks decoded (and encoded) via FEC.
// Generally it is a good idea to have consistent behaviour regardless of what order we fetched
// the blocks in, whether binary blobs are enabled etc ... and this has caught nasty bugs in
// the past, although now we have hashes at file level ...
private static final boolean FORCE_CHECK_FEC_KEYS = true;
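// On-disk layout, as far as this class is concerned (all offsets are allocated by the
// parent SplitFileFetcherStorage):
//   segmentBlockDataOffset           - blocksForDecode() slots of CHKBlock.DATA_LENGTH each,
//                                      filled in arrival order, rewritten in block order on decode
//   segmentCrossCheckBlockDataOffset - cross-segment check blocks; either directly after the
//                                      data blocks or kept separate (see the constructors)
//   segmentKeyListOffset             - checksummed key list (segmentKeyListLength bytes)
//   segmentStatusOffset              - checksummed status: blocksFetched[] plus retry counts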
/** The segment number within the splitfile */
final int segNo;
/** Offset to the segment's block data. Downloaded blocks are written here, one slot per
 * block, as they arrive. */
final long segmentBlockDataOffset;
/** Offset to the segment's cross-check block data. This may be kept separately if we are going
* to complete the fetch via truncation, otherwise it's at the end of the segment data. */
final long segmentCrossCheckBlockDataOffset;
/** Offset to the segment's status metadata storage. */
final long segmentStatusOffset;
/** Length of the segment status region on disk (including checksum), used for locating it;
 * may be larger than the raw status length given by storedSegmentStatusLength(). */
final int segmentStatusPaddedLength;
/** Offset to the segment's key list */
final long segmentKeyListOffset;
/** Length of the segment key list */
final int segmentKeyListLength;
/** The splitfile */
final SplitFileFetcherStorage parent;
/** Count of data blocks (actual data divided up into CHKs, though the last one will be
* padded). Numbered 0 .. dataBlocks-1. */
public final int dataBlocks;
/** Count of cross-segment check blocks. These occur only in larger splitfiles and count as
* data blocks for segment-level FEC, but they also count as check blocks for cross-segment
* level FEC. Generally between 0 and 3. Numbered dataBlocks .. dataBlocks+crossSegmentCheckBlocks-1 */
public final int crossSegmentCheckBlocks;
/** Count of check blocks (generated by FEC). Numbered
* dataBlocks+crossSegmentCheckBlocks .. dataBlocks+crossSegmentCheckBlocks+checkBlocks-1 */
public final int checkBlocks;
/** Keeps track of how many times we've tried each block, which blocks we have downloaded etc. */
private final SplitFileFetcherSegmentBlockChooser blockChooser;
/** What is the order of the blocks on disk? Should be kept consistent with the block
 * chooser's record of fetched blocks! Is read from disk on startup and may be inaccurate,
 * checked on FEC decode. Elements: -1 = not fetched yet. */
private final int[] blocksFetched;
/** True if we have downloaded and decoded all the data blocks and cross-segment check blocks,
* and written them to their final location in the parent storage file. */
private boolean succeeded;
/** True if we have not only downloaded and decoded, but also finished with encoding and
* queueing healing blocks. */
private boolean finished;
/** True if the segment has been cancelled, has failed due to an internal error, etc. In which
* case it is not interested in further blocks. Not true if it has run out of retries, in which
* case (for cross-segment) we may still be interested in blocks. */
private boolean failed;
/** True if we've run out of retries. */
private boolean failedRetries;
/** True if the metadata needs writing but isn't going to be written immediately. */
private boolean metadataDirty;
/** True if the metadata was corrupt and we need to innerDecode(). */
private boolean corruptMetadata;
/** The cross segments for each data or cross-segment check block. This allows us to tell the
* cross-segments when we may have data to decode. The array is null if there are no
* cross-segments, and the elements are null if there is no associated cross-segment. */
private final SplitFileFetcherCrossSegmentStorage[] crossSegmentsByBlock;
private SoftReference<SplitFileSegmentKeys> keysCache;
private boolean tryDecode;
private int crossDataBlocksAllocated;
private int crossCheckBlocksAllocated;
/** Number of blocks we've given up on. */
private int failedBlocks;
private static volatile boolean logMINOR;
static {
Logger.registerClass(SplitFileFetcherSegmentStorage.class);
}
/**
* Construct a segment.
* @param parent
* @param segNumber
* @param splitfileType
* @param dataBlocks
* @param checkBlocks
* @param crossCheckBlocks
* @param segmentDataOffset
* @param segmentCrossCheckDataOffset -1 to mean store the cross-check blocks just after the
* data and check blocks for this segment. Otherwise the offset.
* @param segmentKeysOffset
* @param segmentStatusOffset
* @param writeRetries
* @param keys
* @param keysFetching
*/
public SplitFileFetcherSegmentStorage(SplitFileFetcherStorage parent, int segNumber,
SplitfileAlgorithm splitfileType, int dataBlocks, int checkBlocks, int crossCheckBlocks,
long segmentDataOffset, long segmentCrossCheckDataOffset,
long segmentKeysOffset, long segmentStatusOffset,
boolean writeRetries, SplitFileSegmentKeys keys, KeysFetchingLocally keysFetching) {
this.parent = parent;
this.segNo = segNumber;
this.dataBlocks = dataBlocks;
this.checkBlocks = checkBlocks;
this.crossSegmentCheckBlocks = crossCheckBlocks;
int total = dataBlocks + checkBlocks + crossSegmentCheckBlocks;
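// The last block of the last segment may be stored unpadded (truncated to the real file
// length), in which case it cannot be checked or FEC-decoded like the others; tell the
// block chooser to treat it specially.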
boolean ignoreLastBlock =
(segNo == parent.segments.length-1 && parent.lastBlockMightNotBePadded());
this.blockChooser = new SplitFileFetcherSegmentBlockChooser(total, parent.random,
parent.maxRetries, parent.cooldownTries, parent.cooldownLength, this,
keysFetching, ignoreLastBlock ? dataBlocks - 1 : -1);
int minFetched = blocksForDecode();
if(crossCheckBlocks != 0)
crossSegmentsByBlock = new SplitFileFetcherCrossSegmentStorage[minFetched];
else
crossSegmentsByBlock = null;
blocksFetched = new int[minFetched];
for(int i=0;i<blocksFetched.length;i++) blocksFetched[i] = -1;
segmentStatusPaddedLength = paddedStoredSegmentStatusLength(dataBlocks, checkBlocks,
crossCheckBlocks, writeRetries, parent.checksumLength, parent.persistent);
segmentKeyListLength =
storedKeysLength(blocksForDecode(), checkBlocks,
parent.splitfileSingleCryptoKey != null, parent.checksumLength);
this.segmentBlockDataOffset = segmentDataOffset;
if(segmentCrossCheckDataOffset == -1) {
segmentCrossCheckDataOffset =
segmentBlockDataOffset + dataBlocks * CHKBlock.DATA_LENGTH;
}
this.segmentCrossCheckBlockDataOffset = segmentCrossCheckDataOffset;
this.segmentKeyListOffset = segmentKeysOffset;
this.segmentStatusOffset = segmentStatusOffset;
// This must be passed in here or we will read the uninitialised keys!
keysCache = new SoftReference<SplitFileSegmentKeys>(keys);
}
/** Construct from a saved file. Uses the DataInputStream to read static settings, i.e. number
 * of blocks; does not use the RAF to read block status etc. The caller must call readMetadata()
 * and readSegmentKeys() separately for that.
 * @param parent The parent splitfile storage.
 * @param dis DataInputStream from which the static settings are read. Anything else we
 * will need to read separately from the RandomAccessBuffer.
 * @param segNo The segment number.
 * @param segmentCrossCheckDataOffset -1 to mean store the cross-check blocks just after the
 * data and check blocks for this segment. Otherwise the offset.
 * @throws IOException
 * @throws StorageFormatException
 */
public SplitFileFetcherSegmentStorage(SplitFileFetcherStorage parent, DataInputStream dis,
int segNo, boolean writeRetries, long segmentDataOffset, long segmentCrossCheckDataOffset,
long segmentKeysOffset, long segmentStatusOffset, KeysFetchingLocally keysFetching)
throws IOException, StorageFormatException {
this.segNo = segNo;
this.parent = parent;
this.dataBlocks = dis.readInt();
if(dataBlocks < 1 || dataBlocks > 256)
throw new StorageFormatException("Bad data block count");
this.crossSegmentCheckBlocks = dis.readInt();
// REDFLAG one day we will support more than 256 blocks per segment?
if(crossSegmentCheckBlocks < 0 || crossSegmentCheckBlocks > 256)
throw new StorageFormatException("Bad cross-segment check block count");
this.checkBlocks = dis.readInt();
if(checkBlocks < 0 || checkBlocks > 256)
throw new StorageFormatException("Bad check block count");
int total = dataBlocks+checkBlocks+crossSegmentCheckBlocks;
if(total > 256)
throw new StorageFormatException("Too many blocks in segment");
boolean ignoreLastBlock =
(segNo == parent.segments.length-1 && parent.lastBlockMightNotBePadded());
blockChooser = new SplitFileFetcherSegmentBlockChooser(total, parent.random,
parent.maxRetries, parent.cooldownTries, parent.cooldownLength,
this, keysFetching, ignoreLastBlock ? dataBlocks - 1 : -1);
int minFetched = blocksForDecode();
if(crossSegmentCheckBlocks != 0)
crossSegmentsByBlock = new SplitFileFetcherCrossSegmentStorage[minFetched];
else
crossSegmentsByBlock = null;
blocksFetched = new int[minFetched];
for(int i=0;i<blocksFetched.length;i++) blocksFetched[i] = -1;
segmentStatusPaddedLength = paddedStoredSegmentStatusLength(dataBlocks, checkBlocks,
crossSegmentCheckBlocks, writeRetries, parent.checksumLength, true);
segmentKeyListLength =
storedKeysLength(blocksForDecode(), checkBlocks,
parent.splitfileSingleCryptoKey != null, parent.checksumLength);
keysCache = null; // Will be read later
this.segmentBlockDataOffset = segmentDataOffset;
if(segmentCrossCheckDataOffset == -1) {
segmentCrossCheckDataOffset =
segmentBlockDataOffset + dataBlocks * CHKBlock.DATA_LENGTH;
}
this.segmentCrossCheckBlockDataOffset = segmentCrossCheckDataOffset;
this.segmentKeyListOffset = segmentKeysOffset;
this.segmentStatusOffset = segmentStatusOffset;
}
public SplitFileSegmentKeys getSegmentKeys() throws IOException {
synchronized(this) {
if(keysCache != null) {
SplitFileSegmentKeys cached = keysCache.get();
if(cached != null) return cached;
}
SplitFileSegmentKeys keys;
try {
keys = readSegmentKeys();
} catch (ChecksumFailedException e) {
Logger.error(this, "Keys corrupted on "+this+" !");
// Treat as IOException, i.e. fatal. FIXME!
throw new IOException(e);
}
if(keys == null) return null;
keysCache = new SoftReference<SplitFileSegmentKeys>(keys);
return keys;
}
}
SplitFileSegmentKeys readSegmentKeys() throws IOException, ChecksumFailedException {
SplitFileSegmentKeys keys = new SplitFileSegmentKeys(blocksForDecode(), checkBlocks, parent.splitfileSingleCryptoKey, parent.splitfileSingleCryptoAlgorithm);
byte[] buf = new byte[SplitFileSegmentKeys.storedKeysLength(blocksForDecode(), checkBlocks, parent.splitfileSingleCryptoKey != null)];
parent.preadChecksummed(segmentKeyListOffset, buf, 0, buf.length);
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(buf));
keys.readKeys(dis, false);
keys.readKeys(dis, true);
return keys;
}
/** Write the status metadata to disk, after a series of updates. */
public void writeMetadata() throws IOException {
writeMetadata(true);
}
/** Write the status metadata to disk, after a series of updates. */
public void writeMetadata(boolean force) throws IOException {
innerWriteMetadata(force);
}
/** Read all the blocks, encode them according to their supposed keys and check that they are
* in fact the blocks that they should be. If the metadata is inaccurate, update it and
* writeMetadata(). If we have enough blocks to decode, and we don't have all the blocks, then
* schedule a decode on the FEC thread.
* @return True if we scheduled a decode or one is already in progress. False if the segment
* has already succeeded, failed or finished, or if we do not have enough blocks to decode and
* need to fetch more blocks. */
public boolean tryStartDecode() {
synchronized(this) {
if(succeeded || failed || finished) return false;
if(!corruptMetadata && blockChooser.successCount() < blocksForDecode()) return false;
if(tryDecode) return true;
tryDecode = true;
}
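// Budget enough memory for every block in the segment plus the FEC codec's working
// overhead; decode and encode run one after the other, so take the larger of the two.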
long limit = totalBlocks() * CHKBlock.DATA_LENGTH +
Math.max(parent.fecCodec.maxMemoryOverheadDecode(blocksForDecode(), checkBlocks),
parent.fecCodec.maxMemoryOverheadEncode(blocksForDecode(), checkBlocks));
final int prio = NativeThread.LOW_PRIORITY;
parent.memoryLimitedJobRunner.queueJob(new MemoryLimitedJob(limit) {
@Override
public int getPriority() {
return prio;
}
@Override
public boolean start(MemoryLimitedChunk chunk) {
boolean shutdown = false;
CheckpointLock lock = null;
try {
lock = parent.jobRunner.lock();
innerDecode(chunk);
} catch (IOException e) {
Logger.error(this, "Failed to decode "+this+" because of disk error: "+e, e);
parent.failOnDiskError(e);
} catch (PersistenceDisabledException e) {
// Shutting down.
// We don't call the callback here, so we don't care.
shutdown = true;
} catch (Throwable e) {
Logger.error(this, "Failed to decode "+this+" because of internal error: "+e, e);
parent.fail(new FetchException(FetchExceptionMode.INTERNAL_ERROR, e));
} finally {
chunk.release();
synchronized(SplitFileFetcherSegmentStorage.this) {
tryDecode = false;
}
try {
// We may not have completed, but we HAVE finished.
// Need to tell the parent, so it can do something about it.
// In particular, if we failed, we may need to complete cancellation, and we
// can't do that until both tryDecode=false and parent gets the callback.
if(!shutdown)
parent.finishedEncoding(SplitFileFetcherSegmentStorage.this);
} finally {
if(lock != null) lock.unlock(false, prio);
}
}
return true;
}
});
return true;
}
/** Attempt FEC decoding. Check blocks before decoding in case there is disk corruption. Check
* the new decoded blocks afterwards to ensure reproducible behaviour. */
private void innerDecode(MemoryLimitedChunk chunk) throws IOException {
if(logMINOR) Logger.minor(this, "Trying to decode "+this+" for "+parent);
// Even if we fail, once we set tryDecode=true, we need to notify the parent when we're done.
boolean fail;
synchronized(this) {
if(finished) return;
fail = succeeded || failed;
if(fail) finished = true;
}
if(fail) {
return;
}
int totalBlocks = totalBlocks();
byte[][] allBlocks = readAllBlocks();
SplitFileSegmentKeys keys = getSegmentKeys();
if(allBlocks == null || keys == null) {
return;
}
class MyBlock {
final byte[] buf;
final int blockNumber;
final int slot;
MyBlock(byte[] buf, int blockNumber, int slot) {
this.buf = buf;
this.blockNumber = blockNumber;
this.slot = slot;
}
}
ArrayList<MyBlock> maybeBlocks = new ArrayList<MyBlock>();
int fetchedCount = 0;
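// First pass: validate the slot table against basic invariants (block numbers in range,
// no duplicates), since the on-disk metadata may be stale or corrupt.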
synchronized(this) {
boolean[] used = new boolean[totalBlocks];
for(int i=0;i<blocksFetched.length;i++) {
if(blocksFetched[i] < 0 || blocksFetched[i] >= totalBlocks) {
Logger.warning(this, "Inconsistency decoding splitfile: slot "+i+" has bogus block number "+blocksFetched[i]);
blocksFetched[i] = -1;
maybeBlocks.add(new MyBlock(allBlocks[i], -1, i));
continue;
} else if(used[blocksFetched[i]]) {
Logger.warning(this, "Inconsistency decoding splitfile: slot "+i+" has duplicate block number "+blocksFetched[i]);
blocksFetched[i] = -1;
continue;
} else {
if(logMINOR) Logger.minor(this, "Found block "+blocksFetched[i]+" in slot "+i);
maybeBlocks.add(new MyBlock(allBlocks[i], blocksFetched[i], i));
used[blocksFetched[i]] = true;
fetchedCount++;
}
}
if(fetchedCount < blocksForDecode()) {
int oldBlocksFetchedCount = blockChooser.successCount();
blockChooser.replaceSuccesses(used);
if(blockChooser.successCount() != oldBlocksFetchedCount) {
Logger.warning(this, "Corrected block count to "+blockChooser.successCount()+
" from "+oldBlocksFetchedCount);
}
}
}
if(fetchedCount < blocksForDecode()) {
// We *DO* want to write the metadata immediately here, because we've just gone over
// the blocks on disk and determined that the metadata is inaccurate.
writeMetadata();
boolean wasCorrupt;
synchronized(this) {
wasCorrupt = corruptMetadata;
corruptMetadata = false;
}
parent.restartedAfterDataCorruption(wasCorrupt);
return;
}
// Check the blocks and put them into the correct positions.
int validBlocks = 0;
int validDataBlocks = 0;
byte[][] dataBlocks = new byte[blocksForDecode()][];
byte[][] checkBlocks = new byte[this.checkBlocks][];
for(MyBlock test : maybeBlocks) {
boolean failed = false;
int blockNumber = test.blockNumber;
byte[] buf = test.buf;
ClientCHK decodeKey = blockNumber == -1 ? null : keys.getKey(blockNumber, null, false);
if(decodeKey == null) {
// Defensive: we bailed out above if any slot had a bogus block number, so this
// should not happen; without a key we cannot verify the block by re-encoding.
continue;
}
// Encode it to check whether the key is the same.
try {
ClientCHKBlock block =
ClientCHKBlock.encodeSplitfileBlock(buf, decodeKey.getCryptoKey(), decodeKey.getCryptoAlgorithm());
ClientCHK actualKey = block.getClientKey();
if(!decodeKey.equals(actualKey)) {
// Is it a different block?
blockNumber = keys.getBlockNumber(actualKey, null);
if(blockNumber == -1) {
Logger.error(this, "Block which should be block #"+test.blockNumber+" in slot "+test.slot+" for segment "+this+" is not valid for key "+decodeKey);
failed = true;
synchronized(this) {
blockChooser.onUnSuccess(test.blockNumber);
if(blocksFetched[test.slot] == test.blockNumber) {
blocksFetched[test.slot] = -1;
}
}
} else {
synchronized(this) {
blockChooser.onUnSuccess(test.blockNumber);
blocksFetched[test.slot] = blockNumber;
this.blockChooser.onSuccess(blockNumber);
}
}
}
} catch (CHKEncodeException e) {
Logger.error(this, "Block which should be "+blockNumber+" for segment "+this+" cannot be encoded for key "+decodeKey);
failed = true;
}
if(!failed) {
validBlocks++;
if(blockNumber < blocksForDecode())
validDataBlocks++;
if(blockNumber < dataBlocks.length)
dataBlocks[blockNumber] = buf;
else
checkBlocks[blockNumber - dataBlocks.length] = buf;
}
}
allBlocks = null;
maybeBlocks.clear();
maybeBlocks = null;
if(validBlocks < blocksForDecode()) {
// Metadata didn't match blocks on disk; write metadata immediately.
writeMetadata();
boolean wasCorrupt;
synchronized(this) {
wasCorrupt = corruptMetadata;
corruptMetadata = false;
}
parent.restartedAfterDataCorruption(wasCorrupt);
return;
}
boolean[] dataBlocksPresent = new boolean[dataBlocks.length];
boolean[] checkBlocksPresent = new boolean[checkBlocks.length];
for(int i=0;i<dataBlocks.length;i++) {
if(dataBlocks[i] == null) {
dataBlocks[i] = new byte[CHKBlock.DATA_LENGTH];
} else {
dataBlocksPresent[i] = true;
}
}
for(int i=0;i<checkBlocks.length;i++) {
if(checkBlocks[i] == null) {
checkBlocks[i] = new byte[CHKBlock.DATA_LENGTH];
} else {
checkBlocksPresent[i] = true;
}
}
if(validDataBlocks < blocksForDecode()) {
if(logMINOR) Logger.minor(this, "Decoding in memory for "+this);
parent.fecCodec.decode(dataBlocks, checkBlocks, dataBlocksPresent, checkBlocksPresent, CHKBlock.DATA_LENGTH);
}
boolean capturingBinaryBlob = parent.fetcher.wantBinaryBlob();
boolean checkDecodedKeys = FORCE_CHECK_FEC_KEYS || capturingBinaryBlob;
if(checkDecodedKeys) {
// Check that the decoded blocks correspond to the keys given.
// This will catch odd bugs and ensure consistent behaviour.
checkDecodedDataBlocks(dataBlocks, dataBlocksPresent, keys, capturingBinaryBlob);
}
writeAllDataBlocks(dataBlocks);
// Report success if we are not verifying decoded keys, but if we *are*, we need to wait
// until FEC encoding completes.
if(!checkDecodedKeys)
parent.finishedSuccess(this);
triggerAllCrossSegmentCallbacks();
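// Re-encode to regenerate any check blocks we didn't download; we need them for
// healing, and (if enabled) to verify them against the expected keys.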
parent.fecCodec.encode(dataBlocks, checkBlocks, checkBlocksPresent, CHKBlock.DATA_LENGTH);
// Check these *after* we complete, to reduce the critical path.
// FIXME possibility of inconsistency with malicious splitfiles?
if(checkDecodedKeys) {
if(!checkEncodedDataBlocks(checkBlocks, checkBlocksPresent, keys, capturingBinaryBlob)) {
// The downloaded blocks were correct, but the encoded blocks are not.
// That means the splitfile is broken as inserted, or there's some weird in-memory data corruption.
synchronized(this) {
finished = true;
}
parent.fail(new FetchException(FetchExceptionMode.SPLITFILE_DECODE_ERROR, "Encoded blocks do not match metadata"));
return;
}
parent.finishedSuccess(this);
}
queueHeal(dataBlocks, checkBlocks, dataBlocksPresent, checkBlocksPresent);
dataBlocks = null;
checkBlocks = null;
// Finished a segment, definitely want to write metadata right now.
writeMetadata();
// Now we've REALLY finished.
synchronized(this) {
corruptMetadata = false;
finished = true;
}
if(logMINOR) Logger.minor(this, "Finished decoding "+this+" for "+parent);
}
private void checkDecodedDataBlocks(byte[][] dataBlocks, boolean[] dataBlocksPresent,
SplitFileSegmentKeys keys, boolean capturingBinaryBlob) {
for(int i=0;i<dataBlocks.length;i++) {
if(dataBlocksPresent[i]) continue;
ClientCHK decodeKey = keys.getKey(i, null, false);
// Encode it to check whether the key is the same.
ClientCHKBlock block;
try {
block = ClientCHKBlock.encodeSplitfileBlock(dataBlocks[i], decodeKey.getCryptoKey(), decodeKey.getCryptoAlgorithm());
ClientCHK actualKey = block.getClientKey();
if(!actualKey.equals(decodeKey)) {
if(i == dataBlocks.length-1 && this.segNo == parent.segments.length-1 &&
parent.lastBlockMightNotBePadded()) {
// Ignore.
return;
} else {
// Usual case.
parent.fail(new FetchException(FetchExceptionMode.SPLITFILE_DECODE_ERROR, "Decoded block does not match expected key"));
return;
}
}
if(capturingBinaryBlob)
parent.fetcher.maybeAddToBinaryBlob(block);
} catch (CHKEncodeException e) {
// Impossible!
parent.fail(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Decoded block could not be encoded"));
Logger.error(this, "Impossible: Decoded block could not be encoded");
return;
}
}
}
private boolean checkEncodedDataBlocks(byte[][] checkBlocks, boolean[] checkBlocksPresent,
SplitFileSegmentKeys keys, boolean capturingBinaryBlob) {
for(int i=0;i<checkBlocks.length;i++) {
if(checkBlocksPresent[i]) continue;
ClientCHK decodeKey = keys.getKey(i+blocksForDecode(), null, false);
// Encode it to check whether the key is the same.
ClientCHKBlock block;
try {
block = ClientCHKBlock.encodeSplitfileBlock(checkBlocks[i], decodeKey.getCryptoKey(), decodeKey.getCryptoAlgorithm());
ClientCHK actualKey = block.getClientKey();
if(!actualKey.equals(decodeKey)) {
Logger.error(this, "Splitfile check block "+i+" does not encode to expected key for "+this+" for "+parent);
return false;
}
if(capturingBinaryBlob)
parent.fetcher.maybeAddToBinaryBlob(block);
} catch (CHKEncodeException e) {
// Impossible!
parent.fail(new FetchException(FetchExceptionMode.INTERNAL_ERROR, "Generated check block could not be encoded"));
Logger.error(this, "Impossible: Generated check block could not be encoded");
return false;
}
}
return true;
}
private void queueHeal(byte[][] dataBlocks, byte[][] checkBlocks, boolean[] dataBlocksPresent, boolean[] checkBlocksPresent) throws IOException {
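// Re-insert (heal) blocks that we decoded but did not fetch. Skip blocks with zero
// retries: we never even requested those, so presumably they are not known to be
// missing from the network.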
for(int i=0;i<dataBlocks.length;i++) {
if(dataBlocksPresent[i]) continue;
if(blockChooser.getRetries(i) == 0) continue;
queueHeal(i, dataBlocks[i]);
}
for(int i=0;i<checkBlocks.length;i++) {
if(checkBlocksPresent[i]) continue;
if(blockChooser.getRetries(i+dataBlocks.length) == 0) continue;
queueHeal(i+dataBlocks.length, checkBlocks[i]);
}
}
private void queueHeal(int blockNumber, byte[] data) throws IOException {
byte[] cryptoKey;
byte cryptoAlgorithm;
if(parent.splitfileSingleCryptoKey != null) {
cryptoKey = parent.splitfileSingleCryptoKey;
cryptoAlgorithm = parent.splitfileSingleCryptoAlgorithm;
} else {
ClientCHK key = getSegmentKeys().getKey(blockNumber, null, false);
cryptoKey = key.getCryptoKey();
cryptoAlgorithm = key.getCryptoAlgorithm();
}
parent.fetcher.queueHeal(data, cryptoKey, cryptoAlgorithm);
}
private synchronized byte[][] readAllBlocks() throws IOException {
RAFLock lock = parent.lockRAFOpen();
try {
// FIXME consider using a single big byte[].
byte[][] ret = new byte[blocksForDecode()][];
for(int i=0;i<ret.length;i++)
ret[i] = readBlock(i);
return ret;
} finally {
lock.unlock();
}
}
private void triggerAllCrossSegmentCallbacks() {
SplitFileFetcherCrossSegmentStorage[] crossSegmentsByBlockCopy;
synchronized(this) {
if(crossSegmentsByBlock == null) return;
crossSegmentsByBlockCopy = Arrays.copyOf(this.crossSegmentsByBlock, this.crossSegmentsByBlock.length);
}
for(int i=0;i<crossSegmentsByBlockCopy.length;i++) {
SplitFileFetcherCrossSegmentStorage s = crossSegmentsByBlockCopy[i];
if(s != null)
s.onFetchedRelevantBlock(this, i);
}
}
/** Write a full set of data blocks to disk and update the metadata accordingly. */
private void writeAllDataBlocks(byte[][] dataBlocks) throws IOException {
RAFLock lock = parent.lockRAFOpen();
try {
synchronized(this) {
assert(dataBlocks.length == blocksForDecode());
for(int i=0;i<dataBlocks.length;i++) {
writeDownloadedBlock(i, dataBlocks[i]);
blockChooser.onSuccess(i);
blocksFetched[i] = i;
}
succeeded = true;
}
} finally {
lock.unlock();
}
}
final int totalBlocks() {
return dataBlocks + crossSegmentCheckBlocks + checkBlocks;
}
/** A block has been fetched which the caller believes is one of ours. Check whether it is in
* fact ours, and that we don't have it already. Find the key and decode it, and add it to our
* collection. If any cross-segments are waiting for this block, tell them. If we can decode,
* do so. Can be quite involved, should be called off-thread.
* @param key
* @param block
* @throws IOException If we were unable to write the block to disk.
* @return True if the block matched one of our keys and was stored to disk. False if there
* was no match, if we have already fetched that block, or if various errors occurred.
*/
public boolean onGotKey(NodeCHK key, CHKBlock block) throws IOException {
SplitFileSegmentKeys keys = getSegmentKeys();
if(keys == null) return false;
int blockNumber;
ClientCHK decodeKey;
synchronized(this) {
if(succeeded || failed || finished) return false;
blockNumber = blockChooser.getBlockNumber(keys, key);
if(blockNumber == -1) {
if(logMINOR) Logger.minor(this, "Block not found "+key);
return false;
}
if(blockChooser.hasSucceeded(blockNumber))
return false; // Even if this is inaccurate, it will be corrected on a FEC attempt.
if(tryDecode)
return false;
decodeKey = keys.getKey(blockNumber, null, false);
}
ClientCHKBlock decodedBlock;
byte[] decodedData;
try {
decodedBlock = new ClientCHKBlock(block, decodeKey);
decodedData = decodedBlock.memoryDecode();
} catch (CHKVerifyException e) {
Logger.error(this, "Verify failed on block for "+decodeKey);
return false;
} catch (CHKDecodeException e) {
Logger.error(this, "Decode failed on block for "+decodeKey);
return false;
}
return innerOnGotKey(key, decodedBlock, keys, blockNumber, decodedData);
}
/**
 * Store a block that was fetched or decoded.
 * @param key The key for the block.
 * @param block The block.
 * @param keys The segment's key list.
 * @param blockNumber A block number for the data. (But there may be several block numbers with
 * identical keys).
 * @param decodedData The block's payload data.
 * @return True if we actually saved the block to disk. False otherwise.
 * @throws IOException
 */
boolean innerOnGotKey(NodeCHK key, ClientCHKBlock block, SplitFileSegmentKeys keys,
int blockNumber, byte[] decodedData)
throws IOException {
if(decodedData.length != CHKBlock.DATA_LENGTH) {
if(blockNumber == dataBlocks-1 && this.segNo == parent.segments.length-1 &&
parent.lastBlockMightNotBePadded()) {
// Can't use it for FEC decode. Just ignore it.
// FIXME We can use it if we have all the other data blocks, but it's not worth
// checking, and might have non-obvious complications if we e.g. have data loss in
// FEC decoding.
Logger.warning(this, "Ignoring last block");
return false;
} else {
parent.fail(new FetchException(FetchExceptionMode.SPLITFILE_ERROR, "Splitfile block is too short"));
return false;
}
}
SplitFileFetcherCrossSegmentStorage callback = null;
// Clearer to do duplicate handling here, plus we only need to decode once.
boolean saved = false;
do {
int nextBlockNumber;
int slotNumber;
// LOCKING We have to do the write inside the lock to prevent parallel decodes messing up etc.
synchronized(this) {
if(succeeded || failed || finished) {
if(logMINOR) Logger.minor(this, "Already succeeded/finished/failed");
return saved; // Don't double remove from bloom filter!
}
if(blockChooser.hasSucceeded(blockNumber)) {
if(logMINOR) Logger.minor(this, "Already have block "+blockNumber);
blockNumber = blockChooser.getBlockNumber(keys, key);
if(logMINOR) Logger.minor(this, "Trying block "+blockNumber);
continue;
}
if(blockChooser.successCount() >= blocksForDecode()) {
if(logMINOR) Logger.minor(this, "Already decoding");
// Don't remove it from the filter. We haven't written it, so it could be
// removed twice. And if we decode successfully, the filter will be ignored.
return saved;
}
slotNumber = findFreeSlot();
assert(slotNumber != -1);
blocksFetched[slotNumber] = blockNumber;
blockChooser.onSuccess(blockNumber);
RAFLock lock = parent.lockRAFOpen();
try {
writeDownloadedBlock(slotNumber, decodedData);
saved = true;
} catch (IOException e) {
blocksFetched[slotNumber] = -1;
blockChooser.onUnSuccess(blockNumber);
Logger.error(this, "Unable to write downloaded block to disk: "+e, e);
throw e;
} finally {
lock.unlock();
}
if(crossSegmentsByBlock != null && blockNumber < crossSegmentsByBlock.length) {
callback = crossSegmentsByBlock[blockNumber];
}
nextBlockNumber = blockChooser.getBlockNumber(keys, key);
metadataDirty = true;
}
if(callback != null)
callback.onFetchedRelevantBlock(this, blockNumber);
lazyWriteMetadata();
if(logMINOR) Logger.minor(this, "Got block "+blockNumber+" ("+key+") for "+this+" for "+parent+" written to "+slotNumber);
parent.jobRunner.queueNormalOrDrop(new PersistentJob() {
@Override
public boolean run(ClientContext context) {
parent.fetcher.onFetchedBlock();
return false;
}
});
tryStartDecode();
parent.fetcher.maybeAddToBinaryBlob(block);
blockNumber = nextBlockNumber;
} while(blockNumber != -1);
return saved; // Return true ONLY if we actually saved the block to disk.
}
private synchronized int findFreeSlot() {
for(int i=0;i<blocksFetched.length;i++) {
if(blocksFetched[i] == -1) return i;
}
return -1;
}
/** Caller must have already lock()'ed parent.raf and synchronized(this).
* @throws IOException */
private synchronized void writeDownloadedBlock(int slotNumber, byte[] data) throws IOException {
// FIXME Do we need to pad here for really old splitfiles, or does the FEC code do it?
if(data.length != CHKBlock.DATA_LENGTH) throw new IllegalArgumentException();
if(slotNumber >= blocksForDecode()) throw new IllegalArgumentException();
parent.writeBlock(this, slotNumber, data);
}
long blockOffset(int slotNumber) {
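// Map a storage slot to its file offset. Slots below dataBlocks, and slots at or above
// dataBlocks + crossSegmentCheckBlocks, live in the main block area (the latter renumbered
// to follow the data blocks); the slots in between address the cross-segment check block
// area, which may be stored separately.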
if(slotNumber < dataBlocks) {
return segmentBlockDataOffset + slotNumber * CHKBlock.DATA_LENGTH;
} else if(slotNumber >= (dataBlocks + crossSegmentCheckBlocks)) {
slotNumber -= crossSegmentCheckBlocks;
return segmentBlockDataOffset + slotNumber * CHKBlock.DATA_LENGTH;
} else {
slotNumber -= dataBlocks;
return segmentCrossCheckBlockDataOffset + slotNumber * CHKBlock.DATA_LENGTH;
}
}
/** Write the metadata (status). Caller should already have taken parent.raf.lock() and
* synchronized(this). Metadata is fairly sparse on disk, we are expected to deduce it (and
* check it) when constructing.
* @throws IOException */
private void innerWriteMetadata(boolean force) throws IOException {
if(!parent.persistent) return;
synchronized(this) {
if(!(force || metadataDirty)) return;
if(logMINOR) Logger.minor(this, "Writing metadata for "+segNo+" for "+parent, new Exception("debug"));
OutputStream cos = parent.writeChecksummedTo(segmentStatusOffset, segmentStatusPaddedLength);
try {
DataOutputStream dos = new DataOutputStream(cos);
for(int s : blocksFetched)
dos.writeInt(s);
blockChooser.writeRetries(dos);
dos.close();
} catch (IOException e) {
throw new Error(e); // Impossible!
}
metadataDirty = false;
}
}
/** Only called during construction. Reads the variable metadata from the RandomAccessBuffer.
* @throws ChecksumFailedException
* @throws StorageFormatException */
void readMetadata() throws IOException, StorageFormatException, ChecksumFailedException {
byte[] buf = new byte[segmentStatusPaddedLength];
try {
parent.preadChecksummed(segmentStatusOffset, buf, 0, segmentStatusPaddedLength-parent.checksumLength);
} catch (ChecksumFailedException e) {
corruptMetadata = true;
throw e;
}
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(buf));
for(int i=0;i<blocksFetched.length;i++) {
int s = dis.readInt();
if(s < -1 || s >= totalBlocks())
throw new StorageFormatException("Bogus block number in blocksFetched["+i+"]: "+s);
blocksFetched[i] = s;
if(s >= 0) {
if(!blockChooser.hasSucceeded(s)) {
blockChooser.onSuccess(s);
} else {
throw new StorageFormatException("Duplicated block number in blocksFetched in "+this);
}
}
}
blockChooser.readRetries(dis);
failedBlocks = blockChooser.countFailedBlocks();
if(failedBlocks >= checkBlocks) {
failedRetries = true;
}
dis.close();
}
public static int storedSegmentStatusLength(int dataBlocks, int checkBlocks, int crossCheckBlocks,
boolean trackRetries) {
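// 4 bytes per slot for blocksFetched[], plus (optionally) 4 bytes of retry count per
// block. E.g. 128 data + 3 cross-check + 128 check blocks with retries tracked:
// 131*4 + 259*4 = 1560 bytes, before the checksum added by the padded variant.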
int fetchedBlocks = dataBlocks + crossCheckBlocks;
int totalBlocks = dataBlocks + checkBlocks + crossCheckBlocks;
return fetchedBlocks * 4 + (trackRetries ? (totalBlocks * 4) : 0);
}
public static int paddedStoredSegmentStatusLength(int dataBlocks, int checkBlocks, int crossCheckBlocks,
boolean trackRetries, int checksumLength, boolean persistent) {
if(!persistent) return 0;
return storedSegmentStatusLength(dataBlocks, checkBlocks, crossCheckBlocks, trackRetries) +
checksumLength;
}
private final int blocksForDecode() {
return dataBlocks + crossSegmentCheckBlocks;
}
public synchronized boolean isFinished() {
return finished || failed || failedRetries;
}
public synchronized boolean isDecodingOrFinished() {
return finished || failed || succeeded || tryDecode;
}
public synchronized boolean hasSucceeded() {
return succeeded;
}
/** Write content to an OutputStream. We already have raf.lock().
* @throws IOException */
void writeToInner(OutputStream os) throws IOException {
// FIXME if we use readAllBlocks() we'll need to run on the memory limited queue???
for(int i=0;i<dataBlocks;i++) { // Don't include cross-check blocks.
byte[] buf = readBlock(i);
if(i == dataBlocks-1 && this.segNo == parent.segments.length-1) {
int length = (int) (parent.finalLength % CHKBlock.DATA_LENGTH);
if(length == 0) length = CHKBlock.DATA_LENGTH;
os.write(buf, 0, length);
} else {
os.write(buf);
}
}
}
/** Read a single block from a specific slot, which could be any block number.
* @throws IOException If an error occurred reading the data from disk. */
private synchronized byte[] readBlock(int slotNumber) throws IOException {
if(slotNumber >= blocksForDecode()) throw new IllegalArgumentException();
return parent.readBlock(this, slotNumber);
}
public void onNonFatalFailure(int blockNumber) {
boolean givenUp = false;
boolean kill = false;
boolean wake = false;
boolean write = false;
if(logMINOR) Logger.minor(this, "Non-fatal failure on block "+blockNumber+" for "+this+" for "+parent);
synchronized(this) {
long cooldown = blockChooser.overallCooldownTime();
if(blockChooser.onNonFatalFailure(blockNumber)) {
if(logMINOR) Logger.minor(this, "Giving up on block "+blockNumber+" on "+this);
givenUp = true;
failedBlocks++;
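// We need blocksForDecode() successes out of totalBlocks(), so we can tolerate exactly
// checkBlocks failures; one fewer if the last block may be unpadded, since it might be
// unusable for FEC decoding.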
int target = checkBlocks;
if(!parent.lastBlockMightNotBePadded()) target++;
if(failedBlocks >= target) {
kill = true;
failedRetries = true;
if(crossSegmentsByBlock == null) {
// If not cross-segment, fail completely.
// If cross-segment, we may still pick up blocks from elsewhere.
finished = true;
failed = true;
}
} else {
write = true;
}
} else {
if(logMINOR) Logger.minor(this, "Block "+blockNumber+" on "+this+" : "+blockChooser.getRetries(blockNumber)+"/"+blockChooser.maxRetries);
if(blockChooser.overallCooldownTime() < cooldown)
wake = true;
write = true;
}
if(write)
metadataDirty = true;
}
if(write)
lazyWriteMetadata();
if(givenUp)
parent.failedBlock();
if(kill) {
if(crossSegmentsByBlock == null) {
// Fail the whole splitfile immediately.
parent.failOnSegment(this);
} else {
// Could still succeed. But we're not gonna find any more blocks.
// Similar to DSOnly... finishedEncoding will fail eventually when all segments
// are finished and all cross-segments are not encoding.
parent.finishedEncoding(this);
}
}
if(wake)
parent.maybeClearCooldown();
}
/** The metadata has been updated. We should write it ... at some point. CALLER MUST SET metadataDirty! */
private void lazyWriteMetadata() {
parent.lazyWriteMetadata();
}
/** Allocate a cross-segment data block. Note that this algorithm must be reproduced exactly
* for splitfile compatibility; the Random seed is actually determined by the splitfile metadata.
* @param seg The cross-segment to allocate a block for.
* @param random PRNG seeded from the splitfile metadata, which determines which blocks to
* allocate in a deterministic manner.
* @return The data block number allocated.
*/
public int allocateCrossDataBlock(SplitFileFetcherCrossSegmentStorage seg, Random random) {
int size = dataBlocks;
if(crossDataBlocksAllocated == size) return -1;
int x = 0;
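// Try up to 10 random probes, then fall back to a linear scan starting from the last
// probe. Both depend only on the metadata-seeded PRNG, so the allocation is
// reproducible, as required for splitfile compatibility.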
for(int i=0;i<10;i++) {
x = random.nextInt(size);
if(crossSegmentsByBlock[x] == null) {
crossSegmentsByBlock[x] = seg;
crossDataBlocksAllocated++;
return x;
}
}
for(int i=0;i<size;i++) {
x++;
if(x == size) x = 0;
if(crossSegmentsByBlock[x] == null) {
crossSegmentsByBlock[x] = seg;
crossDataBlocksAllocated++;
return x;
}
}
throw new IllegalStateException("Unable to allocate cross data block even though have not used all slots up???");
}
/** Allocate a cross-segment check block. Note that this algorithm must be reproduced exactly
* for splitfile compatibility; the Random seed is actually determined by the splitfile metadata.
* @param seg The cross-segment to allocate a block for.
* @param random PRNG seeded from the splitfile metadata, which determines which blocks to
* allocate in a deterministic manner.
* @return The block number allocated, in the range [dataBlocks, dataBlocks+crossSegmentCheckBlocks).
*/
public int allocateCrossCheckBlock(SplitFileFetcherCrossSegmentStorage seg, Random random) {
if(crossCheckBlocksAllocated == crossSegmentCheckBlocks) return -1;
int x = dataBlocks + crossSegmentCheckBlocks - (1 + random.nextInt(crossSegmentCheckBlocks));
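// Pick a random starting point within the cross-check block range and scan forward,
// wrapping at the end of the range; the loop increments before testing, hence the
// (1 + nextInt(...)) term above.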
for(int i=0;i<crossSegmentCheckBlocks;i++) {
x++;
if(x == dataBlocks + crossSegmentCheckBlocks) x = dataBlocks;
if(crossSegmentsByBlock[x] == null) {
crossSegmentsByBlock[x] = seg;
crossCheckBlocksAllocated++;
return x;
}
}
throw new IllegalStateException("Unable to allocate cross check block even though have not used all slots up???");
}
static int storedKeysLength(int dataBlocks, int checkBlocks, boolean commonDecryptKey, int checksumLength) {
return SplitFileSegmentKeys.storedKeysLength(dataBlocks, checkBlocks, commonDecryptKey) + checksumLength;
}
/** Only called during creation. Do not read the keys in before writing them! */
void writeKeysWithChecksum(SplitFileSegmentKeys keys) throws IOException {
assert(keysCache.get() == keys);
assert(this.dataBlocks + this.crossSegmentCheckBlocks == keys.dataBlocks);
assert(this.checkBlocks == keys.checkBlocks);
OutputStream cos = parent.writeChecksummedTo(segmentKeyListOffset, segmentKeyListLength);
DataOutputStream dos = new DataOutputStream(cos);
try {
keys.writeKeys(dos, false);
keys.writeKeys(dos, true);
} catch (IOException e) {
// Impossible!
throw new Error(e);
}
dos.close();
}
public boolean definitelyWantKey(NodeCHK key) {
synchronized(this) {
if(succeeded || failed || finished) return false;
}
SplitFileSegmentKeys keys;
try {
keys = getSegmentKeys();
} catch (IOException e) {
parent.failOnDiskError(e);
return false;
}
synchronized(this) { // Synchronized because the block chooser's success flags may change under us
return blockChooser.getBlockNumber(keys, key) >= 0;
}
}
/** Write minimal fixed metadata for the segment. This should include lengths rather than
* offsets. Does not write cross-segment block assignments; these are handled by the
* cross-segments.
* @throws IOException */
public void writeFixedMetadata(DataOutputStream dos) throws IOException {
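// These three counts are exactly what the DataInputStream constructor reads back, in the
// same order.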
dos.writeInt(this.dataBlocks);
dos.writeInt(this.crossSegmentCheckBlocks);
dos.writeInt(this.checkBlocks);
}
// For unit testing.
synchronized boolean hasStartedDecode() {
return succeeded || failed || finished || tryDecode;
}
synchronized boolean hasFailed() {
return failed || failedRetries;
}
synchronized boolean[] copyDownloadedBlocks() {
return blockChooser.copyDownloadedBlocks();
}
public synchronized long countUnfetchedKeys() {
if(finished || tryDecode)
return 0;
return totalBlocks() - blockChooser.successCount();
}
public synchronized long countSendableKeys(long now, int maxRetries) {
if(finished || tryDecode)
return 0;
return blockChooser.countFetchable();
}
public synchronized void getUnfetchedKeys(List<Key> keys) throws IOException {
if(finished || tryDecode)
return;
SplitFileSegmentKeys keyList = getSegmentKeys();
for(int i=0;i<totalBlocks();i++) {
if(!blockChooser.hasSucceeded(i))
keys.add(keyList.getNodeKey(i, null, false));
}
}
/** Pick a key to fetch. Must not update any persistent field. (Cooldowns etc are fine) */
public int chooseRandomKey() {
int chosen;
synchronized(this) {
if(finished) return -1;
if(failedRetries) return -1;
if(tryDecode) {
if(logMINOR) Logger.minor(this, "Segment decoding so not choosing a key on "+this);
return -1;
}
if(corruptMetadata) return -1; // Will be fetchable after we've found out what blocks we actually have.
chosen = blockChooser.chooseKey();
if(chosen != -1) {
if(logMINOR) Logger.minor(this, "Chosen key "+chosen+"/"+totalBlocks()+" for "+this+" (retries "+blockChooser.getRetries(chosen)+"/"+blockChooser.maxRetries+")");
} else {
if(logMINOR) Logger.minor(this, "No keys chosen for "+this);
}
}
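// If nothing is currently choosable, tell the parent when the earliest block comes out
// of cooldown so it can reschedule us.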
if(chosen == -1) {
long cooldownTime = blockChooser.overallCooldownTime();
if(cooldownTime > System.currentTimeMillis())
parent.increaseCooldown(this, cooldownTime);
return -1;
} else {
return chosen;
}
}
public void cancel() {
boolean decoding;
synchronized(this) {
if(finished) return;
finished = true;
decoding = tryDecode;
// If already decoding, must wait for decoder to check in before completing shutdown.
}
if(!decoding)
parent.finishedEncoding(this);
// Else must wait.
}
public synchronized long getOverallCooldownTime() {
if(finished || succeeded || failed || failedRetries) return 0;
return blockChooser.overallCooldownTime();
}
synchronized long getCooldownTime(int blockNumber) {
if(finished || succeeded || failed || failedRetries) return 0;
return blockChooser.getCooldownTime(blockNumber);
}
synchronized boolean corruptMetadata() {
return corruptMetadata;
}
public synchronized boolean needsDecode() {
if(finished || succeeded || failed) return false;
if(tryDecode) return false;
return blockChooser.successCount() == blocksForDecode();
}
public synchronized int foundBlocks() {
return blockChooser.successCount();
}
public synchronized int failedBlocks() {
return failedBlocks;
}
public synchronized ClientCHK getKey(int blockNum) {
SplitFileSegmentKeys keys;
try {
keys = getSegmentKeys();
} catch (IOException e) {
return null;
}
if(keys == null) return null;
return keys.getKey(blockNum, null, false);
}
public synchronized byte[] checkAndGetBlockData(int blockNum) throws IOException {
if(!blockChooser.hasSucceeded(blockNum)) return null;
ClientCHK key = getKey(blockNum);
if(key == null) return null;
for(int i=0;i<blocksFetched.length;i++) {
if(blocksFetched[i] == blockNum) {
byte[] buf = readBlock(i);
try {
ClientCHKBlock block =
ClientCHKBlock.encodeSplitfileBlock(buf, key.getCryptoKey(), key.getCryptoAlgorithm());
if(!(block.getClientKey().equals(key))) {
Logger.error(this, "Block "+blockNum+" in blocksFound["+i+"] is not valid!");
blockChooser.onUnSuccess(blockNum);
succeeded = false;
finished = false;
} else {
return buf;
}
} catch (CHKEncodeException e) {
// Should not be possible.
Logger.error(this, "Impossible: "+e);
return null;
}
}
}
Logger.error(this, "Block "+blockNum+" in blocksFound but not in blocksFetched on "+this);
return null;
}
synchronized void resumeCallback(int blockNo, SplitFileFetcherCrossSegmentStorage crossSegment) {
this.crossSegmentsByBlock[blockNo] = crossSegment;
}
public synchronized boolean hasBlock(int blockNo) {
return blockChooser.hasSucceeded(blockNo);
}
public synchronized boolean isDecoding() {
return tryDecode;
}
/** Called after checking datastore for a datastore-only request. */
public void onFinishedCheckingDatastoreNoFetch(ClientContext context) {
synchronized(this) {
if(tryDecode) return;
if(succeeded) return;
if(finished) return;
if(failed) return;
failed = true;
finished = true;
}
parent.finishedEncoding(this);
}
}