} else if(type.equals("CHK")) {
blockSize = CHKBlock.DATA_LENGTH;
oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
isCHK = true;
} else {
throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
}
// Compressed data; now insert it.
// We do NOT need to switch threads here: the actual compression is done by InsertCompressor on the RealCompressor thread,
// which then switches either to the database thread or to a new job on the executor to run this method.
short codecNumber = bestCodec == null ? -1 : bestCodec.metadataID;
if(parent == cb) {
ctx.eventProducer.produceEvent(new FinishedCompressionEvent(codecNumber, origSize, bestCompressedDataSize), context);
if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this+" data = "+data);
}
// Insert it...
long compressedDataSize = data.size();
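// fitsInOneBlockAsIs: the (possibly compressed) data fits into a single block of the requested key type.
// fitsInOneCHK: it fits into a single CHK, so even for other key types we can insert one CHK and point to it from the metadata.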
boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize <= blockSize : compressedDataSize <= oneBlockCompressedSize;
boolean fitsInOneCHK = bestCodec == null ? compressedDataSize <= CHKBlock.DATA_LENGTH : compressedDataSize <= CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
if((fitsInOneBlockAsIs || fitsInOneCHK) && origSize > Integer.MAX_VALUE)
throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "2GB+ should not encode to one block!", null);
boolean noMetadata = ((block.clientMetadata == null) || block.clientMetadata.isTrivial()) && targetFilename == null;
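// No client metadata worth encoding (or we are already inserting a metadata document), and not an
// archive: the data can be inserted directly, without wrapping it in a metadata document.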
if((noMetadata || metadata) && archiveType == null) {
if(fitsInOneBlockAsIs) {
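// Persistent inserts cannot keep a transient bucket, so convert it first.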
if(persistent && (data instanceof NotPersistentBucket))
data = fixNotPersistent(data, context);
// Just insert it
ClientPutState bi =
createInserter(parent, data, codecNumber, ctx, cb, metadata, (int)origSize, -1, true, context, shouldFreeData, forSplitfile);
if(logMINOR)
Logger.minor(this, "Inserting without metadata: "+bi+" for "+this);
cb.onTransition(this, bi, context);
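// earlyEncode: encode the block immediately so its key is known before the insert completes.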
if(ctx.earlyEncode && bi instanceof SingleBlockInserter && isCHK)
((SingleBlockInserter)bi).getBlock(context, true);
bi.schedule(context);
if(!isUSK)
cb.onBlockSetFinished(this, context);
synchronized(this) {
started = true;
}
if(persistent) {
block.nullData();
block = null;
}
return;
}
}
if(fitsInOneCHK) {
// Insert single block, then insert pointer to it
if(persistent && (data instanceof NotPersistentBucket)) {
data = fixNotPersistent(data, context);
}
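// reportMetadataOnly: insert the data block, but hand the resulting metadata back to the
// callback via onMetadata() rather than inserting a metadata document ourselves.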
if(reportMetadataOnly) {
SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, cb, metadata, (int)origSize, -1, true, true, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
if(logMINOR)
Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
cb.onMetadata(meta, this, context);
cb.onTransition(this, dataPutter, context);
dataPutter.schedule(context);
if(!isUSK)
cb.onBlockSetFinished(this, context);
synchronized(this) {
// Don't delete them because they are being passed on.
origHashes = null;
}
} else {
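// Normal case: insert the data block and a metadata document pointing at it.
// MultiPutCompletionCallback completes only when both inserts have finished.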
MultiPutCompletionCallback mcb =
new MultiPutCompletionCallback(cb, parent, token, persistent, false, ctx.earlyEncode);
SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, mcb, metadata, (int)origSize, -1, true, false, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
if(logMINOR)
Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
RandomAccessBucket metadataBucket;
try {
metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
} catch (IOException e) {
Logger.error(this, "Caught "+e, e);
throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
} catch (MetadataUnresolvedException e) {
// Impossible, we're not inserting a manifest.
Logger.error(this, "Caught "+e, e);
throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "Got MetadataUnresolvedException in SingleFileInserter: "+e.toString(), null);
}
ClientPutState metaPutter = createInserter(parent, metadataBucket, (short) -1, ctx, mcb, true, (int)origSize, -1, true, context, true, false);
if(logMINOR)
Logger.minor(this, "Inserting metadata: "+metaPutter+" for "+this);
mcb.addURIGenerator(metaPutter);
mcb.add(dataPutter);
cb.onTransition(this, mcb, context);
Logger.minor(this, ""+mcb+" : data "+dataPutter+" meta "+metaPutter);
mcb.arm(context);
dataPutter.schedule(context);
if(ctx.earlyEncode && metaPutter instanceof SingleBlockInserter)
((SingleBlockInserter)metaPutter).getBlock(context, true);
metaPutter.schedule(context);
if(!isUSK)
cb.onBlockSetFinished(this, context);
// Deleting origHashes is fine, we are done with them.
}
synchronized(this) {
started = true;
}
if(persistent) {
block.nullData();
block = null;
}
return;
}
// Otherwise the file is too big to fit into one block, so we must make a splitfile.
// Job of SplitHandler: once the split inserter has produced the metadata, insert it.
// Then, when both the split insert and the metadata insert have finished, tell the
// master callback.
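// SplitFileInserter operates on a random access buffer, so convert the bucket first.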
LockableRandomAccessBuffer dataRAF;
try {
dataRAF = data.toRandomAccessBuffer();
} catch (IOException e) {
throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);
}
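// As above, reportMetadataOnly means the splitfile's metadata is handed back to the
// callback rather than inserted by us.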
if(reportMetadataOnly) {
SplitFileInserter sfi = new SplitFileInserter(persistent, parent, cb,
dataRAF, shouldFreeData, ctx, context, origSize, bestCodec,
block.clientMetadata, metadata, archiveType, cryptoAlgorithm, forceCryptoKey,