// So it is passed on.
origHashes = hashes;
} else {
hashes = origHashes; // Inherit so it goes all the way to the top.
}
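// Unpack the result of the compression attempt: output.data is the smallest
// compressed candidate, output.bestCodec the codec that produced it (null if no
// codec did better than the uncompressed original).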
RandomAccessBucket bestCompressedData = output.data;
long bestCompressedDataSize = bestCompressedData.size();
RandomAccessBucket data = bestCompressedData;
COMPRESSOR_TYPE bestCodec = output.bestCodec;
boolean shouldFreeData = freeData;
if(bestCodec != null) {
if(logMINOR) Logger.minor(this, "The best compression algorithm is "+bestCodec+ " we have gained"+ (100-(bestCompressedDataSize*100/origSize)) +"% ! ("+origSize+'/'+bestCompressedDataSize+')');
shouldFreeData = true; // the compressed bucket must be freed regardless of whether the original data was to be freed
if(freeData) {
block.getData().free();
}
block.nullData();
} else {
data = block.getData();
bestCompressedDataSize = origSize;
}
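// Work out the single-block size limits for the requested key type: SSKs, KSKs and
// USKs all share the SSK limits, while CHKs have their own, larger limits.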
int blockSize;
int oneBlockCompressedSize;
boolean isCHK = false;
String type = block.desiredURI.getKeyType();
boolean isUSK = false;
if(type.equals("SSK") || type.equals("KSK") || (isUSK = type.equals("USK"))) {
blockSize = SSKBlock.DATA_LENGTH;
oneBlockCompressedSize = SSKBlock.MAX_COMPRESSED_DATA_LENGTH;
} else if(type.equals("CHK")) {
blockSize = CHKBlock.DATA_LENGTH;
oneBlockCompressedSize = CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
isCHK = true;
} else {
throw new InsertException(InsertExceptionMode.INVALID_URI, "Unknown key type: "+type, null);
}
// Compressed data; now insert it.
// We do NOT need to switch threads here: the actual compression is done by InsertCompressor on the RealCompressor thread,
// which then switches either to the database thread or to a new job on the executor to run this method.
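// Only report the compression result as a client event when the parent is also our
// callback, i.e. this is a top-level insert rather than a sub-insert.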
if(parent == cb) {
short codecID = bestCodec == null ? -1 : bestCodec.metadataID;
ctx.eventProducer.produceEvent(new FinishedCompressionEvent(codecID, origSize, bestCompressedDataSize), context);
if(logMINOR) Logger.minor(this, "Compressed "+origSize+" to "+data.size()+" on "+this+" data = "+data);
}
// Insert it...
short codecNumber = bestCodec == null ? -1 : bestCodec.metadataID;
long compressedDataSize = data.size();
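// Does the data fit into a single block of the requested key type, and would it fit
// into a single CHK? Compressed data is checked against the compressed-payload limit
// for the block type, which is smaller than the plain data length.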
boolean fitsInOneBlockAsIs = bestCodec == null ? compressedDataSize <= blockSize : compressedDataSize <= oneBlockCompressedSize;
boolean fitsInOneCHK = bestCodec == null ? compressedDataSize <= CHKBlock.DATA_LENGTH : compressedDataSize <= CHKBlock.MAX_COMPRESSED_DATA_LENGTH;
if((fitsInOneBlockAsIs || fitsInOneCHK) && origSize > Integer.MAX_VALUE)
throw new InsertException(InsertExceptionMode.INTERNAL_ERROR, "2GB+ should not encode to one block!", null);
boolean noMetadata = ((block.clientMetadata == null) || block.clientMetadata.isTrivial()) && targetFilename == null;
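// If there is no client metadata or target filename to record (or we are inserting
// metadata itself), and this is not an archive element, the data can potentially be
// inserted directly, without a metadata wrapper.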
if((noMetadata || metadata) && archiveType == null) {
if(fitsInOneBlockAsIs) {
if(persistent && (data instanceof NotPersistentBucket))
data = fixNotPersistent(data, context);
// Just insert it
ClientPutState bi =
createInserter(parent, data, codecNumber, ctx, cb, metadata, (int)origSize, -1, true, context, shouldFreeData, forSplitfile);
if(logMINOR)
Logger.minor(this, "Inserting without metadata: "+bi+" for "+this);
cb.onTransition(this, bi, context);
if(ctx.earlyEncode && bi instanceof SingleBlockInserter && isCHK)
((SingleBlockInserter)bi).getBlock(context, true);
bi.schedule(context);
if(!isUSK)
cb.onBlockSetFinished(this, context);
synchronized(this) {
started = true;
}
if(persistent) {
block.nullData();
block = null;
}
return;
}
}
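// The data doesn't qualify for a direct single-block insert under the requested key,
// but it does fit into one CHK: insert it as a single CHK and create metadata that
// redirects to it.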
if (fitsInOneCHK) {
// Insert single block, then insert pointer to it
if(persistent && (data instanceof NotPersistentBucket)) {
data = fixNotPersistent(data, context);
}
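// reportMetadataOnly: hand the redirect metadata straight back to the callback via
// onMetadata() and just schedule the data insert; no metadata block is inserted here.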
if(reportMetadataOnly) {
SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, cb, metadata, (int)origSize, -1, true, true, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
if(logMINOR)
Logger.minor(this, "Inserting with metadata: "+dataPutter+" for "+this);
Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
cb.onMetadata(meta, this, context);
cb.onTransition(this, dataPutter, context);
dataPutter.schedule(context);
if(!isUSK)
cb.onBlockSetFinished(this, context);
synchronized(this) {
// Don't delete the hashes: they have been passed on (in the metadata above), so just drop our reference.
origHashes = null;
}
} else {
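// Normal case: insert the data block and a metadata block pointing to it, tied
// together by a MultiPutCompletionCallback so the overall insert only completes once
// both are done.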
MultiPutCompletionCallback mcb =
new MultiPutCompletionCallback(cb, parent, token, persistent, false, ctx.earlyEncode);
SingleBlockInserter dataPutter = new SingleBlockInserter(parent, data, codecNumber, FreenetURI.EMPTY_CHK_URI, ctx, realTimeFlag, mcb, metadata, (int)origSize, -1, true, false, token, context, persistent, shouldFreeData, forSplitfile ? ctx.extraInsertsSplitfileHeaderBlock : ctx.extraInsertsSingleBlock, cryptoAlgorithm, forceCryptoKey);
if(logMINOR)
Logger.minor(this, "Inserting data: "+dataPutter+" for "+this);
Metadata meta = makeMetadata(archiveType, dataPutter.getURI(context), hashes);
RandomAccessBucket metadataBucket;
try {
metadataBucket = meta.toBucket(context.getBucketFactory(persistent));
} catch (IOException e) {
Logger.error(this, "Caught "+e, e);
throw new InsertException(InsertExceptionMode.BUCKET_ERROR, e, null);