});
// Archive extraction: bounded by handler count, cached data size, per-file size and
// element count; extracted data is spilled into temp buckets.
archiveManager = new ArchiveManager(MAX_ARCHIVE_HANDLERS, MAX_CACHED_ARCHIVE_DATA, MAX_ARCHIVED_FILE_SIZE, MAX_CACHED_ELEMENTS, tempBucketFactory);
// Healing queue: re-inserts blocks we fetched to improve their availability.
// The InsertContext here is a minimal, mostly-zeroed config (2 presumably being the
// insert retry count — confirm against the InsertContext constructor). Healing inserts
// run at prefetch priority; 512 is the third SimpleHealingQueue constructor argument,
// presumably the queue size cap — see the inline FIXME about making it configurable.
healingQueue = new SimpleHealingQueue(
new InsertContext(
0, 2, 0, 0, new SimpleEventProducer(),
false, Node.FORK_ON_CACHEABLE_DEFAULT, false, Compressor.DEFAULT_COMPRESSORDESCRIPTOR, 0, 0, InsertContext.CompatibilityMode.COMPAT_DEFAULT), RequestStarter.PREFETCH_PRIORITY_CLASS, 512 /* FIXME make configurable */);
// Persistent random-access storage: pooled file-backed buffers, named by the
// persistent filename generator, using the node's fast weak RNG.
PooledFileRandomAccessBufferFactory raff =
new PooledFileRandomAccessBufferFactory(persistentFilenameGenerator, node.fastWeakRandom);
// Wrap the factory in a disk-space check on the persistent temp dir. The reserved
// floor is the long-term minimum free space plus the temp bucket factory's maximum
// RAM usage (RAM-held buckets may be flushed to this disk, so their worst case is
// counted against free space up front).
persistentDiskChecker =
new DiskSpaceCheckingRandomAccessBufferFactory(raff, persistentTempDir.dir(),
minDiskFreeLongTerm + tempBucketFactory.getMaxRamUsed());
// Optionally encrypt persistent temp buffers, per the node config flag.
persistentRAFFactory = new MaybeEncryptedRandomAccessBufferFactory(persistentDiskChecker, nodeConfig.getBoolean("encryptPersistentTempBuckets"));
// The persistent temp bucket factory shares the same disk-space checker so both
// paths respect the same free-space floor.
persistentTempBucketFactory.setDiskSpaceChecker(persistentDiskChecker);
// Build a baseline client (priority 0, no special flags) purely to capture the
// default fetch/insert contexts used elsewhere.
HighLevelSimpleClient client = makeClient((short)0, false, false);
FetchContext defaultFetchContext = client.getFetchContext();
InsertContext defaultInsertContext = client.getInsertContext(false);
// Memory-limited job threads: start at half the CPU count (these jobs also do disk
// I/O, so we deliberately undercommit the cores), cap at 1/20th of the node's global
// thread limit, and always allow at least one thread.
int maxMemoryLimitedJobThreads = Runtime.getRuntime().availableProcessors() / 2; // Some disk I/O ... tunable REDFLAG
maxMemoryLimitedJobThreads = Math.min(maxMemoryLimitedJobThreads, node.nodeStats.getThreadLimit()/20);
maxMemoryLimitedJobThreads = Math.max(1, maxMemoryLimitedJobThreads);
// FIXME review thread limits. This isn't just memory, it's CPU and disk as well, so we don't want it too big??
// FIXME l10n the errors?