if (get().equals(val))
return;
synchronized(Node.this) {
isAllowedToConnectToSeednodes = val;
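// Note: the field is updated even when the exception below is thrown,
// so the new value survives and takes effect after the required restart.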
if(opennet != null)
throw new NodeNeedRestartException(l10n("connectToSeednodesCannotBeChangedMustDisableOpennetOrReboot"));
}
}
});
isAllowedToConnectToSeednodes = opennetConfig.getBoolean("connectToSeednodes");
// Can be enabled on the fly
opennetConfig.register("enabled", false, 0, true, true, "Node.opennetEnabled", "Node.opennetEnabledLong", new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return opennet != null;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
OpennetManager o;
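// Swap the OpennetManager reference under the Node lock, but do the
// actual start/stop outside it, presumably to avoid holding the lock
// during network-facing work.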
synchronized(Node.this) {
if(val == (opennet != null)) return;
if(val) {
try {
o = opennet = new OpennetManager(Node.this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
} catch (NodeInitException e) {
opennet = null;
throw new InvalidConfigValueException(e.getMessage());
}
} else {
o = opennet;
opennet = null;
}
}
if(val) o.start();
else o.stop(true);
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
});
boolean opennetEnabled = opennetConfig.getBoolean("enabled");
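// Cap on opennet peers. OpennetManager.MAX_PEERS_FOR_SCALING is the hard
// upper bound: the callback rejects larger values, and any out-of-range
// persisted value is clamped just below.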
opennetConfig.register("maxOpennetPeers", OpennetManager.MAX_PEERS_FOR_SCALING, 1, true, false, "Node.maxOpennetPeers",
"Node.maxOpennetPeersLong", new IntCallback() {
@Override
public Integer get() {
return maxOpennetPeers;
}
@Override
public void set(Integer inputMaxOpennetPeers) throws InvalidConfigValueException {
if(inputMaxOpennetPeers < 0) throw new InvalidConfigValueException(l10n("mustBePositive"));
if(inputMaxOpennetPeers > OpennetManager.MAX_PEERS_FOR_SCALING) throw new InvalidConfigValueException(l10n("maxOpennetPeersMustBeTwentyOrLess", "maxpeers", Integer.toString(OpennetManager.MAX_PEERS_FOR_SCALING)));
maxOpennetPeers = inputMaxOpennetPeers;
}
}, false);
maxOpennetPeers = opennetConfig.getInt("maxOpennetPeers");
if(maxOpennetPeers > OpennetManager.MAX_PEERS_FOR_SCALING) {
Logger.error(this, "maxOpennetPeers may not be over "+OpennetManager.MAX_PEERS_FOR_SCALING);
maxOpennetPeers = OpennetManager.MAX_PEERS_FOR_SCALING;
}
opennetCryptoConfig = new NodeCryptoConfig(opennetConfig, 2 /* sort order; 0 = "enabled" */, true, securityLevels);
if(opennetEnabled) {
opennet = new OpennetManager(this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
// Will be started later
} else {
opennet = null;
}
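// Tie opennet to the network threat level: HIGH/MAXIMUM tears down the
// OpennetManager, NORMAL/LOW recreates and starts it.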
securityLevels.addNetworkThreatLevelListener(new SecurityLevelListener<NETWORK_THREAT_LEVEL>() {
@Override
public void onChange(NETWORK_THREAT_LEVEL oldLevel, NETWORK_THREAT_LEVEL newLevel) {
if(newLevel == NETWORK_THREAT_LEVEL.HIGH
|| newLevel == NETWORK_THREAT_LEVEL.MAXIMUM) {
OpennetManager om;
synchronized(Node.this) {
om = opennet;
if(om != null)
opennet = null;
}
if(om != null) {
om.stop(true);
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
} else if(newLevel == NETWORK_THREAT_LEVEL.NORMAL
|| newLevel == NETWORK_THREAT_LEVEL.LOW) {
OpennetManager o = null;
synchronized(Node.this) {
if(opennet == null) {
try {
o = opennet = new OpennetManager(Node.this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
} catch (NodeInitException e) {
opennet = null;
Logger.error(this, "UNABLE TO ENABLE OPENNET: "+e, e);
clientCore.alerts.register(new SimpleUserAlert(false, l10n("enableOpennetFailedTitle"), l10n("enableOpennetFailed", "message", e.getLocalizedMessage()), l10n("enableOpennetFailed", "message", e.getLocalizedMessage()), UserAlert.ERROR));
}
}
}
if(o != null) {
o.start();
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
}
Node.this.config.store();
}
});
opennetConfig.register("acceptSeedConnections", false, 2, true, true, "Node.acceptSeedConnectionsShort", "Node.acceptSeedConnections", new BooleanCallback() {
@Override
public Boolean get() {
return acceptSeedConnections;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
acceptSeedConnections = val;
}
});
acceptSeedConnections = opennetConfig.getBoolean("acceptSeedConnections");
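// A seed node accepts connections from many transient strangers, so give
// its address tracker a much larger table (presumably so the churn does
// not evict useful entries).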
if(acceptSeedConnections && opennet != null)
opennet.crypto.socket.getAddressTracker().setHugeTracker();
opennetConfig.finishedInitialization();
nodeConfig.register("passOpennetPeersThroughDarknet", true, sortOrder++, true, false, "Node.passOpennetPeersThroughDarknet", "Node.passOpennetPeersThroughDarknetLong",
new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return passOpennetRefsThroughDarknet;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
synchronized(Node.this) {
passOpennetRefsThroughDarknet = val;
}
}
});
passOpennetRefsThroughDarknet = nodeConfig.getBoolean("passOpennetPeersThroughDarknet");
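// Extra per-peer data (such as queued node-to-node messages) lives in a
// directory keyed on the darknet port, so several nodes can share userDir.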
this.extraPeerDataDir = userDir.file("extra-peer-data-"+getDarknetPortNumber());
if (!((extraPeerDataDir.exists() && extraPeerDataDir.isDirectory()) || (extraPeerDataDir.mkdir()))) {
String msg = "Could not find or create extra peer data directory";
throw new NodeInitException(NodeInitException.EXIT_BAD_DIR, msg);
}
// Name
nodeConfig.register("name", myName, sortOrder++, false, true, "Node.nodeName", "Node.nodeNameLong",
new NodeNameCallback());
myName = nodeConfig.getString("name");
// Datastore
nodeConfig.register("storeForceBigShrinks", false, sortOrder++, true, false, "Node.forceBigShrink", "Node.forceBigShrinkLong",
new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return storeForceBigShrinks;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
synchronized(Node.this) {
storeForceBigShrinks = val;
}
}
});
nodeConfig.register("storeType", "ram", sortOrder++, true, true, "Node.storeType", "Node.storeTypeLong", new StoreTypeCallback());
storeType = nodeConfig.getString("storeType");
/*
* Very small initial store size, since the node will preallocate it when starting up for the first time,
* BLOCKING STARTUP, and since everyone goes through the wizard anyway...
*/
nodeConfig.register("storeSize", DEFAULT_STORE_SIZE, sortOrder++, false, true, "Node.storeSize", "Node.storeSizeLong",
new LongCallback() {
@Override
public Long get() {
return maxTotalDatastoreSize;
}
@Override
public void set(Long storeSize) throws InvalidConfigValueException {
if(storeSize < MIN_STORE_SIZE)
throw new InvalidConfigValueException(l10n("invalidStoreSize"));
long newMaxStoreKeys = storeSize / sizePerKey;
if(newMaxStoreKeys == maxTotalKeys) return;
// Update each datastore
synchronized(Node.this) {
maxTotalDatastoreSize = storeSize;
maxTotalKeys = newMaxStoreKeys;
maxStoreKeys = maxTotalKeys / 2;
maxCacheKeys = maxTotalKeys - maxStoreKeys;
}
try {
chkDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
chkDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
pubKeyDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
pubKeyDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
sskDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
sskDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
} catch (IOException e) {
// FIXME we need to be able to tell the user.
Logger.error(this, "Caught "+e+" resizing the datastore", e);
System.err.println("Caught "+e+" resizing the datastore");
e.printStackTrace();
}
// Perhaps a bit hackish...? Seems like this should be near its definition in NodeStats.
nodeStats.avgStoreCHKLocation.changeMaxReports((int)maxStoreKeys);
nodeStats.avgCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgSlashdotCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgClientCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgStoreSSKLocation.changeMaxReports((int)maxStoreKeys);
nodeStats.avgCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgSlashdotCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgClientCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
}
}, true);
maxTotalDatastoreSize = nodeConfig.getLong("storeSize");
if(maxTotalDatastoreSize < MIN_STORE_SIZE && !storeType.equals("ram")) { // totally arbitrary minimum!
throw new NodeInitException(NodeInitException.EXIT_INVALID_STORE_SIZE, "Store size too small");
}
maxTotalKeys = maxTotalDatastoreSize / sizePerKey;
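// Slot filters track which salt-hash slots are occupied so that requests
// for absent keys can be rejected without touching the disk.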
nodeConfig.register("storeUseSlotFilters", true, sortOrder++, true, false, "Node.storeUseSlotFilters", "Node.storeUseSlotFiltersLong", new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return storeUseSlotFilters;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
synchronized(Node.this) {
storeUseSlotFilters = val;
}
// FIXME l10n
throw new NodeNeedRestartException("Need to restart to change storeUseSlotFilters");
}
});
storeUseSlotFilters = nodeConfig.getBoolean("storeUseSlotFilters");
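// How long slot-filter writes may sit in memory before being flushed,
// in milliseconds; -1 appears to mean "write only on shutdown".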
nodeConfig.register("storeSaltHashSlotFilterPersistenceTime", ResizablePersistentIntBuffer.DEFAULT_PERSISTENCE_TIME, sortOrder++, true, false,
"Node.storeSaltHashSlotFilterPersistenceTime", "Node.storeSaltHashSlotFilterPersistenceTimeLong", new IntCallback() {
@Override
public Integer get() {
return ResizablePersistentIntBuffer.getPersistenceTime();
}
@Override
public void set(Integer val)
throws InvalidConfigValueException,
NodeNeedRestartException {
if(val >= -1)
ResizablePersistentIntBuffer.setPersistenceTime(val);
else
throw new InvalidConfigValueException(l10n("slotFilterPersistenceTimeError"));
}
}, false);
nodeConfig.register("storeSaltHashResizeOnStart", false, sortOrder++, true, false,
"Node.storeSaltHashResizeOnStart", "Node.storeSaltHashResizeOnStartLong", new BooleanCallback() {
@Override
public Boolean get() {
return storeSaltHashResizeOnStart;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
storeSaltHashResizeOnStart = val;
}
});
storeSaltHashResizeOnStart = nodeConfig.getBoolean("storeSaltHashResizeOnStart");
this.storeDir = setupProgramDir(installConfig, "storeDir", userDir().file("datastore").getPath(), "Node.storeDirectory", "Node.storeDirectoryLong", nodeConfig);
installConfig.finishedInitialization();
final String suffix = getStoreSuffix();
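// Split the datastore evenly: half long-term store, half cache.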
maxStoreKeys = maxTotalKeys / 2;
maxCacheKeys = maxTotalKeys - maxStoreKeys;
/*
* On Windows, setting the file length normally involves writing lots of zeros.
* So it's an uninterruptible system call that takes a loooong time. On OS X,
* presumably the same is true. If the RNG is fast enough, this means that
* setting the length and writing random data take exactly the same amount
* of time. On most versions of Unix, holes can be created. However on all
* systems, predictable disk usage is a good thing. So let's turn it on by
* default for now, on all systems. The datastore can be read but mostly not
* written while the random data is being written.
*/
nodeConfig.register("storePreallocate", true, sortOrder++, true, true, "Node.storePreallocate", "Node.storePreallocateLong",
new BooleanCallback() {
@Override
public Boolean get() {
return storePreallocate;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
storePreallocate = val;
if (storeType.equals("salt-hash")) {
setPreallocate(chkDatastore, val);
setPreallocate(chkDatacache, val);
setPreallocate(pubKeyDatastore, val);
setPreallocate(pubKeyDatacache, val);
setPreallocate(sskDatastore, val);
setPreallocate(sskDatacache, val);
}
}
private void setPreallocate(StoreCallback<?> datastore,
boolean val) {
// Avoid race conditions by checking first.
FreenetStore<?> store = datastore.getStore();
if(store instanceof SaltedHashFreenetStore)
((SaltedHashFreenetStore<?>)store).setPreallocate(val);
}
});
storePreallocate = nodeConfig.getBoolean("storePreallocate");
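// On Unixes other than OS X, holes are cheap (see the comment above), so
// at LOW physical threat level trade predictable disk usage for a faster
// startup by turning preallocation off.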
if(File.separatorChar == '/' && System.getProperty("os.name").toLowerCase().indexOf("mac os") < 0) {
securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<SecurityLevels.PHYSICAL_THREAT_LEVEL>() {
@Override
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
try {
if(newLevel == PHYSICAL_THREAT_LEVEL.LOW)
nodeConfig.set("storePreallocate", false);
else
nodeConfig.set("storePreallocate", true);
} catch (NodeNeedRestartException e) {
// Ignore
} catch (InvalidConfigValueException e) {
// Ignore
}
}
});
}
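// At MAXIMUM physical threat level, wipe the master keys and all persisted
// client data at once; when leaving MAXIMUM for anything but HIGH (which is
// passworded), recreate master.keys with an empty password.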
securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<SecurityLevels.PHYSICAL_THREAT_LEVEL>() {
@Override
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
if(newLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM) {
synchronized(Node.this) {
clientCacheAwaitingPassword = false;
databaseAwaitingPassword = false;
}
try {
killMasterKeysFile();
clientCore.clientLayerPersister.disableWrite();
clientCore.clientLayerPersister.waitForNotWriting();
clientCore.clientLayerPersister.deleteAllFiles();
} catch (IOException e) {
masterKeysFile.delete();
Logger.error(this, "Unable to securely delete "+masterKeysFile);
System.err.println(NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFile", "filename", masterKeysFile.getAbsolutePath()));
clientCore.alerts.register(new SimpleUserAlert(true, NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFileTitle"), NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFile"), NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFileTitle"), UserAlert.CRITICAL_ERROR));
}
}
if(oldLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM && newLevel != PHYSICAL_THREAT_LEVEL.HIGH) {
// The new level is not passworded (HIGH would prompt for one), but the
// keys must exist on disk, so recreate master.keys with an empty password.
try {
MasterKeys keys;
synchronized(Node.this) {
keys = Node.this.keys;
}
keys.changePassword(masterKeysFile, "", secureRandom);
} catch (IOException e) {
Logger.error(this, "Unable to create encryption keys file: "+masterKeysFile+" : "+e, e);
System.err.println("Unable to create encryption keys file: "+masterKeysFile+" : "+e);
e.printStackTrace();
}
}
}
});
if(securityLevels.physicalThreatLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM) {
try {
killMasterKeysFile();
} catch (IOException e) {
String msg = "Unable to securely delete old master.keys file when switching to MAXIMUM seclevel!!";
System.err.println(msg);
throw new NodeInitException(NodeInitException.EXIT_CANT_WRITE_MASTER_KEYS, msg);
}
}
long defaultCacheSize;
long memoryLimit = NodeStarter.getMemoryLimitBytes();
// This is tricky because systems with low memory probably also have slow disks, but using
// up too much memory can be catastrophic...
// Total alchemy, FIXME!
if(memoryLimit == Long.MAX_VALUE || memoryLimit < 0)
defaultCacheSize = 1024*1024;
else if(memoryLimit <= 128*1024*1024)
defaultCacheSize = 0; // Turn off completely for very small memory.
else {
// 9 stores; together the caches get 5% of memory above 128MB, capped at 1MB per store (reached at 308MB+).
defaultCacheSize = Math.min(1024*1024, (memoryLimit - 128*1024*1024) / (20*9));
}
nodeConfig.register("cachingFreenetStoreMaxSize", defaultCacheSize, sortOrder++, true, false, "Node.cachingFreenetStoreMaxSize", "Node.cachingFreenetStoreMaxSizeLong",
new LongCallback() {
@Override
public Long get() {
synchronized(Node.this) {
return cachingFreenetStoreMaxSize;
}
}
@Override
public void set(Long val) throws InvalidConfigValueException, NodeNeedRestartException {
if(val < 0) throw new InvalidConfigValueException(l10n("invalidMemoryCacheSize"));
// Any positive value is legal. In particular, e.g. 1200 bytes would cause us to cache SSKs but not CHKs.
synchronized(Node.this) {
cachingFreenetStoreMaxSize = val;
}
throw new NodeNeedRestartException("Caching Maximum Size cannot be changed on the fly");
}
}, true);
cachingFreenetStoreMaxSize = nodeConfig.getLong("cachingFreenetStoreMaxSize");
if(cachingFreenetStoreMaxSize < 0)
throw new NodeInitException(NodeInitException.EXIT_BAD_CONFIG, l10n("invalidMemoryCacheSize"));
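// Flush period for the caching layer. The "300k" default is parsed with
// the config's size multipliers, i.e. 300,000, presumably milliseconds,
// so five minutes.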
nodeConfig.register("cachingFreenetStorePeriod", "300k", sortOrder++, true, false, "Node.cachingFreenetStorePeriod", "Node.cachingFreenetStorePeriod",
new LongCallback() {
@Override
public Long get() {
synchronized(Node.this) {
return cachingFreenetStorePeriod;
}
}
@Override
public void set(Long val) throws InvalidConfigValueException, NodeNeedRestartException {
synchronized(Node.this) {
cachingFreenetStorePeriod = val;
}
throw new NodeNeedRestartException("Caching Period cannot be changed on the fly");
}
}, true);
cachingFreenetStorePeriod = nodeConfig.getLong("cachingFreenetStorePeriod");