@Override
public void set(String val) throws InvalidConfigValueException, NodeNeedRestartException {
// FIXME l10n
// FIXME wipe the old one and move
throw new InvalidConfigValueException("Node.masterKeyFile cannot be changed on the fly, you must shutdown, wipe the old file and reconfigure");
}
});
String value = nodeConfig.getString("masterKeyFile");
File f;
if (value.equalsIgnoreCase("none")) {
f = null;
} else {
f = new File(value);
if(f.exists() && !(f.canWrite() && f.canRead()))
throw new NodeInitException(NodeInitException.EXIT_CANT_WRITE_MASTER_KEYS, "Cannot read from and write to master keys file "+f);
}
masterKeysFile = f;
FileUtil.setOwnerRW(masterKeysFile);
nodeConfig.register("showFriendsVisibilityAlert", false, sortOrder++, true, false, "Node.showFriendsVisibilityAlert", "Node.showFriendsVisibilityAlertLong", new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return showFriendsVisibilityAlert;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException,
NodeNeedRestartException {
synchronized(Node.this) {
if(val == showFriendsVisibilityAlert) return;
if(val) return;
}
unregisterFriendsVisibilityAlert();
}
});
showFriendsVisibilityAlert = nodeConfig.getBoolean("showFriendsVisibilityAlert");
dbFile = userDir.file("node.db4o");
dbFileCrypt = userDir.file("node.db4o.crypt");
byte[] clientCacheKey = null;
MasterSecret persistentSecret = null;
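// Try at most twice: if the master keys file has the wrong size it is deleted below, and the
// second pass retries from scratch.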
for(int i=0;i<2; i++) {
try {
if(securityLevels.physicalThreatLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM) {
keys = MasterKeys.createRandom(secureRandom);
} else {
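// Try the blank password first; if the file is actually passworded this throws
// MasterKeysWrongPasswordException and the password is requested later
// (see databaseAwaitingPassword below).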
keys = MasterKeys.read(masterKeysFile, secureRandom, "");
}
clientCacheKey = keys.clientCacheMasterKey;
persistentSecret = keys.getPersistentMasterSecret();
databaseKey = keys.createDatabaseKey(secureRandom);
if(securityLevels.getPhysicalThreatLevel() == PHYSICAL_THREAT_LEVEL.HIGH) {
System.err.println("Physical threat level is set to HIGH but no password, resetting to NORMAL - probably timing glitch");
securityLevels.resetPhysicalThreatLevel(PHYSICAL_THREAT_LEVEL.NORMAL);
}
break;
} catch (MasterKeysWrongPasswordException e) {
break;
} catch (MasterKeysFileSizeException e) {
System.err.println("Impossible: master keys file "+masterKeysFile+" too " + e.sizeToString() + "! Deleting to enable startup, but you will lose your client cache.");
masterKeysFile.delete();
} catch (IOException e) {
break;
}
}
try {
setupDatabase(databaseKey);
} catch (MasterKeysWrongPasswordException e2) {
System.out.println("Client database node.db4o is encrypted!");
databaseAwaitingPassword = true;
} catch (MasterKeysFileSizeException e2) {
System.err.println("Unable to decrypt database: master.keys file too " + e2.sizeToString() + "!");
} catch (IOException e2) {
System.err.println("Unable to access master.keys file to decrypt database: "+e2);
e2.printStackTrace();
}
// Boot ID
bootID = random.nextLong();
// Fixed length file containing boot ID. Accessed with random access file. So hopefully it will always be
// written. Note that we set lastBootID to -1 if we can't _write_ our ID as well as if we can't read it,
// because if we can't write it then we probably couldn't write it on the last bootup either.
File bootIDFile = runDir.file("bootID");
int BOOT_FILE_LENGTH = 64 / 4; // A 64-bit long, zero-padded, as 16 hex characters
long oldBootID = -1;
RandomAccessFile raf = null;
try {
raf = new RandomAccessFile(bootIDFile, "rw");
if(raf.length() < BOOT_FILE_LENGTH) {
oldBootID = -1;
} else {
byte[] buf = new byte[BOOT_FILE_LENGTH];
raf.readFully(buf);
String s = new String(buf, "ISO-8859-1");
try {
oldBootID = Fields.bytesToLong(HexUtil.hexToBytes(s));
} catch (NumberFormatException e) {
oldBootID = -1;
}
raf.seek(0);
}
String s = HexUtil.bytesToHex(Fields.longToBytes(bootID));
byte[] buf = s.getBytes("ISO-8859-1");
if(buf.length != BOOT_FILE_LENGTH)
System.err.println("Not 16 bytes for boot ID "+bootID+" - WTF??");
raf.write(buf);
} catch (IOException e) {
oldBootID = -1;
// If we have an error in reading, *or in writing*, we don't reliably know the last boot ID.
} finally {
Closer.close(raf);
}
lastBootID = oldBootID;
nodeConfig.register("disableProbabilisticHTLs", false, sortOrder++, true, false, "Node.disablePHTLS", "Node.disablePHTLSLong",
new BooleanCallback() {
@Override
public Boolean get() {
return disableProbabilisticHTLs;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
disableProbabilisticHTLs = val;
}
});
disableProbabilisticHTLs = nodeConfig.getBoolean("disableProbabilisticHTLs");
nodeConfig.register("maxHTL", DEFAULT_MAX_HTL, sortOrder++, true, false, "Node.maxHTL", "Node.maxHTLLong", new ShortCallback() {
@Override
public Short get() {
return maxHTL;
}
@Override
public void set(Short val) throws InvalidConfigValueException {
if(val < 0) throw new InvalidConfigValueException("Impossible max HTL");
maxHTL = val;
}
}, false);
maxHTL = nodeConfig.getShort("maxHTL");
// FIXME maybe these should persist? They need to be private.
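// Per-run random decisions: whether this node decrements HTL when it is already at the maximum
// or minimum value. Keeping them fixed per node (and private, per the FIXME above) makes it
// harder to infer from the HTL alone whether a request originated locally.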
decrementAtMax = random.nextDouble() <= DECREMENT_AT_MAX_PROB;
decrementAtMin = random.nextDouble() <= DECREMENT_AT_MIN_PROB;
// Determine where to bind to
usm = new MessageCore(executor);
// FIXME maybe these configs should actually be under a node.ip subconfig?
ipDetector = new NodeIPDetector(this);
sortOrder = ipDetector.registerConfigs(nodeConfig, sortOrder);
// ARKs enabled?
nodeConfig.register("enableARKs", true, sortOrder++, true, false, "Node.enableARKs", "Node.enableARKsLong", new BooleanCallback() {
@Override
public Boolean get() {
return enableARKs;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
throw new InvalidConfigValueException("Cannot change on the fly");
}
@Override
public boolean isReadOnly() {
return true;
}
});
enableARKs = nodeConfig.getBoolean("enableARKs");
nodeConfig.register("enablePerNodeFailureTables", true, sortOrder++, true, false, "Node.enablePerNodeFailureTables", "Node.enablePerNodeFailureTablesLong", new BooleanCallback() {
@Override
public Boolean get() {
return enablePerNodeFailureTables;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
throw new InvalidConfigValueException("Cannot change on the fly");
}
@Override
public boolean isReadOnly() {
return true;
}
});
enablePerNodeFailureTables = nodeConfig.getBoolean("enablePerNodeFailureTables");
nodeConfig.register("enableULPRDataPropagation", true, sortOrder++, true, false, "Node.enableULPRDataPropagation", "Node.enableULPRDataPropagationLong", new BooleanCallback() {
@Override
public Boolean get() {
return enableULPRDataPropagation;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
throw new InvalidConfigValueException("Cannot change on the fly");
}
@Override
public boolean isReadOnly() {
return true;
}
});
enableULPRDataPropagation = nodeConfig.getBoolean("enableULPRDataPropagation");
nodeConfig.register("enableSwapping", true, sortOrder++, true, false, "Node.enableSwapping", "Node.enableSwappingLong", new BooleanCallback() {
@Override
public Boolean get() {
return enableSwapping;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
throw new InvalidConfigValueException("Cannot change on the fly");
}
@Override
public boolean isReadOnly() {
return true;
}
});
enableSwapping = nodeConfig.getBoolean("enableSwapping");
/*
* Publishing our peers' locations is enabled, even in MAXIMUM network security and/or HIGH friends security,
* because a node which doesn't publish its peers' locations will get dramatically less traffic.
*
* Publishing our peers' locations does make us slightly more vulnerable to some attacks, but I don't think
* it's a big difference: swapping reveals the same information, it just doesn't update as quickly. This
* may help slightly, but probably not dramatically against a clever attacker.
*
* FIXME review this decision.
*/
nodeConfig.register("publishOurPeersLocation", true, sortOrder++, true, false, "Node.publishOurPeersLocation", "Node.publishOurPeersLocationLong", new BooleanCallback() {
@Override
public Boolean get() {
return publishOurPeersLocation;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
publishOurPeersLocation = val;
}
});
publishOurPeersLocation = nodeConfig.getBoolean("publishOurPeersLocation");
nodeConfig.register("routeAccordingToOurPeersLocation", true, sortOrder++, true, false, "Node.routeAccordingToOurPeersLocation", "Node.routeAccordingToOurPeersLocationLong", new BooleanCallback() {
@Override
public Boolean get() {
return routeAccordingToOurPeersLocation;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
routeAccordingToOurPeersLocation = val;
}
});
routeAccordingToOurPeersLocation = nodeConfig.getBoolean("routeAccordingToOurPeersLocation");
nodeConfig.register("enableSwapQueueing", true, sortOrder++, true, false, "Node.enableSwapQueueing", "Node.enableSwapQueueingLong", new BooleanCallback() {
@Override
public Boolean get() {
return enableSwapQueueing;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
enableSwapQueueing = val;
}
});
enableSwapQueueing = nodeConfig.getBoolean("enableSwapQueueing");
nodeConfig.register("enablePacketCoalescing", true, sortOrder++, true, false, "Node.enablePacketCoalescing", "Node.enablePacketCoalescingLong", new BooleanCallback() {
@Override
public Boolean get() {
return enablePacketCoalescing;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
enablePacketCoalescing = val;
}
});
enablePacketCoalescing = nodeConfig.getBoolean("enablePacketCoalescing");
// Determine the port number
// @see #191
if(oldConfig != null && "-1".equals(oldConfig.get("node.listenPort")))
throw new NodeInitException(NodeInitException.EXIT_COULD_NOT_BIND_USM, "Your freenet.ini file is corrupted! 'listenPort=-1'");
NodeCryptoConfig darknetConfig = new NodeCryptoConfig(nodeConfig, sortOrder++, false, securityLevels);
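// NodeCryptoConfig registers its own block of options, so skip sortOrder past them.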
sortOrder += NodeCryptoConfig.OPTION_COUNT;
darknetCrypto = new NodeCrypto(this, false, darknetConfig, startupTime, enableARKs);
nodeDBHandle = darknetCrypto.getNodeHandle(db);
if(db != null) {
db.commit();
if(logMINOR) Logger.minor(this, "COMMITTED");
}
// Must be created after darknetCrypto
dnsr = new DNSRequester(this);
ps = new PacketSender(this);
ticker = new PrioritizedTicker(executor, getDarknetPortNumber());
if(executor instanceof PooledExecutor)
((PooledExecutor)executor).setTicker(ticker);
Logger.normal(Node.class, "Creating node...");
shutdownHook.addEarlyJob(new Thread() {
@Override
public void run() {
if (opennet != null)
opennet.stop(false);
}
});
shutdownHook.addEarlyJob(new Thread() {
@Override
public void run() {
darknetCrypto.stop();
}
});
// Bandwidth limit
nodeConfig.register("outputBandwidthLimit", "15K", sortOrder++, false, true, "Node.outBWLimit", "Node.outBWLimitLong", new IntCallback() {
@Override
public Integer get() {
//return BlockTransmitter.getHardBandwidthLimit();
return outputBandwidthLimit;
}
@Override
public void set(Integer obwLimit) throws InvalidConfigValueException {
checkOutputBandwidthLimit(obwLimit);
try {
outputThrottle.changeNanosAndBucketSize(SECONDS.toNanos(1) / obwLimit, obwLimit/2);
nodeStats.setOutputLimit(obwLimit);
} catch (IllegalArgumentException e) {
throw new InvalidConfigValueException(e);
}
synchronized(Node.this) {
outputBandwidthLimit = obwLimit;
}
}
});
int obwLimit = nodeConfig.getInt("outputBandwidthLimit");
outputBandwidthLimit = obwLimit;
try {
checkOutputBandwidthLimit(outputBandwidthLimit);
} catch (InvalidConfigValueException e) {
throw new NodeInitException(NodeInitException.EXIT_BAD_BWLIMIT, e.getMessage());
}
// Bucket size of 0.5 seconds' worth of bytes.
// Add them at a rate determined by the obwLimit.
// Maximum forced bytes 80%, in other words, 20% of the bandwidth is reserved for
// block transfers, so we will use that 20% for block transfers even if more than 80% of the limit is used for non-limited data (resends etc).
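// E.g. with an output limit of 16384 bytes/sec: bucketSize = 8192 bytes, refilled at one byte
// per (1e9 / 16384) ~= 61035 nanoseconds.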
int bucketSize = obwLimit/2;
// Must have at least space for ONE PACKET.
// FIXME: make compatible with alternate transports.
bucketSize = Math.max(bucketSize, 2048);
try {
outputThrottle = new TokenBucket(bucketSize, SECONDS.toNanos(1) / obwLimit, obwLimit/2);
} catch (IllegalArgumentException e) {
throw new NodeInitException(NodeInitException.EXIT_BAD_BWLIMIT, e.getMessage());
}
nodeConfig.register("inputBandwidthLimit", "-1", sortOrder++, false, true, "Node.inBWLimit", "Node.inBWLimitLong", new IntCallback() {
@Override
public Integer get() {
if(inputLimitDefault) return -1;
return inputBandwidthLimit;
}
@Override
public void set(Integer ibwLimit) throws InvalidConfigValueException {
synchronized(Node.this) {
checkInputBandwidthLimit(ibwLimit);
if(ibwLimit == -1) {
inputLimitDefault = true;
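// -1 means "use the default": derive the input limit as four times the output limit.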
ibwLimit = outputBandwidthLimit * 4;
} else {
inputLimitDefault = false;
}
try {
nodeStats.setInputLimit(ibwLimit);
} catch (IllegalArgumentException e) {
throw new InvalidConfigValueException(e);
}
inputBandwidthLimit = ibwLimit;
}
}
});
int ibwLimit = nodeConfig.getInt("inputBandwidthLimit");
if(ibwLimit == -1) {
inputLimitDefault = true;
ibwLimit = obwLimit * 4;
}
inputBandwidthLimit = ibwLimit;
try {
checkInputBandwidthLimit(inputBandwidthLimit);
} catch (InvalidConfigValueException e) {
throw new NodeInitException(NodeInitException.EXIT_BAD_BWLIMIT, e.getMessage());
}
nodeConfig.register("throttleLocalTraffic", false, sortOrder++, true, false, "Node.throttleLocalTraffic", "Node.throttleLocalTrafficLong", new BooleanCallback() {
@Override
public Boolean get() {
return throttleLocalData;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
throttleLocalData = val;
}
});
throttleLocalData = nodeConfig.getBoolean("throttleLocalTraffic");
String s = "Testnet mode DISABLED. You may have some level of anonymity. :)\n"+
"Note that this version of Freenet is still a very early alpha, and may well have numerous bugs and design flaws.\n"+
"In particular: YOU ARE WIDE OPEN TO YOUR IMMEDIATE PEERS! They can eavesdrop on your requests with relatively little difficulty at present (correlation attacks etc).";
Logger.normal(this, s);
System.err.println(s);
File nodeFile = nodeDir.file("node-"+getDarknetPortNumber());
File nodeFileBackup = nodeDir.file("node-"+getDarknetPortNumber()+".bak");
// After we have set up testnet and IP address, load the node file
try {
// FIXME should take file directly?
readNodeFile(nodeFile.getPath());
} catch (IOException e) {
try {
System.err.println("Trying to read node file backup ...");
readNodeFile(nodeFileBackup.getPath());
} catch (IOException e1) {
if(nodeFile.exists() || nodeFileBackup.exists()) {
System.err.println("No node file or cannot read, (re)initialising crypto etc");
System.err.println(e1.toString());
e1.printStackTrace();
System.err.println("After:");
System.err.println(e.toString());
e.printStackTrace();
} else {
System.err.println("Creating new cryptographic keys...");
}
initNodeFileSettings();
}
}
// Then read the peers
peers = new PeerManager(this, shutdownHook);
tracker = new RequestTracker(peers, ticker);
usm.setDispatcher(dispatcher=new NodeDispatcher(this));
uptime = new UptimeEstimator(runDir, ticker, darknetCrypto.identityHash);
// ULPRs
failureTable = new FailureTable(this);
nodeStats = new NodeStats(this, sortOrder, new SubConfig("node.load", config), obwLimit, ibwLimit, lastVersion);
// clientCore needs new load management and other settings from stats.
clientCore = new NodeClientCore(this, config, nodeConfig, installConfig, getDarknetPortNumber(), sortOrder, oldConfig, fproxyConfig, toadlets, nodeDBHandle, databaseKey, db, persistentSecret);
toadlets.setCore(clientCore);
if (JVMVersion.isTooOld()) {
clientCore.alerts.register(new JVMVersionAlert());
}
if(showFriendsVisibilityAlert)
registerFriendsVisibilityAlert();
// Node updater support
System.out.println("Initializing Node Updater");
try {
nodeUpdater = NodeUpdateManager.maybeCreate(this, config);
} catch (InvalidConfigValueException e) {
e.printStackTrace();
throw new NodeInitException(NodeInitException.EXIT_COULD_NOT_START_UPDATER, "Could not create Updater: "+e);
}
// Opennet
final SubConfig opennetConfig = new SubConfig("node.opennet", config);
opennetConfig.register("connectToSeednodes", true, 0, true, false, "Node.withAnnouncement", "Node.withAnnouncementLong", new BooleanCallback() {
@Override
public Boolean get() {
return isAllowedToConnectToSeednodes;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
if (get().equals(val))
return;
synchronized(Node.this) {
isAllowedToConnectToSeednodes = val;
if(opennet != null)
throw new NodeNeedRestartException(l10n("connectToSeednodesCannotBeChangedMustDisableOpennetOrReboot"));
}
}
});
isAllowedToConnectToSeednodes = opennetConfig.getBoolean("connectToSeednodes");
// Can be enabled on the fly
opennetConfig.register("enabled", false, 0, true, true, "Node.opennetEnabled", "Node.opennetEnabledLong", new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return opennet != null;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
OpennetManager o;
synchronized(Node.this) {
if(val == (opennet != null)) return;
if(val) {
try {
o = opennet = new OpennetManager(Node.this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
} catch (NodeInitException e) {
opennet = null;
throw new InvalidConfigValueException(e.getMessage());
}
} else {
o = opennet;
opennet = null;
}
}
if(val) o.start();
else o.stop(true);
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
});
boolean opennetEnabled = opennetConfig.getBoolean("enabled");
opennetConfig.register("maxOpennetPeers", OpennetManager.MAX_PEERS_FOR_SCALING, 1, true, false, "Node.maxOpennetPeers",
"Node.maxOpennetPeersLong", new IntCallback() {
@Override
public Integer get() {
return maxOpennetPeers;
}
@Override
public void set(Integer inputMaxOpennetPeers) throws InvalidConfigValueException {
if(inputMaxOpennetPeers < 0) throw new InvalidConfigValueException(l10n("mustBePositive"));
if(inputMaxOpennetPeers > OpennetManager.MAX_PEERS_FOR_SCALING) throw new InvalidConfigValueException(l10n("maxOpennetPeersMustBeTwentyOrLess", "maxpeers", Integer.toString(OpennetManager.MAX_PEERS_FOR_SCALING)));
maxOpennetPeers = inputMaxOpennetPeers;
}
}
, false);
maxOpennetPeers = opennetConfig.getInt("maxOpennetPeers");
if(maxOpennetPeers > OpennetManager.MAX_PEERS_FOR_SCALING) {
Logger.error(this, "maxOpennetPeers may not be over "+OpennetManager.MAX_PEERS_FOR_SCALING);
maxOpennetPeers = OpennetManager.MAX_PEERS_FOR_SCALING;
}
opennetCryptoConfig = new NodeCryptoConfig(opennetConfig, 2 /* 0 = enabled */, true, securityLevels);
if(opennetEnabled) {
opennet = new OpennetManager(this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
// Will be started later
} else {
opennet = null;
}
securityLevels.addNetworkThreatLevelListener(new SecurityLevelListener<NETWORK_THREAT_LEVEL>() {
@Override
public void onChange(NETWORK_THREAT_LEVEL oldLevel, NETWORK_THREAT_LEVEL newLevel) {
if(newLevel == NETWORK_THREAT_LEVEL.HIGH
|| newLevel == NETWORK_THREAT_LEVEL.MAXIMUM) {
OpennetManager om;
synchronized(Node.this) {
om = opennet;
if(om != null)
opennet = null;
}
if(om != null) {
om.stop(true);
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
} else if(newLevel == NETWORK_THREAT_LEVEL.NORMAL
|| newLevel == NETWORK_THREAT_LEVEL.LOW) {
OpennetManager o = null;
synchronized(Node.this) {
if(opennet == null) {
try {
o = opennet = new OpennetManager(Node.this, opennetCryptoConfig, System.currentTimeMillis(), isAllowedToConnectToSeednodes);
} catch (NodeInitException e) {
opennet = null;
Logger.error(this, "UNABLE TO ENABLE OPENNET: "+e, e);
clientCore.alerts.register(new SimpleUserAlert(false, l10n("enableOpennetFailedTitle"), l10n("enableOpennetFailed", "message", e.getLocalizedMessage()), l10n("enableOpennetFailed", "message", e.getLocalizedMessage()), UserAlert.ERROR));
}
}
}
if(o != null) {
o.start();
ipDetector.ipDetectorManager.notifyPortChange(getPublicInterfacePorts());
}
}
Node.this.config.store();
}
});
opennetConfig.register("acceptSeedConnections", false, 2, true, true, "Node.acceptSeedConnectionsShort", "Node.acceptSeedConnections", new BooleanCallback() {
@Override
public Boolean get() {
return acceptSeedConnections;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
acceptSeedConnections = val;
}
});
acceptSeedConnections = opennetConfig.getBoolean("acceptSeedConnections");
if(acceptSeedConnections && opennet != null)
opennet.crypto.socket.getAddressTracker().setHugeTracker();
opennetConfig.finishedInitialization();
nodeConfig.register("passOpennetPeersThroughDarknet", true, sortOrder++, true, false, "Node.passOpennetPeersThroughDarknet", "Node.passOpennetPeersThroughDarknetLong",
new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return passOpennetRefsThroughDarknet;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
synchronized(Node.this) {
passOpennetRefsThroughDarknet = val;
}
}
});
passOpennetRefsThroughDarknet = nodeConfig.getBoolean("passOpennetPeersThroughDarknet");
this.extraPeerDataDir = userDir.file("extra-peer-data-"+getDarknetPortNumber());
if (!((extraPeerDataDir.exists() && extraPeerDataDir.isDirectory()) || (extraPeerDataDir.mkdir()))) {
String msg = "Could not find or create extra peer data directory";
throw new NodeInitException(NodeInitException.EXIT_BAD_DIR, msg);
}
// Name
nodeConfig.register("name", myName, sortOrder++, false, true, "Node.nodeName", "Node.nodeNameLong",
new NodeNameCallback());
myName = nodeConfig.getString("name");
// Datastore
nodeConfig.register("storeForceBigShrinks", false, sortOrder++, true, false, "Node.forceBigShrink", "Node.forceBigShrinkLong",
new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return storeForceBigShrinks;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException {
synchronized(Node.this) {
storeForceBigShrinks = val;
}
}
});
// Datastore
nodeConfig.register("storeType", "ram", sortOrder++, true, true, "Node.storeType", "Node.storeTypeLong", new StoreTypeCallback());
storeType = nodeConfig.getString("storeType");
/*
* Very small initial store size, since the node will preallocate it when starting up for the first time,
* BLOCKING STARTUP, and since everyone goes through the wizard anyway...
*/
nodeConfig.register("storeSize", DEFAULT_STORE_SIZE, sortOrder++, false, true, "Node.storeSize", "Node.storeSizeLong",
new LongCallback() {
@Override
public Long get() {
return maxTotalDatastoreSize;
}
@Override
public void set(Long storeSize) throws InvalidConfigValueException {
if(storeSize < MIN_STORE_SIZE)
throw new InvalidConfigValueException(l10n("invalidStoreSize"));
long newMaxStoreKeys = storeSize / sizePerKey;
if(newMaxStoreKeys == maxTotalKeys) return;
// Update each datastore
synchronized(Node.this) {
maxTotalDatastoreSize = storeSize;
maxTotalKeys = newMaxStoreKeys;
maxStoreKeys = maxTotalKeys / 2;
maxCacheKeys = maxTotalKeys - maxStoreKeys;
}
try {
chkDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
chkDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
pubKeyDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
pubKeyDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
sskDatastore.setMaxKeys(maxStoreKeys, storeForceBigShrinks);
sskDatacache.setMaxKeys(maxCacheKeys, storeForceBigShrinks);
} catch (IOException e) {
// FIXME we need to be able to tell the user.
Logger.error(this, "Caught "+e+" resizing the datastore", e);
System.err.println("Caught "+e+" resizing the datastore");
e.printStackTrace();
}
//Perhaps a bit hackish...? Seems like this should be near its definition in NodeStats.
nodeStats.avgStoreCHKLocation.changeMaxReports((int)maxStoreKeys);
nodeStats.avgCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgSlashdotCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgClientCacheCHKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgStoreSSKLocation.changeMaxReports((int)maxStoreKeys);
nodeStats.avgCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgSlashdotCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
nodeStats.avgClientCacheSSKLocation.changeMaxReports((int)maxCacheKeys);
}
}, true);
maxTotalDatastoreSize = nodeConfig.getLong("storeSize");
if(maxTotalDatastoreSize < MIN_STORE_SIZE && !storeType.equals("ram")) { // totally arbitrary minimum!
throw new NodeInitException(NodeInitException.EXIT_INVALID_STORE_SIZE, "Store size too small");
}
maxTotalKeys = maxTotalDatastoreSize / sizePerKey;
nodeConfig.register("storeUseSlotFilters", true, sortOrder++, true, false, "Node.storeUseSlotFilters", "Node.storeUseSlotFiltersLong", new BooleanCallback() {
@Override
public Boolean get() {
synchronized(Node.this) {
return storeUseSlotFilters;
}
}
@Override
public void set(Boolean val) throws InvalidConfigValueException,
NodeNeedRestartException {
synchronized(Node.this) {
storeUseSlotFilters = val;
}
// FIXME l10n
throw new NodeNeedRestartException("Need to restart to change storeUseSlotFilters");
}
});
storeUseSlotFilters = nodeConfig.getBoolean("storeUseSlotFilters");
nodeConfig.register("storeSaltHashSlotFilterPersistenceTime", ResizablePersistentIntBuffer.DEFAULT_PERSISTENCE_TIME, sortOrder++, true, false,
"Node.storeSaltHashSlotFilterPersistenceTime", "Node.storeSaltHashSlotFilterPersistenceTimeLong", new IntCallback() {
@Override
public Integer get() {
return ResizablePersistentIntBuffer.getPersistenceTime();
}
@Override
public void set(Integer val)
throws InvalidConfigValueException,
NodeNeedRestartException {
if(val >= -1)
ResizablePersistentIntBuffer.setPersistenceTime(val);
else
throw new InvalidConfigValueException(l10n("slotFilterPersistenceTimeError"));
}
}, false);
nodeConfig.register("storeSaltHashResizeOnStart", false, sortOrder++, true, false,
"Node.storeSaltHashResizeOnStart", "Node.storeSaltHashResizeOnStartLong", new BooleanCallback() {
@Override
public Boolean get() {
return storeSaltHashResizeOnStart;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
storeSaltHashResizeOnStart = val;
}
});
storeSaltHashResizeOnStart = nodeConfig.getBoolean("storeSaltHashResizeOnStart");
this.storeDir = setupProgramDir(installConfig, "storeDir", userDir().file("datastore").getPath(), "Node.storeDirectory", "Node.storeDirectoryLong", nodeConfig);
installConfig.finishedInitialization();
final String suffix = getStoreSuffix();
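// Split the configured capacity evenly: half the keys go to the datastore, the remainder
// (including any odd key) to the datacache.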
maxStoreKeys = maxTotalKeys / 2;
maxCacheKeys = maxTotalKeys - maxStoreKeys;
/*
* On Windows, setting the file length normally involves writing lots of zeros.
* So it's an uninterruptible system call that takes a loooong time. On OS/X,
* presumably the same is true. If the RNG is fast enough, this means that
* setting the length and writing random data take exactly the same amount
* of time. On most versions of Unix, holes can be created. However on all
* systems, predictable disk usage is a good thing. So let's turn it on by
* default for now, on all systems. The datastore can be read but mostly not
* written while the random data is being written.
*/
nodeConfig.register("storePreallocate", true, sortOrder++, true, true, "Node.storePreallocate", "Node.storePreallocateLong",
new BooleanCallback() {
@Override
public Boolean get() {
return storePreallocate;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
storePreallocate = val;
if (storeType.equals("salt-hash")) {
setPreallocate(chkDatastore, val);
setPreallocate(chkDatacache, val);
setPreallocate(pubKeyDatastore, val);
setPreallocate(pubKeyDatacache, val);
setPreallocate(sskDatastore, val);
setPreallocate(sskDatacache, val);
}
}
private void setPreallocate(StoreCallback<?> datastore,
boolean val) {
// Avoid race conditions by checking first.
FreenetStore<?> store = datastore.getStore();
if(store instanceof SaltedHashFreenetStore)
((SaltedHashFreenetStore<?>)store).setPreallocate(val);
}
});
storePreallocate = nodeConfig.getBoolean("storePreallocate");
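// On Unix-like systems other than Mac OS, tie preallocation to the physical threat level:
// LOW turns it off (holes are cheap there, per the comment above), anything else keeps it on
// for predictable disk usage.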
if(File.separatorChar == '/' && System.getProperty("os.name").toLowerCase().indexOf("mac os") < 0) {
securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<SecurityLevels.PHYSICAL_THREAT_LEVEL>() {
@Override
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
try {
if(newLevel == PHYSICAL_THREAT_LEVEL.LOW)
nodeConfig.set("storePreallocate", false);
else
nodeConfig.set("storePreallocate", true);
} catch (NodeNeedRestartException e) {
// Ignore
} catch (InvalidConfigValueException e) {
// Ignore
}
}
});
}
securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<SecurityLevels.PHYSICAL_THREAT_LEVEL>() {
@Override
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
if(newLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM) {
synchronized(this) {
clientCacheAwaitingPassword = false;
databaseAwaitingPassword = false;
}
try {
killMasterKeysFile();
clientCore.clientLayerPersister.disableWrite();
clientCore.clientLayerPersister.waitForNotWriting();
clientCore.clientLayerPersister.deleteAllFiles();
} catch (IOException e) {
masterKeysFile.delete();
Logger.error(this, "Unable to securely delete "+masterKeysFile);
System.err.println(NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFile", "filename", masterKeysFile.getAbsolutePath()));
clientCore.alerts.register(new SimpleUserAlert(true, NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFileTitle"), NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFile"), NodeL10n.getBase().getString("SecurityLevels.cantDeletePasswordFileTitle"), UserAlert.CRITICAL_ERROR));
}
}
if(oldLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM && newLevel != PHYSICAL_THREAT_LEVEL.HIGH) {
// Not passworded.
// Create the master.keys.
// Keys must exist.
try {
MasterKeys keys;
synchronized(this) {
keys = Node.this.keys;
}
keys.changePassword(masterKeysFile, "", secureRandom);
} catch (IOException e) {
Logger.error(this, "Unable to create encryption keys file: "+masterKeysFile+" : "+e, e);
System.err.println("Unable to create encryption keys file: "+masterKeysFile+" : "+e);
e.printStackTrace();
}
}
}
});
if(securityLevels.physicalThreatLevel == PHYSICAL_THREAT_LEVEL.MAXIMUM) {
try {
killMasterKeysFile();
} catch (IOException e) {
String msg = "Unable to securely delete old master.keys file when switching to MAXIMUM seclevel!!";
System.err.println(msg);
throw new NodeInitException(NodeInitException.EXIT_CANT_WRITE_MASTER_KEYS, msg);
}
}
long defaultCacheSize;
long memoryLimit = NodeStarter.getMemoryLimitBytes();
// This is tricky because systems with low memory probably also have slow disks, but using
// up too much memory can be catastrophic...
// Total alchemy, FIXME!
if(memoryLimit == Long.MAX_VALUE || memoryLimit < 0)
defaultCacheSize = 1024*1024;
else if(memoryLimit <= 128*1024*1024)
defaultCacheSize = 0; // Turn off completely for very small memory.
else {
// 9 stores; in total use 5% of the memory above 128MiB, capped at 1MB per store (reached at 308MB+)
defaultCacheSize = Math.min(1024*1024, (memoryLimit - 128*1024*1024) / (20*9));
}
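// E.g. with a 256MiB memory limit, defaultCacheSize = (256MiB - 128MiB) / 180 ~= 745654 bytes
// per store; the 1MiB per-store ceiling is reached from roughly 308MiB upwards.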
nodeConfig.register("cachingFreenetStoreMaxSize", defaultCacheSize, sortOrder++, true, false, "Node.cachingFreenetStoreMaxSize", "Node.cachingFreenetStoreMaxSizeLong",
new LongCallback() {
@Override
public Long get() {
synchronized(Node.this) {
return cachingFreenetStoreMaxSize;
}
}
@Override
public void set(Long val) throws InvalidConfigValueException, NodeNeedRestartException {
if(val < 0) throw new InvalidConfigValueException(l10n("invalidMemoryCacheSize"));
// Any positive value is legal. In particular, e.g. 1200 bytes would cause us to cache SSKs but not CHKs.
synchronized(Node.this) {
cachingFreenetStoreMaxSize = val;
}
throw new NodeNeedRestartException("Caching Maximum Size cannot be changed on the fly");
}
}, true);
cachingFreenetStoreMaxSize = nodeConfig.getLong("cachingFreenetStoreMaxSize");
if(cachingFreenetStoreMaxSize < 0)
throw new NodeInitException(NodeInitException.EXIT_BAD_CONFIG, l10n("invalidMemoryCacheSize"));
nodeConfig.register("cachingFreenetStorePeriod", "300k", sortOrder++, true, false, "Node.cachingFreenetStorePeriod", "Node.cachingFreenetStorePeriod",
new LongCallback() {
@Override
public Long get() {
synchronized(Node.this) {
return cachingFreenetStorePeriod;
}
}
@Override
public void set(Long val) throws InvalidConfigValueException, NodeNeedRestartException {
synchronized(Node.this) {
cachingFreenetStorePeriod = val;
}
throw new NodeNeedRestartException("Caching Period cannot be changed on the fly");
}
}, true);
cachingFreenetStorePeriod = nodeConfig.getLong("cachingFreenetStorePeriod");
boolean shouldWriteConfig = false;
if(storeType.equals("bdb-index")) {
System.err.println("Old format Berkeley DB datastore detected.");
System.err.println("This datastore format is no longer supported.");
System.err.println("The old datastore will be securely deleted.");
storeType = "salt-hash";
shouldWriteConfig = true;
deleteOldBDBIndexStoreFiles();
}
if (storeType.equals("salt-hash")) {
initRAMFS();
// FIXME remove migration code
final int lastVersionWithBloom = 1384;
if(lastVersion > 0 && lastVersion <= lastVersionWithBloom) {
// Check for a comment in wrapper.conf saying we've already upgraded, otherwise update it and restart.
long extraMemory = maxTotalKeys * 3 * 4;
int extraMemoryMB = (int)Math.min(Integer.MAX_VALUE, ((extraMemory + 1024 * 1024 - 1) / (1024 * 1024)));
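// extraMemory is 4 bytes per slot-filter entry, times 3 -- presumably one filter per key type
// (CHK, SSK, pubkey). E.g. 10 million keys need roughly 120MB of extra heap.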
if(extraMemoryMB >= 10) {
System.out.println("Need "+extraMemoryMB+"MB extra space in heap for slot filters.");
UpdateDeployContext.CHANGED changed =
UpdateDeployContext.tryIncreaseMemoryLimit(extraMemoryMB, " Increased because of slot filters in "+(lastVersionWithBloom+1));
if(changed == CHANGED.SUCCESS) {
WrapperManager.restart();
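// If the restart goes through, the JVM shuts down here; if WrapperManager.restart() returns
// (e.g. when not running under the wrapper) we fall through and warn the user below.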
System.err.println("Unable to restart after increasing memory limit for the slot filters (the total memory usage is decreased relative to bloom filters but the heap size needs to grow). Probably due to not running in the wrapper.");
System.err.println("If the node crashes due to out of memory, be it on your own head!");
System.err.println("You need to increase wrapper.java.maxmemory by "+extraMemoryMB);
} else if(changed == CHANGED.FAIL) {
System.err.println("Unable to increase the memory limit for the slot filters (the total memory usage is decreased relative to bloom filters but the heap size needs to grow). Most likely due to being unable to write wrapper.conf or similar problem.");
System.err.println("If the node crashes due to out of memory, be it on your own head!");
System.err.println("You need to increase wrapper.java.maxmemory by "+extraMemoryMB);
} else /*if(changed == CHANGED.ALREADY)*/ {
System.err.println("Memory limit has already been increased for slot filters, continuing startup.");
}
}
}
initSaltHashFS(suffix, false, null);
} else {
initRAMFS();
}
if(databaseAwaitingPassword) createPasswordUserAlert();
// Client cache
// Default is 10MB, in memory only. The wizard will change this.
nodeConfig.register("clientCacheType", "ram", sortOrder++, true, true, "Node.clientCacheType", "Node.clientCacheTypeLong", new ClientCacheTypeCallback());
clientCacheType = nodeConfig.getString("clientCacheType");
nodeConfig.register("clientCacheSize", DEFAULT_CLIENT_CACHE_SIZE, sortOrder++, false, true, "Node.clientCacheSize", "Node.clientCacheSizeLong",
new LongCallback() {
@Override
public Long get() {
return maxTotalClientCacheSize;
}
@Override
public void set(Long storeSize) throws InvalidConfigValueException {
if(storeSize < MIN_CLIENT_CACHE_SIZE)
throw new InvalidConfigValueException(l10n("invalidStoreSize"));
long newMaxStoreKeys = storeSize / sizePerKey;
if(newMaxStoreKeys == maxClientCacheKeys) return;
// Update each datastore
synchronized(Node.this) {
maxTotalClientCacheSize = storeSize;
maxClientCacheKeys = newMaxStoreKeys;
}
try {
chkClientcache.setMaxKeys(maxClientCacheKeys, storeForceBigShrinks);
pubKeyClientcache.setMaxKeys(maxClientCacheKeys, storeForceBigShrinks);
sskClientcache.setMaxKeys(maxClientCacheKeys, storeForceBigShrinks);
} catch (IOException e) {
// FIXME we need to be able to tell the user.
Logger.error(this, "Caught "+e+" resizing the clientcache", e);
System.err.println("Caught "+e+" resizing the clientcache");
e.printStackTrace();
}
}
}, true);
maxTotalClientCacheSize = nodeConfig.getLong("clientCacheSize");
if(maxTotalClientCacheSize < MIN_CLIENT_CACHE_SIZE) {
throw new NodeInitException(NodeInitException.EXIT_INVALID_STORE_SIZE, "Client cache size too small");
}
maxClientCacheKeys = maxTotalClientCacheSize / sizePerKey;
boolean startedClientCache = false;
if (clientCacheType.equals("salt-hash")) {
if(clientCacheKey == null) {
System.err.println("Cannot open client-cache, it is passworded");
setClientCacheAwaitingPassword();
} else {
initSaltHashClientCacheFS(suffix, false, clientCacheKey);
startedClientCache = true;
}
} else if(clientCacheType.equals("none")) {
initNoClientCacheFS();
startedClientCache = true;
} else { // ram
initRAMClientCacheFS();
startedClientCache = true;
}
if(!startedClientCache)
initRAMClientCacheFS();
if(!clientCore.loadedDatabase() && databaseKey != null) {
try {
lateSetupDatabase(databaseKey);
} catch (MasterKeysWrongPasswordException e2) {
System.err.println("Impossible: "+e2);
e2.printStackTrace();
} catch (MasterKeysFileSizeException e2) {
System.err.println("Impossible: "+e2);
e2.printStackTrace();
} catch (IOException e2) {
System.err.println("Unable to load database: "+e2);
e2.printStackTrace();
}
}
nodeConfig.register("useSlashdotCache", true, sortOrder++, true, false, "Node.useSlashdotCache", "Node.useSlashdotCacheLong", new BooleanCallback() {
@Override
public Boolean get() {
return useSlashdotCache;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
useSlashdotCache = val;
}
});
useSlashdotCache = nodeConfig.getBoolean("useSlashdotCache");
nodeConfig.register("writeLocalToDatastore", false, sortOrder++, true, false, "Node.writeLocalToDatastore", "Node.writeLocalToDatastoreLong", new BooleanCallback() {
@Override
public Boolean get() {
return writeLocalToDatastore;
}
@Override
public void set(Boolean val) throws InvalidConfigValueException, NodeNeedRestartException {
writeLocalToDatastore = val;
}
});
writeLocalToDatastore = nodeConfig.getBoolean("writeLocalToDatastore");
// LOW network *and* physical seclevel = writeLocalToDatastore
securityLevels.addNetworkThreatLevelListener(new SecurityLevelListener<NETWORK_THREAT_LEVEL>() {
@Override
public void onChange(NETWORK_THREAT_LEVEL oldLevel, NETWORK_THREAT_LEVEL newLevel) {
if(newLevel == NETWORK_THREAT_LEVEL.LOW && securityLevels.getPhysicalThreatLevel() == PHYSICAL_THREAT_LEVEL.LOW)
writeLocalToDatastore = true;
else
writeLocalToDatastore = false;
}
});
securityLevels.addPhysicalThreatLevelListener(new SecurityLevelListener<PHYSICAL_THREAT_LEVEL>() {
@Override
public void onChange(PHYSICAL_THREAT_LEVEL oldLevel, PHYSICAL_THREAT_LEVEL newLevel) {
if(newLevel == PHYSICAL_THREAT_LEVEL.LOW && securityLevels.getNetworkThreatLevel() == NETWORK_THREAT_LEVEL.LOW)
writeLocalToDatastore = true;
else
writeLocalToDatastore = false;
}
});
nodeConfig.register("slashdotCacheLifetime", MINUTES.toMillis(30), sortOrder++, true, false, "Node.slashdotCacheLifetime", "Node.slashdotCacheLifetimeLong", new LongCallback() {
@Override
public Long get() {
return chkSlashdotcacheStore.getLifetime();
}
@Override
public void set(Long val) throws InvalidConfigValueException, NodeNeedRestartException {
if(val < 0) throw new InvalidConfigValueException("Must be non-negative!");
chkSlashdotcacheStore.setLifetime(val);
pubKeySlashdotcacheStore.setLifetime(val);
sskSlashdotcacheStore.setLifetime(val);
}
}, false);
long slashdotCacheLifetime = nodeConfig.getLong("slashdotCacheLifetime");
nodeConfig.register("slashdotCacheSize", DEFAULT_SLASHDOT_CACHE_SIZE, sortOrder++, false, true, "Node.slashdotCacheSize", "Node.slashdotCacheSizeLong",
new LongCallback() {
@Override
public Long get() {
return maxSlashdotCacheSize;
}
@Override
public void set(Long storeSize) throws InvalidConfigValueException {
if(storeSize < MIN_SLASHDOT_CACHE_SIZE)
throw new InvalidConfigValueException(l10n("invalidStoreSize"));
int newMaxStoreKeys = (int) Math.min(storeSize / sizePerKey, Integer.MAX_VALUE);
if(newMaxStoreKeys == maxSlashdotCacheKeys) return;
// Update each datastore
synchronized(Node.this) {
maxSlashdotCacheSize = storeSize;
maxSlashdotCacheKeys = newMaxStoreKeys;
}
try {
chkSlashdotcache.setMaxKeys(maxSlashdotCacheKeys, storeForceBigShrinks);
pubKeySlashdotcache.setMaxKeys(maxSlashdotCacheKeys, storeForceBigShrinks);
sskSlashdotcache.setMaxKeys(maxSlashdotCacheKeys, storeForceBigShrinks);
} catch (IOException e) {
// FIXME we need to be able to tell the user.
Logger.error(this, "Caught "+e+" resizing the slashdotcache", e);
System.err.println("Caught "+e+" resizing the slashdotcache");
e.printStackTrace();
}
}
}, true);
maxSlashdotCacheSize = nodeConfig.getLong("slashdotCacheSize");
if(maxSlashdotCacheSize < MIN_SLASHDOT_CACHE_SIZE) {
throw new NodeInitException(NodeInitException.EXIT_INVALID_STORE_SIZE, "Slashdot cache size too small");
}
maxSlashdotCacheKeys = (int) Math.min(maxSlashdotCacheSize / sizePerKey, Integer.MAX_VALUE);
chkSlashdotcache = new CHKStore();
chkSlashdotcacheStore = new SlashdotStore<CHKBlock>(chkSlashdotcache, maxSlashdotCacheKeys, slashdotCacheLifetime, PURGE_INTERVAL, ticker, this.clientCore.tempBucketFactory);
pubKeySlashdotcache = new PubkeyStore();
pubKeySlashdotcacheStore = new SlashdotStore<DSAPublicKey>(pubKeySlashdotcache, maxSlashdotCacheKeys, slashdotCacheLifetime, PURGE_INTERVAL, ticker, this.clientCore.tempBucketFactory);
getPubKey.setLocalSlashdotcache(pubKeySlashdotcache);
sskSlashdotcache = new SSKStore(getPubKey);
sskSlashdotcacheStore = new SlashdotStore<SSKBlock>(sskSlashdotcache, maxSlashdotCacheKeys, slashdotCacheLifetime, PURGE_INTERVAL, ticker, this.clientCore.tempBucketFactory);
// MAXIMUM seclevel = no slashdot cache.
securityLevels.addNetworkThreatLevelListener(new SecurityLevelListener<NETWORK_THREAT_LEVEL>() {
@Override
public void onChange(NETWORK_THREAT_LEVEL oldLevel, NETWORK_THREAT_LEVEL newLevel) {
if(newLevel == NETWORK_THREAT_LEVEL.MAXIMUM)
useSlashdotCache = false;
else if(oldLevel == NETWORK_THREAT_LEVEL.MAXIMUM)
useSlashdotCache = true;
}
});
nodeConfig.register("skipWrapperWarning", false, sortOrder++, true, false, "Node.skipWrapperWarning", "Node.skipWrapperWarningLong", new BooleanCallback() {
@Override
public void set(Boolean value) throws InvalidConfigValueException, NodeNeedRestartException {
skipWrapperWarning = value;
}
@Override
public Boolean get() {
return skipWrapperWarning;
}
});
skipWrapperWarning = nodeConfig.getBoolean("skipWrapperWarning");
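// The default of 1280 matches the IPv6 minimum MTU.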
nodeConfig.register("maxPacketSize", 1280, sortOrder++, true, true, "Node.maxPacketSize", "Node.maxPacketSizeLong", new IntCallback() {
@Override
public Integer get() {
synchronized(Node.this) {
return maxPacketSize;
}
}
@Override
public void set(Integer val) throws InvalidConfigValueException,
NodeNeedRestartException {
synchronized(Node.this) {
if(val == maxPacketSize) return;
if(val < UdpSocketHandler.MIN_MTU) throw new InvalidConfigValueException("Must be at least "+UdpSocketHandler.MIN_MTU);
if(val > 1492) throw new InvalidConfigValueException("Larger than ethernet frame size unlikely to work!");
maxPacketSize = val;
}
updateMTU();
}