if (log.isLoggable(Level.FINE)) log.fine("Subscribing ...");
String passwd = "secret";
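// The same EXACT subscription key and empty QoS are reused by all subscriber clients below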
SubscribeKey subKeyW = new SubscribeKey(glob, publishOid1);
String subKey = subKeyW.toXml(); // "<key oid='" + publishOid1 + "' queryType='EXACT'></key>";
SubscribeQos subQosW = new SubscribeQos(glob); // "<qos></qos>";
String subQos = subQosW.toXml();
manyClients = new Client[numSubscribers];
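// One Client holder per subscriber, tracking its login name, connection and subscription id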
if (maxSubPerCon > 0) {
// Sanity check: the subscribers must distribute evenly over the connections
if (numSubscribers % maxSubPerCon != 0) {
fail("numSubscribers is not divisible by maxSubPerCon");
}
manyConnections = new I_XmlBlasterAccess[numSubscribers/maxSubPerCon];
} // end if (maxSubPerCon > 0)
long usedBefore = getUsedServerMemory();
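// Remember the server memory consumption before the clients are set up, for a later comparison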
log.info("Setting up " + numSubscribers + " subscriber clients ...");
int startNoThreads = ThreadLister.countThreads();
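// Baseline thread count, to detect thread growth caused by the client setup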
//ThreadLister.listAllThreads(System.out);
stopWatch = new StopWatch();
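// Time how long setting up all subscriber clients takes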
for (int ii=0; ii<numSubscribers; ii++) {
Client sub = new Client();
sub.loginName = "Joe-" + ii;
sub.oneConnection = useOneConnection;
if (useOneConnection) {
// Should we distribute the subscribers among a few connections?
if (maxSubPerCon > 0) {
if (ii % maxSubPerCon == 0) {
ci++;
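// ci indexes the shared connection in manyConnections; a new connection is opened for every maxSubPerCon-th client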
try {
log.fine("Creating connection no: " +ci);
Global gg = globalUtil.getClone(glob);
// Try to reuse the same ORB to avoid too many threads:
if ("IOR".equals(gg.getProperty().get("protocol","IOR")) && ci > 0) {
gg.addObjectEntry(Constants.RELATING_CLIENT+":org.xmlBlaster.util.protocol.corba.OrbInstanceWrapper",
(org.xmlBlaster.util.protocol.corba.OrbInstanceWrapper)manyConnections[ci-1].getGlobal().getObjectEntry(Constants.RELATING_CLIENT+":org.xmlBlaster.util.protocol.corba.OrbInstanceWrapper"));
}
manyConnections[ci] = gg.getXmlBlasterAccess();
ConnectQos connectQos = new ConnectQos(gg, sub.loginName, passwd); // "<qos></qos>"; during login the callback address is added
// If we have many subs on one con, we must raise the max size of the callback queue!
CbQueueProperty cbProp = connectQos.getSessionCbQueueProperty();
cbProp.setMaxEntries(maxSubPerCon*1000); // keeps the usual backlog of 1000 messages per subscriber, as when each connection serves only one subscriber
//cbProp.setMaxBytes(4000);
//cbProp.setOnOverflow(Constants.ONOVERFLOW_BLOCK);
//connectQos.setSubjectQueueProperty(cbProp);
log.fine("Login qos: " + connectQos.toXml());
ConnectReturnQos connectReturnQos = manyConnections[ci].connect(connectQos, this);
log.info("Connected maxSubPerCon=" + maxSubPerCon + " : " + connectReturnQos.toXml());
}
catch (Exception e) {
log.severe("Login failed: " + e.toString());
assertTrue("Login failed: " + e.toString(), false);
}
} // end if (ii % maxSubPerCon == 0)
sub.connection = manyConnections[ci];
} else {
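// All subscribers share the single pre-created connection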
sub.connection = oneConnection;
}
} else {
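// Each subscriber client gets its own connection on its own cloned Global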
try {
Global gg = globalUtil.getClone(glob);
sub.connection = gg.getXmlBlasterAccess();
ConnectQos connectQos = new ConnectQos(gg, sub.loginName, passwd); // "<qos></qos>"; during login the callback address is added
ConnectReturnQos connectReturnQos = sub.connection.connect(connectQos, this);
log.info("Connected: " + connectReturnQos.toXml());
}
catch (Exception e) {
log.severe("Login failed: " + e.toString());
assertTrue("Login failed: " + e.toString(), false);
}
}
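// Subscribe with the shared key/QoS; the returned subscription id is kept in the Client holder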
try {
sub.subscribeOid = sub.connection.subscribe(subKey, subQos).getSubscriptionId();
log.fine("Client " + sub.loginName + " subscribed to " + subKeyW.getOid());
} catch (XmlBlasterException e) {
log.warning("XmlBlasterException: " + e.getMessage());
assertTrue("subscribe - XmlBlasterException: " + e.getMessage(), false);
}