// Seed from the clock so multiple broker processes get different sequences.
// NOTE(review): 'rand' is not used in the visible setup — presumably used
// further down in the routing loop; kept as-is.
Random rand = new Random(System.nanoTime());
ZContext ctx = new ZContext();
// Prepare local frontend (faces our clients) and backend (faces our workers)
Socket localfe = ctx.createSocket(ZMQ.ROUTER);
localfe.bind(String.format("ipc://%s-localfe.ipc", self));
Socket localbe = ctx.createSocket(ZMQ.ROUTER);
localbe.bind(String.format("ipc://%s-localbe.ipc", self));
// Bind cloud frontend to endpoint; identity is our broker name so peers
// can address replies to us explicitly
Socket cloudfe = ctx.createSocket(ZMQ.ROUTER);
cloudfe.setIdentity(self.getBytes());
cloudfe.bind(String.format("ipc://%s-cloud.ipc", self));
// Connect cloud backend to all peers named on the command line
Socket cloudbe = ctx.createSocket(ZMQ.ROUTER);
cloudbe.setIdentity(self.getBytes());
int argn;
for (argn = 1; argn < argv.length; argn++) {
String peer = argv[argn];
System.out.printf("I: connecting to cloud frontend at '%s'\n", peer);
cloudbe.connect(String.format("ipc://%s-cloud.ipc", peer));
}
// Bind state backend (PUB) to endpoint; we broadcast our capacity here
Socket statebe = ctx.createSocket(ZMQ.PUB);
statebe.bind(String.format("ipc://%s-state.ipc", self));
// Connect state frontend (SUB) to all peers; subscribe to everything
Socket statefe = ctx.createSocket(ZMQ.SUB);
statefe.subscribe("".getBytes());
for (argn = 1; argn < argv.length; argn++) {
String peer = argv[argn];
System.out.printf("I: connecting to state backend at '%s'\n", peer);
statefe.connect(String.format("ipc://%s-state.ipc", peer));
}
// Prepare monitor socket: workers/clients push status strings here
Socket monitor = ctx.createSocket(ZMQ.PULL);
monitor.bind(String.format("ipc://%s-monitor.ipc", self));
// Start local worker threads
int worker_nbr;
for (worker_nbr = 0; worker_nbr < NBR_WORKERS; worker_nbr++)
new worker_task().start();
// Start local client threads
int client_nbr;
for (client_nbr = 0; client_nbr < NBR_CLIENTS; client_nbr++)
new client_task().start();
// Queue of available workers; localCapacity mirrors workers.size(),
// cloudCapacity is the last capacity our peers advertised via statefe
int localCapacity = 0;
int cloudCapacity = 0;
ArrayList<ZFrame> workers = new ArrayList<ZFrame>();
// The main loop has two parts. First we poll workers and our two service
// sockets (statefe and monitor), in any case. If we have no ready workers,
// there's no point in looking at incoming requests. These can remain on
// their internal 0MQ queues:
while (true) {
// First, route any waiting replies from workers
PollItem primary[] = {
new PollItem(localbe, Poller.POLLIN),
new PollItem(cloudbe, Poller.POLLIN),
new PollItem(statefe, Poller.POLLIN),
new PollItem(monitor, Poller.POLLIN)
};
// If we have workers, poll with a 1s timeout so we can still advertise
// capacity; if we have no workers anyhow, wait indefinitely
int rc = ZMQ.poll(primary,
localCapacity > 0 ? 1000 : -1);
if (rc == -1)
break; // Interrupted
// Track if capacity changes during this iteration
// (presumably compared against localCapacity further down to decide
// whether to rebroadcast state — continuation not visible here)
int previous = localCapacity;
// Handle reply from local worker: the unwrapped address frame is the
// worker's identity, which goes back on the available-workers queue
ZMsg msg = null;
if (primary[0].isReadable()) {
msg = ZMsg.recvMsg(localbe);
if (msg == null)
break; // Interrupted
ZFrame address = msg.unwrap();
workers.add(address);
localCapacity++;
// If it's READY (a worker signing on), don't route the message any
// further — destroy it and null msg so the routing code below skips it
ZFrame frame = msg.getFirst();
if (new String(frame.getData()).equals(WORKER_READY)) {
msg.destroy();
msg = null;
}
}
// Or handle reply from peer broker
else if (primary[1].isReadable()) {
msg = ZMsg.recvMsg(cloudbe);
if (msg == null)
break; // Interrupted
// We don't use peer broker address for anything
ZFrame address = msg.unwrap();
address.destroy();
}
// Route reply to cloud if it's addressed to a broker: the first frame
// is the destination identity, matched against our known peer names
for (argn = 1; msg != null && argn < argv.length; argn++) {
byte[] data = msg.getFirst().getData();
if (argv[argn].equals(new String(data))) {
msg.send(cloudfe);
msg = null;
}
}
// Route reply to client if we still need to
if (msg != null)
msg.send(localfe);
// If we have input messages on our statefe or monitor sockets we
// can process these immediately:
if (primary[2].isReadable()) {
// State message: peer name frame, then its capacity as decimal text.
// NOTE(review): parseInt will throw on a malformed status — assumes
// peers only ever publish well-formed integers
String peer = statefe.recvStr();
String status = statefe.recvStr();
cloudCapacity = Integer.parseInt(status);
}
if (primary[3].isReadable()) {
// Monitor messages are plain status strings; just print them
String status = monitor.recvStr();
System.out.println(status);
}
// Now we route as many client requests as we have worker capacity
// for. We may reroute requests from our local frontend, but not from //