if (_logger.isLoggable(Level.FINEST)) {
_logger.log(Level.FINEST, "clb.proxy.http.protocol_handler_invoked", protocolInfo.key);
}
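// Unpack the proxied task (if any) from the protocol info and hand the
// selector thread over to the proxy.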
ClbProxyProtocolInfo clbInfo = (ClbProxyProtocolInfo) protocolInfo;
Object ptask = clbInfo.object;
ProxyRequestHandler task = null;
Endpoint endpoint = null;
Request request = null;
Response response = null;
proxy.setSelectorThread(clbInfo.selectorThread);
if (ptask != null) {
task = (ProxyRequestHandler) ptask;
response = task.getResponse();
request = task.getRequest();
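/*
* A non-OK status at this point means the request was rejected before
* being proxied: -1 marks maximum overload, anything else is a handler
* error that is reported back to the client.
*/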
if (response.getStatus() != 200) {
if (response.getStatus() == -1) {
if (_logger.isLoggable(Level.FINEST)) {
_logger.log(Level.FINEST, "http-maximum-overload");
}
/* Maximum overload condition: sleeping keeps the thread occupied;
* the connection is closed and the task is released later.
*/
doOverloadAction(response);
} else {
_logger.log(Level.SEVERE,
"clb.proxy.http.handler_error_response");
response.setHeader(LoadBalancerProxyConstants.HTTP_CONNECTION_HEADER,
LoadBalancerProxyConstants.HTTP_CONNECTION_CLOSE_VALUE);
if (task.isSecure()) {
sendSecureResponse(request, response);
} else {
sendResponse(request, response);
}
if (_logger.isLoggable(Level.FINE)) {
_logger.log(Level.FINE,
"clb.proxy.http.handler_sent_error_response");
}
}
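// In both cases, return the task to the pool and drop keep-alive so
// the connection is closed.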
task.recycle();
objManager.offerTask(task, protocolInfo.isSecure);
if (_logger.isLoggable(Level.FINEST)) {
_logger.log(Level.FINEST, "clb.proxy.http.handler_released_resources");
}
protocolInfo.keepAlive = false;
return;
}
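// Normal path: the task carries the backend endpoint selected for
// this request.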
endpoint = task.getEndpoint();
if (_logger.isLoggable(Level.FINE)) {
_logger.log(Level.FINE,
"clb.proxy.http.handler.endpoint", endpoint);
}
} else {
/*
* Not a new request: there is more data to read
* from an existing socket.
*/
if (_logger.isLoggable(Level.FINEST)) {
_logger.log(Level.FINEST, "clb.proxy.http.handler_more_data");
}
}
/*
* Invoke the proxy API. At this point we have the buffer with the
* completely read headers and the selection key; the remote address
* is available through a ThreadLocal object from the HA LB. This
* invocation is just like what is done in WSTCPProtocolHandler.
*/
clbInfo.cacheHandler = proxy.doProxyHttp(task,
protocolInfo.byteBuffer, protocolInfo.key);
if (_logger.isLoggable(Level.FINE)) {
_logger.log(Level.FINE,
"clb.proxy.http.handler_doproxy_return",
clbInfo.cacheHandler);
}
/*
* If we return false here, the Grizzly 1.0 controller will cancel the
* key and close the channel. That is an awkward scenario because we
* would have to wait until a response is received from the backend;
* whatever async processing we do with the backend is of little use
* because this thread cannot return. If the return value is true, the
* current request has been read completely and any further bytes on
* the channel belong to a new request, which has to go through the
* finder.
*/
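// A null task means this invocation is a continued read; look up the
// server endpoint that was registered for this key.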
if (task == null) {
task = proxy.getConnectionManager().getServerEndpoint(protocolInfo.key);
}
if ((task != null) && (task.getError())) {
// TODO: revisit keep-alive handling here. The connection is not kept
// alive, and the key is cancelled through the callback.
protocolInfo.keepAlive = false;
send503Response(task.getRequest(), task.getResponse());
} else {
protocolInfo.keepAlive = true;
}
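// The handler is not cached for this key: remove the protocol mapping
// and release the client endpoint and the task.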
if (!clbInfo.cacheHandler) {
protocolInfo.mappedProtocols.remove(protocolInfo.key);
if (task != null) {
HttpProxy.getInstance().getConnectionManager().
removeClientEndpoint(task.getSelectionKey());
task.recycle();
objManager.offerTask(task, protocolInfo.isSecure);
} else {
_logger.log(Level.SEVERE, "clb.proxy.http.handler_release_fail");
}
clbInfo.parkRequest = protocolInfo.keepAlive;