}
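// decode inbound bytes into HTTP request messages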
pipeline.addLast("decoder", new HttpRequestDecoder());
List<ChannelHandler> decoders = consumer.getConfiguration().getDecoders();
for (int x = 0; x < decoders.size(); x++) {
ChannelHandler decoder = decoders.get(x);
if (decoder instanceof ChannelHandlerFactory) {
// use the factory to create a new instance of the channel handler as it may not be shareable
decoder = ((ChannelHandlerFactory) decoder).newChannelHandler();
}
pipeline.addLast("decoder-" + x, decoder);
}
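// aggregate HTTP chunks into a single request, up to the configured maximum content length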
pipeline.addLast("aggregator", new HttpChunkAggregator(configuration.getChunkedMaxContentLength()));
pipeline.addLast("encoder", new HttpResponseEncoder());
List<ChannelHandler> encoders = consumer.getConfiguration().getEncoders();
for (int x = 0; x < encoders.size(); x++) {
ChannelHandler encoder = encoders.get(x);
if (encoder instanceof ChannelHandlerFactory) {
// use the factory to create a new instance of the channel handler as it may not be shareable
encoder = ((ChannelHandlerFactory) encoder).newChannelHandler();
}
pipeline.addLast("encoder-" + x, encoder);
}
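// optionally compress the HTTP response content (gzip/deflate) based on the client's Accept-Encoding header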
if (supportCompressed()) {
pipeline.addLast("deflater", new HttpContentCompressor());
}
if (consumer.getConfiguration().isOrderedThreadPoolExecutor()) {
// this must be added just before the HttpServerMultiplexChannelHandler
// use an ordered thread pool to ensure we process the events in order and can send back
// replies in the expected order, e.g. as required by TCP,
// and use a Camel thread factory so we have consistent thread names
ExecutionHandler executionHandler = new ExecutionHandler(consumer.getEndpoint().getComponent().getExecutorService());
pipeline.addLast("executionHandler", executionHandler);
LOG.debug("Using OrderedMemoryAwareThreadPoolExecutor with maximum pool size: {}", consumer.getConfiguration().getMaximumPoolSize());
}
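// the multiplex channel handler is shared by all consumers on the same port
// and dispatches each request to the matching consumer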
int port = consumer.getConfiguration().getPort();
ChannelHandler handler = consumer.getEndpoint().getComponent().getMultiplexChannelHandler(port).getChannelHandler();
pipeline.addLast("handler", handler);
return pipeline;
}