Package org.jboss.netty.channel.group

Examples of org.jboss.netty.channel.group.ChannelGroup
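
A ChannelGroup is a thread-safe set of open Channels: channels are removed from the group automatically when they close, and bulk operations such as write() and close() run against every member at once, returning a ChannelGroupFuture. A minimal sketch of the core pattern (class and group names here are illustrative, not taken from the examples below):

import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.ChannelGroupFuture;
import org.jboss.netty.channel.group.DefaultChannelGroup;

public class ChannelGroupSketch {
    // One shared group; DefaultChannelGroup is safe to use from multiple threads
    static final ChannelGroup ALL_CHANNELS = new DefaultChannelGroup("all-channels");

    static void track(Channel channel) {
        // Closed channels are removed from the group automatically
        ALL_CHANNELS.add(channel);
    }

    static void shutdown() {
        // close() returns a ChannelGroupFuture that covers every member channel
        ChannelGroupFuture future = ALL_CHANNELS.close();
        future.awaitUninterruptibly();
    }
}

The first example below is a TSO (timestamp oracle) server setup: a global ChannelGroup collects the server's channels so shutdown can close them in one step.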


        ChannelFactory factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
                Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);

        ServerBootstrap bootstrap = new ServerBootstrap(factory);
        // Create the global ChannelGroup
        ChannelGroup channelGroup = new DefaultChannelGroup(TSOServer.class.getName());
        // Maximum worker threads for the pipeline executor
        // int maxThreads = Runtime.getRuntime().availableProcessors() * 2 + 1;
        int maxThreads = 5;
        // Memory limits: 1 MB per channel, 1 GB total, 100 ms keep-alive
        ThreadPoolExecutor pipelineExecutor = new OrderedMemoryAwareThreadPoolExecutor(maxThreads, 1048576, 1073741824,
                100, TimeUnit.MILLISECONDS, new ObjectSizeEstimator() {
                    @Override
                    public int estimateSize(Object o) {
                        return 1000;
                    }
                }, Executors.defaultThreadFactory());

        // The only TimestampOracle instance
        // TODO: make it a singleton
        //TimestampOracle timestampOracle = new TimestampOracle();
        // The wrapper for the shared state of the TSO
        state = BookKeeperStateBuilder.getState(this.config);
       
        if (state == null) {
            LOG.error("Couldn't build state");
            return;
        }

        state.addRecord(new byte[] { LoggerProtocol.LOGSTART }, new AddRecordCallback() {
            @Override
            public void addRecordComplete(int rc, Object ctx) {
            }
        }, null);

        TSOState.BATCH_SIZE = config.getBatchSize();
        System.out.println("PARAM MAX_ITEMS: " + TSOState.MAX_ITEMS);
        System.out.println("PARAM BATCH_SIZE: " + TSOState.BATCH_SIZE);
        System.out.println("PARAM LOAD_FACTOR: " + TSOState.LOAD_FACTOR);
        System.out.println("PARAM MAX_THREADS: " + maxThreads);

        final TSOHandler handler = new TSOHandler(channelGroup, state);
        handler.start();

        bootstrap.setPipelineFactory(new TSOPipelineFactory(pipelineExecutor, handler));
        bootstrap.setOption("tcpNoDelay", false);
        // Setting buffer sizes can improve I/O throughput
        bootstrap.setOption("child.sendBufferSize", 1048576);
        bootstrap.setOption("child.receiveBufferSize", 1048576);
        // Better to use an adaptive receive buffer size predictor
        bootstrap.setOption("receiveBufferSizePredictorFactory",
              new AdaptiveReceiveBufferSizePredictorFactory());
        // If the server sends ~1000 messages per second, well-tuned write buffer
        // watermarks prevent unnecessary throttling; see the NioSocketChannelConfig docs
        bootstrap.setOption("writeBufferLowWaterMark", 32 * 1024);
        bootstrap.setOption("writeBufferHighWaterMark", 64 * 1024);

        bootstrap.setOption("child.tcpNoDelay", false);
        bootstrap.setOption("child.keepAlive", true);
        bootstrap.setOption("child.reuseAddress", true);
        bootstrap.setOption("child.connectTimeoutMillis", 60000);

        // *** Start the Netty running ***

        // Create the monitor
        ThroughputMonitor monitor = new ThroughputMonitor(state);
        // Add the parent channel to the group
        Channel channel = bootstrap.bind(new InetSocketAddress(config.getPort()));
        channelGroup.add(channel);
       
        // Compacter handler
        ChannelFactory comFactory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
              Executors.newCachedThreadPool(), (Runtime.getRuntime().availableProcessors() * 2 + 1) * 2);
        ServerBootstrap comBootstrap = new ServerBootstrap(comFactory);
        ChannelGroup comGroup = new DefaultChannelGroup("compacter");
        final CompacterHandler comHandler = new CompacterHandler(comGroup, state);
        comBootstrap.setPipelineFactory(new ChannelPipelineFactory() {

            @Override
            public ChannelPipeline getPipeline() throws Exception {
                ChannelPipeline pipeline = Channels.pipeline();
                pipeline.addLast("decoder", new ObjectDecoder());
                pipeline.addLast("encoder", new ObjectEncoder());
                pipeline.addLast("handler", comHandler);
                return pipeline;
            }
        });
        comBootstrap.setOption("tcpNoDelay", false);
        comBootstrap.setOption("child.tcpNoDelay", false);
        comBootstrap.setOption("child.keepAlive", true);
        comBootstrap.setOption("child.reuseAddress", true);
        comBootstrap.setOption("child.connectTimeoutMillis", 100);
        comBootstrap.setOption("readWriteFair", true);
        channel = comBootstrap.bind(new InetSocketAddress(config.getPort() + 1));

        // Starts the monitor
        monitor.start();
        synchronized (lock) {
            while (!finish) {
                try {
                    lock.wait();
                } catch (InterruptedException e) {
                    break;
                }
            }
        }

        //timestampOracle.stop();
        handler.stop();
        comHandler.stop();
        state.stop();

        // *** Start the Netty shutdown ***

        // End the monitor
        System.out.println("End of monitor");
        monitor.interrupt();
        // Now close all channels
        System.out.println("End of channel group");
        channelGroup.close().awaitUninterruptibly();
        comGroup.close().awaitUninterruptibly();
        // Close the executor for Pipeline
        System.out.println("End of pipeline executor");
        pipelineExecutor.shutdownNow();
        // Now release resources
        System.out.println("End of resources");
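The server above passes channelGroup to TSOHandler and adds only the parent (listening) channel itself; registering each accepted connection happens inside the handler, which is not shown. A sketch of the usual Netty 3 idiom for that registration (an assumption about what TSOHandler does, not its actual code):

import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.group.ChannelGroup;

public class GroupTrackingHandler extends SimpleChannelUpstreamHandler {
    private final ChannelGroup channelGroup;

    public GroupTrackingHandler(ChannelGroup channelGroup) {
        this.channelGroup = channelGroup;
    }

    @Override
    public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
        // Register every accepted channel so the final channelGroup.close()
        // reaches all live connections; the group drops the channel on close
        channelGroup.add(e.getChannel());
        super.channelOpen(ctx, e);
    }
}

The next example is the client side of the same benchmark.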


        // Start the client with at most 30 worker threads
        ChannelFactory factory = new NioClientSocketChannelFactory(Executors.newCachedThreadPool(),
                Executors.newCachedThreadPool(), 30);

        // Create the global ChannelGroup
        ChannelGroup channelGroup = new DefaultChannelGroup(
                TransactionClient.class.getName());
       
        List<ClientHandler> handlers = new ArrayList<ClientHandler>();

        Configuration conf = HBaseConfiguration.create();
        conf.set("tso.host", host);
        conf.setInt("tso.port", port);
        conf.setInt("tso.executor.threads", 10);

        for (int i = 0; i < runs; ++i) {
            // Create the associated handler
            ClientHandler handler = new ClientHandler(conf, nbMessage, inflight, pauseClient, percentRead);

            // *** Start the Netty running ***

            System.out.println("PARAM MAX_ROW: " + ClientHandler.MAX_ROW);
            System.out.println("PARAM DB_SIZE: " + ClientHandler.DB_SIZE);
            System.out.println("pause " + pauseClient);
            System.out.println("readPercent " + percentRead);

            handlers.add(handler);

            // Throttle handler creation: pause after roughly every 20 handlers
            if ((i - 1) % 20 == 0) Thread.sleep(1000);
        }
       
        // Wait for the Traffic to finish
        for (ClientHandler handler : handlers) {
           boolean result = handler.waitForAll();
           System.out.println("Result: " + result);
        }

        // *** Start the Netty shutdown ***

        // Now close all channels
        System.out.println("close channelGroup");
        channelGroup.close().awaitUninterruptibly();
        // Now release resources
        System.out.println("close external resources");
        factory.releaseExternalResources();
    }
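The client snippet creates the handlers but leaves the actual connect and the registration into channelGroup implicit. A sketch of that wiring under the usual Netty 3 conventions (the helper method and its parameters are illustrative, not from the original):

import java.net.InetSocketAddress;

import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.group.ChannelGroup;

public class ClientConnectSketch {
    public static void connectAndTrack(ClientBootstrap bootstrap, ChannelGroup group,
                                       String host, int port) {
        ChannelFuture future = bootstrap.connect(new InetSocketAddress(host, port));
        future.awaitUninterruptibly();
        if (future.isSuccess()) {
            // Track the channel so channelGroup.close() tears it down later
            group.add(future.getChannel());
        }
    }
}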

    /*
     * Gather the connections related to the requested targetUuid
     */
    @Override
    public void createSession(SourceHandler sourceHandler, Class<? extends TargetHandler> targetHandler, String targetUuid) {
        ChannelGroup group = new DefaultChannelGroup();
        HashSet<String> uuids = new HashSet<>();
        boolean breakOnMatch = targetUuid != null && !targetUuid.isEmpty();
        int sessionID = nextSession.incrementAndGet();
        synchronized (connectedChannels) {
            for (ChannelState state : connectedChannels) {
                // == compares against the LINK_ALL / LINK_NAMED sentinel
                // constants by reference (identity), not by value
                if ((targetUuid == MeshyConstants.LINK_ALL) ||
                        (targetUuid == MeshyConstants.LINK_NAMED && state.getRemoteAddress() != null) ||
                        (state.getName() != null && targetUuid.equals(state.getName()))) {
                    /* prevent dups if >1 connection to the same host */
                    if (state.getName() != null && !uuids.add(state.getName())) {
                        continue;
                    }
                    group.add(state.getChannel());
                    /* add channel callback path to source */
                    state.addSourceHandler(sessionID, sourceHandler);
                    if (breakOnMatch) {
                        break;
                    }
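createSession() gathers the matching channels into a session-scoped group; writing to the session then goes through the group as a whole. A sketch of inspecting the per-channel results of such a write (the helper is illustrative; ChannelGroupFuture iterates one ChannelFuture per member):

import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.ChannelGroupFuture;

public class SessionWriteSketch {
    // Write one message to every gathered channel and count failed writes
    public static int broadcast(ChannelGroup group, Object message) {
        ChannelGroupFuture future = group.write(message);
        future.awaitUninterruptibly();
        int failed = 0;
        for (ChannelFuture f : future) {
            if (!f.isSuccess()) {
                failed++;
            }
        }
        return failed;
    }
}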

            bootstrap.setPipelineFactory(pfact);
            InetSocketAddress sa =
                (openFlowHost == null)
                ? new InetSocketAddress(openFlowPort)
                : new InetSocketAddress(openFlowHost, openFlowPort);
            final ChannelGroup cg = new DefaultChannelGroup();
            cg.add(bootstrap.bind(sa));

            log.info("Listening for switch connections on {}", sa);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
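Note how cg.add(bootstrap.bind(sa)) puts the bound server channel into the group, so a single group close later stops the listener as well. A shutdown sketch matching that pattern (the ordering follows the usual Netty 3 convention; the helper itself is not from the original):

import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.group.ChannelGroup;

public class ListenerShutdownSketch {
    public static void shutdown(ChannelGroup cg, ChannelFactory factory) {
        // Close the acceptor and any other tracked channels first...
        cg.close().awaitUninterruptibly();
        // ...then release the boss/worker thread pools
        factory.releaseExternalResources();
    }
}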

    public void removeFromParty(Session session) {
        party.remove(session.getHabbo().getId());
    }

    public void respond(Response response) {
        ChannelGroup group = new DefaultChannelGroup();

        for(Session session : party.values()) {
            group.add(session.getChannel());
        }

        group.write(response);
    }
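respond() rebuilds a throwaway group on each call and broadcasts with a single write(). That write is asynchronous; a sketch of observing its outcome without blocking (the listener-based helper is illustrative, not part of the original):

import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.ChannelGroupFuture;
import org.jboss.netty.channel.group.ChannelGroupFutureListener;

public class BroadcastSketch {
    public static void broadcast(ChannelGroup group, Object response) {
        ChannelGroupFuture future = group.write(response);
        future.addListener(new ChannelGroupFutureListener() {
            @Override
            public void operationComplete(ChannelGroupFuture f) throws Exception {
                if (!f.isCompleteSuccess()) {
                    // Closed members were already dropped from the group automatically
                    System.err.println("Broadcast failed for at least one channel");
                }
            }
        });
    }
}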

