Package org.elasticsearch.cluster.node

Examples of org.elasticsearch.cluster.node.DiscoveryNode
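
The snippets on this page are taken from the Elasticsearch (0.x era) test and discovery sources. As an orienting, minimal sketch of the pattern they share: a DiscoveryNode is built from a node id plus the TransportAddress it publishes, and nodes are grouped into an immutable DiscoveryNodes set with one of them marked as local. The constructor and builder calls below mirror the snippets on this page; the LocalTransportAddress placeholder and the main wrapper are assumptions added only to make the example self-contained.

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.cluster.node.DiscoveryNodes;
    import org.elasticsearch.common.transport.LocalTransportAddress;

    public class DiscoveryNodeSketch {
        public static void main(String[] args) {
            // a node id plus the address it publishes, the same two-argument constructor used in the tests below;
            // LocalTransportAddress("1") is an arbitrary placeholder address, not taken from the snippets
            DiscoveryNode nodeA = new DiscoveryNode("A", new LocalTransportAddress("1"));
            // an immutable set of known nodes, with "A" marked as the local node
            DiscoveryNodes nodes = DiscoveryNodes.newNodesBuilder().put(nodeA).localNodeId("A").build();
            System.out.println(nodes.localNode());
        }
    }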


    @Test public void testSimplePings() {
        ThreadPool threadPool = new ThreadPool();
        ClusterName clusterName = new ClusterName("test");
        NettyTransport transportA = new NettyTransport(threadPool);
        final TransportService transportServiceA = new TransportService(transportA, threadPool).start();
        final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress());

        InetSocketTransportAddress addressA = (InetSocketTransportAddress) transportA.boundAddress().publishAddress();

        NettyTransport transportB = new NettyTransport(threadPool);
        final TransportService transportServiceB = new TransportService(transportB, threadPool).start();
        final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress());

        InetSocketTransportAddress addressB = (InetSocketTransportAddress) transportB.boundAddress().publishAddress();

        Settings hostsSettings = ImmutableSettings.settingsBuilder().putArray("discovery.zen.ping.unicast.hosts",
                addressA.address().getAddress().getHostAddress() + ":" + addressA.address().getPort(),
View Full Code Here
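
The settings builder in the snippet above is cut off mid-call. As a hedged sketch of how such a unicast ping test typically continues, reusing the variables defined above (the UnicastZenPing constructor signature is an assumption for this 0.x code base, and the nodes provider is modeled on the multicast example that follows):

        // hedged sketch, not the original test body: hostsSettings is assumed to list both addressA and addressB
        UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, transportServiceA, clusterName); // assumed signature
        zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
            @Override public DiscoveryNodes nodes() {
                // expose nodeA as the local node of this ping instance
                return DiscoveryNodes.newNodesBuilder().put(nodeA).localNodeId("A").build();
            }
        });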


    @Test public void testSimplePings() {
        ThreadPool threadPool = new ThreadPool();
        ClusterName clusterName = new ClusterName("test");
        final TransportService transportServiceA = new TransportService(new LocalTransport(threadPool), threadPool).start();
        final DiscoveryNode nodeA = new DiscoveryNode("A", transportServiceA.boundAddress().publishAddress());

        final TransportService transportServiceB = new TransportService(new LocalTransport(threadPool), threadPool).start();
        final DiscoveryNode nodeB = new DiscoveryNode("B", transportServiceB.boundAddress().publishAddress());

        MulticastZenPing zenPingA = new MulticastZenPing(threadPool, transportServiceA, clusterName);
        zenPingA.setNodesProvider(new DiscoveryNodesProvider() {
            @Override public DiscoveryNodes nodes() {
                return DiscoveryNodes.newNodesBuilder().put(nodeA).localNodeId("A").build();
View Full Code Here
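
A hedged note on how such a ping test is usually driven once the pinger is set up: the pingAndWait call below is an assumption about the 0.x ZenPing API (it may live on ZenPingService rather than on the individual ZenPing implementation), so treat this strictly as a sketch.

        zenPingA.start();
        // block for a short, arbitrary timeout and collect whatever ping responses arrived;
        // each response carries the target DiscoveryNode that answered the ping
        ZenPing.PingResponse[] responses = zenPingA.pingAndWait(TimeValue.timeValueSeconds(1)); // assumed API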

@Test
public class SimpleLocalTransportTests extends AbstractSimpleTransportTests {

    @Override protected void build() {
        serviceA = new TransportService(new LocalTransport(threadPool), threadPool).start();
        serviceANode = new DiscoveryNode("A", serviceA.boundAddress().publishAddress());

        serviceB = new TransportService(new LocalTransport(threadPool), threadPool).start();
        serviceBNode = new DiscoveryNode("B", serviceB.boundAddress().publishAddress());
    }
View Full Code Here
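
The two DiscoveryNode fields built here are what the shared AbstractSimpleTransportTests code uses to wire the two services together. As a hedged usage sketch reusing those fields (connectToNode is the same TransportService call that appears in testConnectException further down):

        // each service connects to the other node before requests can flow between them
        serviceA.connectToNode(serviceBNode);
        serviceB.connectToNode(serviceANode);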

    public void close() {
        transportService.removeHandler(PublishClusterStateRequestHandler.ACTION);
    }

    public void publish(ClusterState clusterState) {
        DiscoveryNode localNode = nodesProvider.nodes().localNode();

        // serialize the cluster state here, so we won't do it several times per node
        CachedStreamOutput.Entry cachedEntry = CachedStreamOutput.popEntry();
        byte[] clusterStateInBytes;
        try {
View Full Code Here

        @Override public void run() {
            if (!running) {
                // return and don't spawn...
                return;
            }
            final DiscoveryNode masterToPing = masterNode;
            if (masterToPing == null) {
                // master is null, should not happen, but we are still running, so reschedule
                threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
                return;
            }
            transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION,
                    new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()),
                    options().withHighType().withTimeout(pingRetryTimeout),
                    new BaseTransportResponseHandler<MasterPingResponseResponse>() {
                        @Override public MasterPingResponseResponse newInstance() {
                            return new MasterPingResponseResponse();
                        }

                        @Override public void handleResponse(MasterPingResponseResponse response) {
                            if (!running) {
                                return;
                            }
                            // reset the counter, we got a good result
                            MasterFaultDetection.this.retryCount = 0;
                            // check that the master node did not get switched on us; if it did, we simply return without rescheduling
                            if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
                                if (!response.connectedToMaster) {
                                    logger.trace("[master] [{}] does not have us registered with it...", masterToPing);
                                    notifyDisconnectedFromMaster();
                                }
                                // we don't stop on disconnection from master, we keep pinging it
                                threadPool.schedule(pingInterval, ThreadPool.Names.SAME, MasterPinger.this);
                            }
                        }

                        @Override public void handleException(TransportException exp) {
                            if (!running) {
                                return;
                            }
                            if (exp instanceof ConnectTransportException) {
                                // ignore this one, we already handle it by registering a connection listener
                                return;
                            }
                            synchronized (masterNodeMutex) {
                                // check if the master node did not get switched on us...
                                if (masterToPing.equals(MasterFaultDetection.this.masterNode())) {
                                    if (exp.getCause() instanceof NoLongerMasterException) {
                                        logger.debug("[master] pinging a master {} that is no longer a master", masterNode, pingRetryCount, pingRetryTimeout);
                                        notifyMasterFailure(masterToPing, "no longer master");
                                    }
                                    int retryCount = ++MasterFaultDetection.this.retryCount;
                                    logger.trace("[master] failed to ping [{}], retry [{}] out of [{}]", exp, masterNode, retryCount, pingRetryCount);
                                    if (retryCount >= pingRetryCount) {
                                        logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout);
                                        // not good, failure
                                        notifyMasterFailure(masterToPing, "failed to ping, tried [" + pingRetryCount + "] times, each with  maximum [" + pingRetryTimeout + "] timeout");
                                    } else {
                                        // resend the request, not reschedule, rely on send timeout
                                        transportService.sendRequest(masterToPing, MasterPingRequestHandler.ACTION,
                                                new MasterPingRequest(nodesProvider.nodes().localNode().id(), masterToPing.id()),
                                                options().withHighType().withTimeout(pingRetryTimeout), this);
                                    }
                                }
                            }
                        }
View Full Code Here

            // recovery from primary
            IndexShardRoutingTable shardRoutingTable = routingTable.index(shardRouting.index()).shard(shardRouting.id());
            for (ShardRouting entry : shardRoutingTable) {
                if (entry.primary() && entry.started()) {
                    // only recover from started primary, if we can't find one, we will do it next round
                    final DiscoveryNode sourceNode = nodes.get(entry.currentNodeId());
                    try {
                        // we are recovering a backup from a primary, so no need to mark it as relocated
                        final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
                        recoveryTarget.startRecovery(request, false, new PeerRecoveryListener(request, shardRouting, indexService));
                    } catch (Exception e) {
                        handleRecoveryFailure(indexService, shardRouting, true, e);
                        break;
                    }
                    break;
                }
            }
        } else {
            if (shardRouting.relocatingNodeId() == null) {
                // we are the first primary, recover from the gateway
                // if it's a post-API allocation, the index should exist
                boolean indexShouldExists = indexShardRouting.allocatedPostApi();
                IndexShardGatewayService shardGatewayService = indexService.shardInjector(shardId).getInstance(IndexShardGatewayService.class);
                shardGatewayService.recover(indexShouldExists, new IndexShardGatewayService.RecoveryListener() {
                    @Override public void onRecoveryDone() {
                        shardStateAction.shardStarted(shardRouting, "after recovery from gateway");
                    }

                    @Override public void onIgnoreRecovery(String reason) {
                    }

                    @Override public void onRecoveryFailed(IndexShardGatewayRecoveryException e) {
                        handleRecoveryFailure(indexService, shardRouting, true, e);
                    }
                });
            } else {
                // relocating primaries, recovery from the relocating shard
                final DiscoveryNode sourceNode = nodes.get(shardRouting.relocatingNodeId());
                try {
                    // we don't mark this one as relocated at the end; requests are in any case routed to both nodes while it's relocating,
                    // and that way we handle the edge case where it's marked as relocated and we might need to roll it back...
                    final StartRecoveryRequest request = new StartRecoveryRequest(indexShard.shardId(), sourceNode, nodes.localNode(), false, indexShard.store().list());
                    recoveryTarget.startRecovery(request, false, new PeerRecoveryListener(request, shardRouting, indexService));
View Full Code Here

@Test
public class SimpleNettyTransportTests extends AbstractSimpleTransportTests {

    @Override protected void build() {
        serviceA = new TransportService(settingsBuilder().put("name", "A").build(),
                new NettyTransport(settingsBuilder().put("name", "A").build(), threadPool), threadPool).start();
        serviceANode = new DiscoveryNode("A", serviceA.boundAddress().publishAddress());

        serviceB = new TransportService(settingsBuilder().put("name", "B").build(),
                new NettyTransport(settingsBuilder().put("name", "B").build(), threadPool), threadPool).start();
        serviceBNode = new DiscoveryNode("B", serviceB.boundAddress().publishAddress());
    }
View Full Code Here

        super.testVoidMessageCompressed();    //To change body of overridden methods use File | Settings | File Templates.
    }

    @Test public void testConnectException() {
        try {
            serviceA.connectToNode(new DiscoveryNode("C", new InetSocketTransportAddress("localhost", 9876)));
            assert false;
        } catch (ConnectTransportException e) {
//            e.printStackTrace();
            // all is well
        }
View Full Code Here

    @Override protected void doStart() throws ElasticSearchException {
        Map<String, String> nodeAttributes = buildCommonNodesAttributes(settings);
        // note, we rely on the fact that it's a new id each time we start, see FD and "kill -9" handling
        String nodeId = UUID.randomBase64UUID();
        localNode = new DiscoveryNode(settings.get("name"), nodeId, transportService.boundAddress().publishAddress(), nodeAttributes);
        latestDiscoNodes = new DiscoveryNodes.Builder().put(localNode).localNodeId(localNode.id()).build();
        nodesFD.updateNodes(latestDiscoNodes);
        pingService.start();

        // do the join on a different thread, the DiscoveryService waits for 30s anyhow till it is discovered
View Full Code Here

        while (retry) {
            if (lifecycle.stoppedOrClosed()) {
                return;
            }
            retry = false;
            DiscoveryNode masterNode = findMaster();
            if (masterNode == null) {
                retry = true;
                continue;
            }
            if (localNode.equals(masterNode)) {
View Full Code Here
