Package org.elasticsearch.action.index

Examples of org.elasticsearch.action.index.IndexResponse
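
The snippets below come from Elasticsearch 1.x tests and transport actions. As a minimal, self-contained sketch of the API they exercise (assuming an Elasticsearch 1.x Client is already available and using illustrative index/type/id names), an IndexResponse is obtained from a prepareIndex(...) call and then inspected for the concrete index, type, id, version, and whether the document was newly created:

import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;

public class IndexResponseExample {

    // Index a single document and print the metadata carried by the IndexResponse.
    // The Client instance and the index/type/id values are assumptions for illustration.
    public static void indexAndInspect(Client client) {
        IndexResponse response = client.prepareIndex("test", "type1", "1")
                .setSource("field", "value")
                .get();

        System.out.println("index:   " + response.getIndex());   // concrete index (aliases are resolved)
        System.out.println("type:    " + response.getType());
        System.out.println("id:      " + response.getId());
        System.out.println("version: " + response.getVersion()); // starts at 1, bumped on every write to the same id
        System.out.println("created: " + response.isCreated());  // true for a new document, false for an overwrite
    }
}

The examples that follow use the same accessors, either inside assertThat(...) checks in integration tests or when translating bulk item results into REST status codes.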


        createIndexWithAlias();
        ensureYellow("test");

        int numDocs = iterations(10, 50);
        for (int i = 0; i < numDocs; i++) {
            IndexResponse indexResponse = client().prepareIndex(indexOrAlias(), "type", Integer.toString(i)).setSource("field", "value-" + i).get();
            assertThat(indexResponse.isCreated(), equalTo(true));
            assertThat(indexResponse.getIndex(), equalTo("test"));
            assertThat(indexResponse.getType(), equalTo("type"));
            assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
        }
        refresh();

        String docId = Integer.toString(randomIntBetween(0, numDocs - 1));
        GetResponse getResponse = client().prepareGet(indexOrAlias(), "type", docId).get();


                            } else {
                                builder.field(Fields.STATUS, RestStatus.NOT_FOUND.getStatus());
                            }
                            builder.field(Fields.FOUND, deleteResponse.isFound());
                        } else if (itemResponse.getResponse() instanceof IndexResponse) {
                            IndexResponse indexResponse = itemResponse.getResponse();
                            if (indexResponse.isCreated()) {
                                builder.field(Fields.STATUS, RestStatus.CREATED.getStatus());
                            } else {
                                builder.field(Fields.STATUS, RestStatus.OK.getStatus());
                            }
                        } else if (itemResponse.getResponse() instanceof UpdateResponse) {

                                logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits());
                                try {
                                    id = Integer.toString(idGenerator.incrementAndGet());
                                    int shard = ((InternalTestCluster) cluster()).getInstance(DjbHashFunction.class).hash(id) % numPrimaries;
                                    logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
                                    IndexResponse response = client.prepareIndex("test", "type", id).setSource("{}").setTimeout("1s").get();
                                    assertThat(response.getVersion(), equalTo(1L));
                                    ackedDocs.put(id, node);
                                    logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
                                } catch (ElasticsearchException e) {
                                    exceptedExceptions.add(e);
                                    logger.trace("[{}] failed id [{}] through node [{}]", e, name, id, node);

        scheme.startDisrupting();
        ensureStableCluster(2, notIsolatedNode);
        assertFalse(client(notIsolatedNode).admin().cluster().prepareHealth("test").setWaitForYellowStatus().get().isTimedOut());


        IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
        assertThat(indexResponse.getVersion(), equalTo(1L));

        logger.info("Verifying if document exists via node[" + notIsolatedNode + "]");
        GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
                .setPreference("_local")
                .get();
        assertThat(getResponse.isExists(), is(true));
        assertThat(getResponse.getVersion(), equalTo(1L));
        assertThat(getResponse.getId(), equalTo(indexResponse.getId()));

        scheme.stopDisrupting();

        ensureStableCluster(3);
        ensureGreen("test");

        for (String node : nodes) {
            logger.info("Verifying if document exists after isolating node[" + isolatedNode + "] via node[" + node + "]");
            getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId())
                    .setPreference("_local")
                    .get();
            assertThat(getResponse.isExists(), is(true));
            assertThat(getResponse.getVersion(), equalTo(1L));
            assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
        }
    }

        logger.info("--> aliasing index [test] with [alias1]");
        assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1"));

        logger.info("--> indexing against [alias1], should work now");
        IndexResponse indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
        assertThat(indexResponse.getIndex(), equalTo("test"));

        logger.info("--> creating index [test_x]");
        createIndex("test_x");

        ensureGreen();

        logger.info("--> remove [alias1], Aliasing index [test_x] with [alias1]");
        assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1").addAlias("test_x", "alias1"));

        logger.info("--> indexing against [alias1], should work against [test_x]");
        indexResponse = client().index(indexRequest("alias1").type("type1").id("1").source(source("1", "test"))).actionGet();
        assertThat(indexResponse.getIndex(), equalTo("test_x"));
    }

        // now, start a data node and see that shards get allocated to it
        internalCluster().startNode(settingsBuilder().put("node.data", true).build());
        assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false));

        IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
        assertThat(indexResponse.getId(), equalTo("1"));
        assertThat(indexResponse.getType(), equalTo("type1"));
    }

    private void canIndexDocument(String index) {
        try {
            IndexRequestBuilder builder = client().prepareIndex(index, "zzz");
            builder.setSource("foo", "bar");
            IndexResponse r = builder.execute().actionGet();
            assertThat(r, notNullValue());
        } catch (ClusterBlockException e) {
            fail("indexing into [" + index + "] should not be blocked: " + e.getMessage());
        }
    }

                preVersionTypes[requestIndex] = indexRequest.versionType();
                try {
                    try {
                        WriteResult result = shardIndexOperation(request, indexRequest, clusterState, indexShard, true);
                        // add the response
                        IndexResponse indexResponse = result.response();
                        setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(), indexResponse));
                        if (result.mappingTypeToUpdate != null) {
                            mappingTypesToUpdate.add(result.mappingTypeToUpdate);
                        }
                        if (result.op != null) {
                            if (ops == null) {
                                ops = new Engine.IndexingOperation[request.items().length];
                            }
                            ops[requestIndex] = result.op;
                        }
                    } catch (WriteFailure e) {
                        if (e.mappingTypeToUpdate != null) {
                            mappingTypesToUpdate.add(e.mappingTypeToUpdate);
                        }
                        throw e.getCause();
                    }
                } catch (Throwable e) {
                    // rethrow the failure if we are going to retry on the primary and let the parent failure handler deal with it
                    if (retryPrimaryException(e)) {
                        // restore updated versions...
                        for (int j = 0; j < requestIndex; j++) {
                            applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
                        }
                        for (String mappingTypeToUpdate : mappingTypesToUpdate) {
                            DocumentMapper docMapper = indexService.mapperService().documentMapper(mappingTypeToUpdate);
                            if (docMapper != null) {
                                mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), docMapper, indexService.indexUUID());
                            }
                        }
                        throw (ElasticsearchException) e;
                    }
                    if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
                        logger.trace("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
                    } else {
                        logger.debug("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
                    }
                    // if it's a conflict failure and we already executed the request on a primary (and we execute it
                    // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
                    // then just use the response we got from the successful execution
                    if (item.getPrimaryResponse() != null && isConflictException(e)) {
                        setResponse(item, item.getPrimaryResponse());
                    } else {
                        setResponse(item, new BulkItemResponse(item.id(), indexRequest.opType().lowercase(),
                                new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), e)));
                    }
                }
            } else if (item.request() instanceof DeleteRequest) {
                DeleteRequest deleteRequest = (DeleteRequest) item.request();
                preVersions[requestIndex] = deleteRequest.version();
                preVersionTypes[requestIndex] = deleteRequest.versionType();

                try {
                    // add the response
                    DeleteResponse deleteResponse = shardDeleteOperation(request, deleteRequest, indexShard).response();
                    setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE, deleteResponse));
                } catch (Throwable e) {
                    // rethrow the failure if we are going to retry on the primary and let the parent failure handler deal with it
                    if (retryPrimaryException(e)) {
                        // restore updated versions...
                        for (int j = 0; j < requestIndex; j++) {
                            applyVersion(request.items()[j], preVersions[j], preVersionTypes[j]);
                        }
                        throw (ElasticsearchException) e;
                    }
                    if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) {
                        logger.trace("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
                    } else {
                        logger.debug("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
                    }
                    // if it's a conflict failure and we already executed the request on a primary (and we execute it
                    // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
                    // then just use the response we got from the successful execution
                    if (item.getPrimaryResponse() != null && isConflictException(e)) {
                        setResponse(item, item.getPrimaryResponse());
                    } else {
                        setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
                                new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), e)));
                    }
                }
            } else if (item.request() instanceof UpdateRequest) {
                UpdateRequest updateRequest = (UpdateRequest) item.request();
                preVersions[requestIndex] = updateRequest.version();
                preVersionTypes[requestIndex] = updateRequest.versionType();
                // We need to do the requested retries plus the initial attempt. We don't loop with < 1 + retry_on_conflict because retry_on_conflict may be Integer.MAX_VALUE, and adding 1 to it would overflow.
                for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
                    UpdateResult updateResult;
                    try {
                        updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard);
                    } catch (Throwable t) {
                        updateResult = new UpdateResult(null, null, false, t, null);
                    }
                    if (updateResult.success()) {

                        switch (updateResult.result.operation()) {
                            case UPSERT:
                            case INDEX:
                                WriteResult result = updateResult.writeResult;
                                IndexRequest indexRequest = updateResult.request();
                                BytesReference indexSourceAsBytes = indexRequest.source();
                                // add the response
                                IndexResponse indexResponse = result.response();
                                UpdateResponse updateResponse = new UpdateResponse(indexResponse.getIndex(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated());
                                if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
                                    Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
                                    updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, shardRequest.request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
                                }
                                item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
                                setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
                                if (result.mappingTypeToUpdate != null) {
                                    mappingTypesToUpdate.add(result.mappingTypeToUpdate);

        }

        assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());


        IndexResponse indexResponse = new IndexResponse(request.index(), indexRequest.type(), indexRequest.id(), version, created);
        return new WriteResult(indexResponse, mappingTypeToUpdate, op);
    }

            replicaNode = nodes.get(0);
        }
        logger.info("--> primary shard is on {}", primaryNode);

        // Index a document to make sure everything works well
        IndexResponse resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "bar").get();
        assertThat("document exists on primary node",
                internalCluster().client(primaryNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
                equalTo(true));
        assertThat("document exists on replica node",
                internalCluster().client(replicaNode).prepareGet(INDEX, "doc", resp.getId()).setPreference("_only_local").get().isExists(),
                equalTo(true));

        // Disrupt the network so indexing requests fail to replicate
        logger.info("--> preventing index/replica operations");
        TransportService mockTransportService = internalCluster().getInstance(TransportService.class, primaryNode);
        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
                internalCluster().getInstance(Discovery.class, replicaNode).localNode(),
                ImmutableSet.of(IndexAction.NAME + "[r]")
        );
        mockTransportService = internalCluster().getInstance(TransportService.class, replicaNode);
        ((MockTransportService) mockTransportService).addFailToSendNoConnectRule(
                internalCluster().getInstance(Discovery.class, primaryNode).localNode(),
                ImmutableSet.of(IndexAction.NAME + "[r]")
        );

        logger.info("--> indexing into primary");
        // the replica shard should now be marked as failed because the replication operation will fail
        resp = internalCluster().client(primaryNode).prepareIndex(INDEX, "doc").setSource("foo", "baz").get();
        // wait until the cluster reaches an exact yellow state, meaning the replica has been marked as failed
        assertBusy(new Runnable() {
            @Override
            public void run() {
                assertThat(client().admin().cluster().prepareHealth().get().getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            }
        });
        assertThat("document should still be indexed and available",
                client().prepareGet(INDEX, "doc", resp.getId()).get().isExists(), equalTo(true));

        state = getNodeClusterState(randomFrom(nodes.toArray(Strings.EMPTY_ARRAY)));
        RoutingNodes rn = state.routingNodes();
        logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",
                rn.shards(new Predicate<MutableShardRouting>() {
