Package org.elasticsearch.index.mapper

Examples of org.elasticsearch.index.mapper.Uid
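
In pre-2.0 Elasticsearch, Uid models the composite _uid value that identifies a document within an index: the document type and the id joined by a delimiter. Every snippet below performs one of the same two moves: building that composite string for a Lucene term, or splitting a stored term back into its type and id. As a minimal, self-contained sketch of the round-trip (SimpleUid is a hypothetical stand-in for the real class, and the '#' delimiter is an assumption inferred from common usage, not quoted from the source):

    public final class SimpleUid {
        // assumption: the real Uid joins type and id with '#'
        public static final char DELIMITER = '#';

        private final String type;
        private final String id;

        private SimpleUid(String type, String id) {
            this.type = type;
            this.id = id;
        }

        // parse a stored _uid term ("type#id") back into its parts
        public static SimpleUid createUid(String uid) {
            int delimiterIndex = uid.indexOf(DELIMITER);
            if (delimiterIndex == -1) {
                throw new IllegalArgumentException("uid [" + uid + "] has no delimiter");
            }
            return new SimpleUid(uid.substring(0, delimiterIndex), uid.substring(delimiterIndex + 1));
        }

        // build the composite _uid value from a type and an id
        public static String createUid(String type, String id) {
            return type + DELIMITER + id;
        }

        public String type() { return type; }

        public String id() { return id; }

        public static void main(String[] args) {
            SimpleUid uid = SimpleUid.createUid(createUid("tweet", "42"));
            System.out.println(uid.type() + " / " + uid.id()); // prints: tweet / 42
        }
    }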


                        // (fragment: continuation of an Engine.Index being rebuilt from the translog)
                        .routing(index.routing()).parent(index.parent())).version(index.version())
                        .origin(Engine.Operation.Origin.RECOVERY));
                break;
            case DELETE:
                Translog.Delete delete = (Translog.Delete) operation;
                // the stored uid term text is the composite "type#id"; split it back into parts
                Uid uid = Uid.createUid(delete.uid().text());
                engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid()).version(delete.version())
                        .origin(Engine.Operation.Origin.RECOVERY));
                break;
            case DELETE_BY_QUERY:
                Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
                innerDeleteByQuery(deleteByQuery.source(), deleteByQuery.filteringAliases(), deleteByQuery.types());
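This fragment sits inside a translog replay loop that runs during shard recovery. Below is a hedged skeleton of that dispatch, showing where the Uid split happens; the loop shape and the non-DELETE case bodies are illustrative, and only the DELETE case mirrors the snippet:

    for (Translog.Operation operation : operations) {
        switch (operation.opType()) {
            case CREATE:
            case SAVE:
                // rebuild an engine index/create operation from the stored source
                break;
            case DELETE:
                Translog.Delete delete = (Translog.Delete) operation;
                // the translog stores only the composite uid term; split it so the
                // engine delete can carry the type and id separately
                Uid uid = Uid.createUid(delete.uid().text());
                engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid())
                        .version(delete.version())
                        .origin(Engine.Operation.Origin.RECOVERY));
                break;
            case DELETE_BY_QUERY:
                // re-run the stored query, filtering aliases and types included
                break;
        }
    }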


                        do {
                            Term term = termEnum.term();
                            // '!=' is deliberate here: field names are interned, so reference
                            // comparison is a cheap end-of-field check
                            if (term == null || term.field() != field) break;
                            // TODO we can optimize this, since type is the prefix, and we get terms ordered
                            // so, only need to move to the next type once its different
                            Uid uid = Uid.createUid(term.text());

                            TypeBuilder typeBuilder = readerBuilder.get(uid.type());
                            if (typeBuilder == null) {
                                typeBuilder = new TypeBuilder(reader);
                                readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
                            }

                            BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
                            termDocs.seek(termEnum);
                            while (termDocs.next()) {
                                // when traversing, make sure to ignore deleted docs, so the key->docId will be correct
                                if (!reader.isDeleted(termDocs.doc())) {
                                    typeBuilder.idToDoc.put(idAsBytes, termDocs.doc());
                                }
                            }
                        } while (termEnum.next());
                    } finally {
                        termDocs.close();
                        termEnum.close();
                    }
                }

                // now, go and load the docId->parentId map

                for (IndexReader reader : readers) {
                    if (idReaders.containsKey(reader.getCoreCacheKey())) {
                        // no need, continue
                        continue;
                    }

                    Map<String, TypeBuilder> readerBuilder = builders.get(reader.getCoreCacheKey());

                    String field = StringHelper.intern(ParentFieldMapper.NAME);
                    TermDocs termDocs = reader.termDocs();
                    TermEnum termEnum = reader.terms(new Term(field));
                    try {
                        do {
                            Term term = termEnum.term();
                            // as above: interned field names make '!=' a safe end-of-field check
                            if (term == null || term.field() != field) break;
                            // TODO we can optimize this, since type is the prefix, and we get terms ordered
                            // so, only need to move to the next type once its different
                            Uid uid = Uid.createUid(term.text());

                            TypeBuilder typeBuilder = readerBuilder.get(uid.type());
                            if (typeBuilder == null) {
                                typeBuilder = new TypeBuilder(reader);
                                readerBuilder.put(StringHelper.intern(uid.type()), typeBuilder);
                            }

                            BytesWrap idAsBytes = checkIfCanReuse(builders, new BytesWrap(uid.id()));
                            boolean added = false; // optimize for when all the docs are deleted for this id

                            termDocs.seek(termEnum);
                            while (termDocs.next()) {
                                // ignore deleted docs while we are at it
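For the pre-4.0 Lucene enumeration API used above, here is a hedged, self-contained sketch of the same term-walking pattern: position a TermEnum at the first term of a field, then visit every live document for each term via TermDocs. The field name and the flat result map are illustrative; the real code fills per-type builders instead:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.TermDocs;
    import org.apache.lucene.index.TermEnum;

    public final class UidTermWalker {

        // map each live uid term to the doc id that carries it
        public static Map<String, Integer> mapUidsToDocs(IndexReader reader) throws IOException {
            Map<String, Integer> uidToDoc = new HashMap<String, Integer>();
            String field = "_uid";
            TermDocs termDocs = reader.termDocs();
            TermEnum termEnum = reader.terms(new Term(field)); // seeks to the first term >= (field, "")
            try {
                do {
                    Term term = termEnum.term();
                    // stop once we run past this field's term range; the real code
                    // uses '!=' here because it interns the field name first
                    if (term == null || !term.field().equals(field)) {
                        break;
                    }
                    termDocs.seek(termEnum);
                    while (termDocs.next()) {
                        if (!reader.isDeleted(termDocs.doc())) { // skip deleted docs
                            uidToDoc.put(term.text(), termDocs.doc()); // at most one live doc per uid
                        }
                    }
                } while (termEnum.next());
            } finally {
                termDocs.close();
                termEnum.close();
            }
            return uidToDoc;
        }
    }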

        InternalSearchHit[] hits = new InternalSearchHit[context.docIdsToLoadSize()];
        for (int index = 0; index < context.docIdsToLoadSize(); index++) {
            int docId = context.docIdsToLoad()[context.docIdsToLoadFrom() + index];
            Document doc = loadDocument(context, fieldSelector, docId);
            Uid uid = extractUid(context, doc);

            DocumentMapper documentMapper = context.mapperService().documentMapper(uid.type());

            if (documentMapper == null) {
                throw new TypeMissingException(new Index(context.shardTarget().index()), uid.type(), "failed to find type loaded for doc [" + uid.id() + "]");
            }

            byte[] source = extractSource(doc, documentMapper);

            // get the version

            InternalSearchHit searchHit = new InternalSearchHit(docId, uid.id(), uid.type(), source, null);
            hits[index] = searchHit;

            for (Object oField : doc.getFields()) {
                Fieldable field = (Fieldable) oField;
                String name = field.name();
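A hedged sketch of what an extractUid helper like the one called above may boil down to (ignoring the search-context parameter the real helper takes): read the stored _uid field off the loaded document and split it. The class, the field lookup, and the error message are assumptions, not the actual fetch-phase code:

    import org.apache.lucene.document.Document;
    import org.elasticsearch.index.mapper.Uid;

    final class UidExtraction {
        // hypothetical helper: pull the composite "type#id" out of a stored document
        static Uid extractUid(Document doc) {
            String uid = doc.get("_uid"); // the stored composite value
            if (uid == null) {
                throw new IllegalStateException("document has no _uid stored field");
            }
            return Uid.createUid(uid);
        }
    }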

    }

    @Override
    public void collect(int doc) throws IOException {
        lookup.setNextDocId(doc);
        // the _uid doc value holds the composite "type#id"; parse it into a Uid
        Uid uid = Uid.createUid(((ScriptDocValues.Strings) lookup.doc().get("_uid")).getValue());
        collect(uid);
    }
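The next collector, apparently part of the TTL purge path, reaches the same Uid a different way: rather than doc values, it loads stored fields through a visitor, which also yields the document's routing: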

        public void collect(int doc) {
            try {
                UidAndRoutingFieldsVisitor fieldsVisitor = new UidAndRoutingFieldsVisitor();
                context.reader().document(doc, fieldsVisitor);
                Uid uid = fieldsVisitor.uid();
                final long version = Versions.loadVersion(context.reader(), new Term(UidFieldMapper.NAME, uid.toBytesRef()));
                docsToPurge.add(new DocToPurge(uid.type(), uid.id(), version, fieldsVisitor.routing()));
            } catch (Exception e) {
                logger.trace("failed to collect doc", e);
            }
        }
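DocToPurge is internal to that purge service; a hedged sketch of its apparent shape, inferred only from the constructor call above:

    final class DocToPurge {
        final String type;     // from uid.type()
        final String id;       // from uid.id()
        final long version;    // the doc's current version, for a safe conditional delete
        final String routing;  // needed to route the delete to the right shard

        DocToPurge(String type, String id, long version, String routing) {
            this.type = type;
            this.id = id;
            this.version = version;
            this.routing = routing;
        }
    }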

                    engine.index(engineIndex);
                    indexOperation = engineIndex;
                    break;
                case DELETE:
                    Translog.Delete delete = (Translog.Delete) operation;
                    // as in the earlier recovery snippet: split the composite uid term back apart
                    Uid uid = Uid.createUid(delete.uid().text());
                    engine.delete(new Engine.Delete(uid.type(), uid.id(), delete.uid(), delete.version(),
                            delete.versionType().versionTypeForReplicationAndRecovery(), Engine.Operation.Origin.RECOVERY, System.nanoTime(), false));
                    break;
                case DELETE_BY_QUERY:
                    Translog.DeleteByQuery deleteByQuery = (Translog.DeleteByQuery) operation;
                    engine.delete(prepareDeleteByQuery(deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY, deleteByQuery.types()));
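Note how this newer recovery variant differs from the first snippet above: version, version type (mapped through versionTypeForReplicationAndRecovery()), and origin are passed into the Engine.Delete constructor rather than chained on through setters.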

                if (versionValue != null) {
                    if (versionValue.delete()) {
                        return GetResult.NOT_EXISTS;
                    }
                    if (get.versionType().isVersionConflictForReads(versionValue.version(), get.version())) {
                        Uid uid = Uid.createUid(get.uid().text());
                        throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), versionValue.version(), get.version());
                    }
                    if (!get.loadSource()) {
                        return new GetResult(true, versionValue.version(), null);
                    }
                    Translog.Operation op = translog.read(versionValue.translogLocation());
                    if (op != null) {
                        return new GetResult(true, versionValue.version(), op.getSource());
                    }
                }
            }

            // no version, get the version from the index, we know that we refresh on flush
            final Searcher searcher = acquireSearcher("get");
            final Versions.DocIdAndVersion docIdAndVersion;
            try {
                docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
            } catch (Throwable e) {
                Releasables.closeWhileHandlingException(searcher);
                //TODO: A better exception goes here
                throw new EngineException(shardId(), "Couldn't resolve version", e);
            }

            if (docIdAndVersion != null) {
                if (get.versionType().isVersionConflictForReads(docIdAndVersion.version, get.version())) {
                    Releasables.close(searcher);
                    Uid uid = Uid.createUid(get.uid().text());
                    throw new VersionConflictEngineException(shardId, uid.type(), uid.id(), docIdAndVersion.version, get.version());
                }
            }

            if (docIdAndVersion != null) {
                // don't release the searcher on this path, it is the responsibility of the caller to call GetResult.release
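Notice that the uid is only split when a conflict is actually about to be reported, keeping the happy path free of string parsing. Below is a hedged sketch of that read-side check in isolation; the helper, its exception type, and the "negative expected version means match any" convention are assumptions, not the engine's actual semantics:

    import org.elasticsearch.index.mapper.Uid;

    final class ReadVersionCheck {
        // hypothetical helper mirroring the isVersionConflictForReads call above
        static void ensureNoReadConflict(long currentVersion, long expectedVersion, String uidText) {
            if (expectedVersion >= 0 && currentVersion != expectedVersion) {
                // split "type#id" only on the error path, where a readable message matters
                Uid uid = Uid.createUid(uidText);
                throw new IllegalStateException("version conflict for [" + uid.type() + "][" + uid.id()
                        + "]: current [" + currentVersion + "], expected [" + expectedVersion + "]");
            }
        }
    }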
