Package org.exist.numbering

Examples of org.exist.numbering.NodeId
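NodeId is eXist-db's abstraction for hierarchical node labels; DLN (Dynamic Level Numbering) is the standard implementation. The excerpts below show NodeIds being decoded from index keys, compared along XPath axes, and streamed into index files. As a warm-up, a minimal sketch of the basic operations, assuming eXist's org.exist.numbering package is on the classpath (method names follow the NodeId interface, but treat exact signatures as unverified):

    import org.exist.numbering.DLN;
    import org.exist.numbering.NodeId;

    public class NodeIdBasics {
        public static void main(String[] args) {
            // A DLN is written as dot-separated level values, e.g. "1.2.3".
            NodeId root = new DLN("1");
            NodeId child = root.newChild();       // "1.1"
            NodeId sibling = child.nextSibling(); // "1.2"

            // Structural tests work on the labels alone, without
            // touching the stored document.
            System.out.println(sibling.isChildOf(root));      // true
            System.out.println(sibling.isDescendantOf(root)); // true
        }
    }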


        public boolean indexInfo(Value value, long pointer) throws TerminatedException {
            if (parent != null) {
                parent.getContext().proceed(parent);
            }
            final byte[] key = value.getData();
            final NodeId nodeId = readNodeId(key, pointer);
            final DocumentImpl doc = docs.getDoc(readDocId(key));
            if (doc != null) {
                if (selector == null) {
                    final NodeProxy storedNode = new NodeProxy(doc, nodeId,
                        type == ElementValue.ATTRIBUTE ? Node.ATTRIBUTE_NODE : Node.ELEMENT_NODE, pointer);
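The excerpt above decodes a document id and a NodeId from a raw index key. A recurring pattern in these examples (see the Lucene snippets further down) stores the unit count as a short in front of the node id bytes and rebuilds the id through a NodeIdFactory. A hedged sketch of that round trip; using DLNFactory as the concrete factory is an assumption here:

    import org.exist.numbering.DLN;
    import org.exist.numbering.DLNFactory;
    import org.exist.numbering.NodeId;
    import org.exist.util.ByteConversion;

    public class NodeIdBytes {
        public static void main(String[] args) {
            NodeId id = new DLN("1.2.3");

            // Layout used in the excerpts: [units: short][node id bytes].
            byte[] data = new byte[2 + id.size()];
            ByteConversion.shortToByte((short) id.units(), data, 0);
            id.serialize(data, 2);

            int units = ByteConversion.byteToShort(data, 0);
            NodeId restored = new DLNFactory().createFromData(units, data, 2);
            System.out.println(id.equals(restored)); // true
        }
    }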


        public boolean indexInfo(Value value, long pointer) throws TerminatedException {
            if (parent != null) {
                parent.getContext().proceed(parent);
            }
            final NodeId nodeId = readNodeId(value.getData(), pointer);

            boolean match = axis == Constants.DESCENDANT_SELF_AXIS || axis == Constants.DESCENDANT_ATTRIBUTE_AXIS;
            if (!match) {
                final int relation = nodeId.computeRelation(ancestor.getNodeId());
                match = (((axis == Constants.CHILD_AXIS) || (axis == Constants.ATTRIBUTE_AXIS)) && (relation == NodeId.IS_CHILD)) ||
                    ((axis == Constants.DESCENDANT_AXIS) && ((relation == NodeId.IS_DESCENDANT) || (relation == NodeId.IS_CHILD)));
            }
            if (match) {
                final NodeProxy storedNode =
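The excerpt above maps XPath axes onto NodeId.computeRelation(), which classifies one id against a candidate ancestor using only the labels. A small sketch of the two relations the axis test distinguishes (constant names taken from the excerpt; return values for unrelated nodes are left aside):

    import org.exist.numbering.DLN;
    import org.exist.numbering.NodeId;

    public class AxisRelation {
        public static void main(String[] args) {
            NodeId ancestor = new DLN("1.2");
            NodeId child = new DLN("1.2.5");
            NodeId grandChild = new DLN("1.2.5.1");

            // child axis: only direct children match
            System.out.println(child.computeRelation(ancestor) == NodeId.IS_CHILD); // true
            // descendant axis: deeper descendants report IS_DESCENDANT
            System.out.println(grandChild.computeRelation(ancestor) == NodeId.IS_DESCENDANT); // true
        }
    }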

                    os.writeInt(occurences.getTermCount());
                    //Mark position
                    final int lenOffset = os.position();
                    //Dummy value: the actual one will be written below
                    os.writeFixedInt(0);
                    NodeId previous = null;
                    for (int m = 0; m < occurences.getSize(); ) {
                        try {
                            previous = occurences.getNode(m).write(previous, os);
                        } catch (final IOException e) {
                            LOG.error("IOException while writing fulltext index: " + e.getMessage(), e);
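The writer above reserves a fixed-width slot at lenOffset (the writeFixedInt(0) dummy) and patches the real length in once the delta-coded node list has been written; a fixed-width int is used, presumably, because a variable-length encoding could not be overwritten in place. The same back-patching pattern, shown with java.nio.ByteBuffer rather than eXist's VariableByteOutputStream so it is self-contained:

    import java.nio.ByteBuffer;

    public class BackpatchLength {
        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(64);
            int lenOffset = buf.position();
            buf.putInt(0);                // dummy value, patched below

            int start = buf.position();
            buf.put(new byte[]{1, 2, 3}); // stand-in for the delta-coded node ids

            buf.putInt(lenOffset, buf.position() - start); // patch the real length
        }
    }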

                                os.writeFixedInt(length);
                                is.copyRaw(os, length);
                            } else {
                                // data are related to our section and document:
                                // feed the new list with the GIDs
                                NodeId previous = null;
                                for (int m = 0; m < termCount; m++) {
                                    NodeId nodeId = broker.getBrokerPool()
                                        .getNodeFactory().createFromStream(previous, is);
                                    previous = nodeId;
                                    final int freq = is.readInt();
                                    // add the node to the new list if it is not
                                    // in the list of removed nodes
                                    if (!storedOccurencesList.contains(nodeId)) {
                                        for (int n = 0; n < freq; n++) {
                                            newOccurencesList.add(nodeId, is.readInt());
                                        }
                                    } else {
                                        is.skip(freq);
                                    }
                                }
                            }
                        }
                        //append the data from the new list
                        if (newOccurencesList.getSize() > 0) {
                            //Don't forget this one
                            newOccurencesList.sort();
                            os.writeInt(this.doc.getDocId());
                            os.writeByte(currentSection);
                            os.writeInt(newOccurencesList.getTermCount());
                            //Mark position
                            final int lenOffset = os.position();
                            //Dummy value: the actual one will be written below
                            os.writeFixedInt(0);
                            NodeId previous = null;
                            for (int m = 0; m < newOccurencesList.getSize();) {
                                previous = newOccurencesList.getNode(m).write(previous, os);
                                int freq = newOccurencesList.getOccurrences(m);
                                os.writeInt(freq);
                                for (int n = 0; n < freq; n++) {
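The two fulltext excerpts above are mirror halves of one encoding: NodeId.write(previous, os) delta-compresses each id against the one before it and returns the id to thread into the next call, while NodeIdFactory.createFromStream(previous, is) replays the deltas on read. A round-trip sketch of that pattern; the stream classes (VariableByteOutputStream, VariableByteArrayInput) and the toByteArray() call are assumptions, not verified against a specific eXist version:

    import org.exist.numbering.DLN;
    import org.exist.numbering.DLNFactory;
    import org.exist.numbering.NodeId;
    import org.exist.storage.io.VariableByteArrayInput;
    import org.exist.storage.io.VariableByteOutputStream;

    public class DeltaRoundTrip {
        public static void main(String[] args) throws Exception {
            NodeId[] ids = { new DLN("1.2.1"), new DLN("1.2.2"), new DLN("1.2.2.1") };

            // Write: each id is coded relative to the previous one.
            VariableByteOutputStream os = new VariableByteOutputStream();
            NodeId previous = null;
            for (NodeId id : ids) {
                previous = id.write(previous, os);
            }

            // Read: the factory replays the deltas in the same order.
            VariableByteArrayInput is = new VariableByteArrayInput(os.toByteArray());
            DLNFactory factory = new DLNFactory();
            previous = null;
            for (NodeId expected : ids) {
                previous = factory.createFromStream(previous, is);
                System.out.println(expected.equals(previous)); // true
            }
        }
    }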

        super.startElement(qname, attribs);
    }

    @Override
    public void characters(CharSequence seq) throws SAXException {
        NodeId nodeId = getCurrentNode().getNodeId();
        Offset offset = nodesWithMatch.get(nodeId);
        if (offset == null)
            super.characters(seq);
        else {
            String s = seq.toString();
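The characters() callback above looks the current node's id up in nodesWithMatch, which only works because NodeId implementations compare by value. A trivial sketch of that property, assuming DLN's value-based equals/hashCode (which the map lookups in the excerpt rely on):

    import java.util.HashMap;
    import java.util.Map;

    import org.exist.numbering.DLN;
    import org.exist.numbering.NodeId;

    public class NodeIdAsKey {
        public static void main(String[] args) {
            Map<NodeId, String> matches = new HashMap<>();
            matches.put(new DLN("1.2.3"), "match offsets for this text node");

            // A structurally equal id from a different instance finds the entry.
            System.out.println(matches.get(new DLN("1.2.3")));
        }
    }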

                    case XMLStreamConstants.START_ELEMENT:
                        ++level;
                        textOffset += extractor.startElement(reader.getQName());
                        break;
                    case XMLStreamConstants.CHARACTERS:
                        NodeId nodeId = (NodeId) reader.getProperty(ExtendedXMLStreamReader.PROPERTY_NODE_ID);
                        textOffset += extractor.beforeCharacters();
                        offsets.add(textOffset, nodeId);
                        textOffset += extractor.characters(reader.getXMLText());
                        break;
                }
            }
        } catch (IOException | XMLStreamException e) {
            LOG.warn("Problem found while serializing XML: " + e.getMessage(), e);
        }
       
        // Retrieve the Analyzer for the NodeProxy that was used for
        // indexing and querying.
        Analyzer analyzer = idxConf.getAnalyzer();
        if (analyzer == null) {
            // Otherwise use system default Lucene analyzer (from conf.xml)
            // to tokenize the text and find matching query terms.
            analyzer = index.getDefaultAnalyzer();
        }
        LOG.debug("Analyzer: " + analyzer + " for path: " + path);
        String str = extractor.getText().toString();
        //Token token;
        try {

            TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(str));
            tokenStream.reset();
            MarkableTokenFilter stream = new MarkableTokenFilter(tokenStream);
            while (stream.incrementToken()) {
                String text = stream.getAttribute(CharTermAttribute.class).toString();
                Query query = termMap.get(text);
                if (query != null) {
                    // Phrase queries need to be handled differently to filter
                    // out wrong matches: only the phrase should be marked, not
                    // single words which may also occur elsewhere in the document.
                    if (query instanceof PhraseQuery) {
                        PhraseQuery phraseQuery = (PhraseQuery) query;
                        Term[] terms = phraseQuery.getTerms();
                        if (text.equals(terms[0].text())) {
                            // Scan the following text and collect tokens to see
                            // if they are part of the phrase.
                            stream.mark();
                            int t = 1;
                            List<State> stateList = new ArrayList<>(terms.length);
                            stateList.add(stream.captureState());
                           
                            while (stream.incrementToken() && t < terms.length) {
                                text = stream.getAttribute(CharTermAttribute.class).toString();
                                if (text.equals(terms[t].text())) {
                                    stateList.add(stream.captureState());
                                    if (++t == terms.length) {
                                        break;
                                    }
                                } else {
                                    // Don't reset the token stream since we will
                                    // miss matches. /ljo
                                    //stream.reset();
                                    break;
                                }
                            }
                           
                            if (stateList.size() == terms.length) {
                                // we indeed have a phrase match. record the offsets of its terms.
                                int lastIdx = -1;
                                for (int i = 0; i < terms.length; i++) {
                                    stream.restoreState(stateList.get(i));
                                   
                                    OffsetAttribute offsetAttr = stream.getAttribute(OffsetAttribute.class);
                                    int idx = offsets.getIndex(offsetAttr.startOffset());
                                   
                                    NodeId nodeId = offsets.ids[idx];
                                    Offset offset = nodesWithMatch.get(nodeId);
                                    if (offset != null)
                                        if (lastIdx == idx)
                                            offset.setEndOffset(offsetAttr.endOffset() - offsets.offsets[idx]);
                                        else
                                            offset.add(offsetAttr.startOffset() - offsets.offsets[idx],
                                                offsetAttr.endOffset() - offsets.offsets[idx]);
                                    else
                                        nodesWithMatch.put(nodeId, new Offset(offsetAttr.startOffset() - offsets.offsets[idx],
                                            offsetAttr.endOffset() - offsets.offsets[idx]));
                                    lastIdx = idx;
                                }
                            }
                        } // End of phrase handling
                    } else {
                       
                        OffsetAttribute offsetAttr = stream.getAttribute(OffsetAttribute.class);
                        int idx = offsets.getIndex(offsetAttr.startOffset());
                        NodeId nodeId = offsets.ids[idx];
                        Offset offset = nodesWithMatch.get(nodeId);
                        if (offset != null)
                            offset.add(offsetAttr.startOffset() - offsets.offsets[idx],
                                offsetAttr.endOffset() - offsets.offsets[idx]);
                        else {
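In the long excerpt above, the offsets object records, for each text node, the character offset at which its text begins in the concatenated string, paired with its NodeId; getIndex() then maps a token's start offset back to the text node that contains it, so match positions can be expressed relative to individual nodes. A hypothetical minimal version of that helper, with all names invented for illustration:

    import java.util.ArrayList;
    import java.util.List;

    import org.exist.numbering.NodeId;

    // Hypothetical stand-in for the offsets helper used in the excerpt.
    class OffsetList {
        private final List<Integer> offsets = new ArrayList<>(); // start offset of each text node
        private final List<NodeId> ids = new ArrayList<>();      // id of each text node

        void add(int offset, NodeId id) {
            offsets.add(offset);
            ids.add(id);
        }

        // Index of the last text node starting at or before the given offset.
        int getIndex(int offset) {
            for (int i = offsets.size() - 1; i >= 0; i--) {
                if (offsets.get(i) <= offset) {
                    return i;
                }
            }
            return 0;
        }
    }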

            }
            BytesRef ref = new BytesRef(buf);
            this.nodeIdValues.get(doc, ref);

            int units = ByteConversion.byteToShort(ref.bytes, ref.offset);
            NodeId nodeId = index.getBrokerPool().getNodeFactory().createFromData(units, ref.bytes, ref.offset + 2);

            // if a context set is specified, we can directly check if the
            // matching node is a descendant of one of the nodes
            // in the context set.
            if (contextSet != null) {
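The comment in the excerpt describes the contextSet check: once the match's NodeId has been rebuilt from the Lucene stored field, ancestry can be tested on the labels alone. A sketch of that test against a plain list of context ids (the real code walks eXist's NodeSet; isDescendantOf is assumed from the NodeId interface):

    import java.util.List;

    import org.exist.numbering.DLN;
    import org.exist.numbering.NodeId;

    public class ContextCheck {
        static boolean inContext(NodeId match, List<NodeId> contextIds) {
            for (NodeId ctx : contextIds) {
                // descendant-or-self test on the labels alone
                if (match.equals(ctx) || match.isDescendantOf(ctx)) {
                    return true;
                }
            }
            return false;
        }

        public static void main(String[] args) {
            NodeId match = new DLN("1.2.5.1");
            System.out.println(inContext(match, List.of(new DLN("1.2")))); // true
        }
    }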

                if (storedDocument == null)
                    return;
                BytesRef ref = new BytesRef(buf);
                this.nodeIdValues.get(doc, ref);
                int units = ByteConversion.byteToShort(ref.bytes, ref.offset);
                NodeId nodeId = index.getBrokerPool().getNodeFactory().createFromData(units, ref.bytes, ref.offset + 2);
                //LOG.info("doc: " + docId + "; node: " + nodeId.toString() + "; units: " + units);

                NodeProxy storedNode = new NodeProxy(storedDocument, nodeId);
                if (qname != null)
                    storedNode.setNodeType(qname.getNameType() == ElementValue.ATTRIBUTE ? Node.ATTRIBUTE_NODE : Node.ELEMENT_NODE);

                    "?"
                + ")"
            );

        try {
            NodeId nodeId = null;
            SRSGeometry srsGeometry = null;
            for (Map.Entry<NodeId, SRSGeometry> entry : geometries.entrySet()) {
                nodeId = entry.getKey();
                srsGeometry = entry.getValue();
               

                    //Ignore, since the broker has no rights on the document
                    continue;
                }
                //contextSet == null should be used to scan the whole index
                if (contextSet == null || refine_query_on_doc || contextSet.getDocumentSet().contains(doc.getDocId())) {
                    NodeId nodeId = new DLN(rs.getInt("NODE_ID_UNITS"), rs.getBytes("NODE_ID"), 0);
                    NodeProxy p = new NodeProxy(doc, nodeId);
                    //Node is in the context : check if it is accurate
                    //contextSet.contains(p) would have made more sense but there is a problem with
                    //VirtualNodeSet when on the DESCENDANT_OR_SELF axis
                    if (contextSet == null || contextSet.get(p) != null) {


