Package org.apache.lucene.util

Examples of org.apache.lucene.util.IntsRef
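
IntsRef is a lightweight, mutable view over an int[]: the valid data is ints[offset .. offset + length), and instances are typically allocated once and reused to avoid copying. A minimal sketch of that contract (assuming the Lucene 4.x-era API used by the snippets below, where grow() and the public ints/offset/length fields are available):

import org.apache.lucene.util.IntsRef;

public class IntsRefBasics {
  public static void main(String[] args) {
    IntsRef ref = new IntsRef(4);   // backing int[4], offset 0, length 0
    ref.grow(6);                    // reallocates the backing array if too small
    for (int i = 0; i < 6; i++) {
      ref.ints[i] = i;
    }
    ref.length = 6;                 // callers maintain length themselves
    System.out.println(ref);        // prints the slice contents in hex
  }
}

The snippets below show the recurring usage patterns: scratch input for FST builders, reusable decode buffers, and node labels in SIREn's scorers and block writers.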


    meta.writeVInt(field.number);
    meta.writeByte(FST);
    meta.writeLong(data.getFilePointer());
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    Builder<Long> builder = new Builder<Long>(INPUT_TYPE.BYTE1, outputs);
    IntsRef scratch = new IntsRef();
    long ord = 0;
    for (BytesRef v : values) {
      builder.add(Util.toIntsRef(v, scratch), ord);
      ord++;
    }
    // ...
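
The excerpt stops before the FST is finalized. A hedged sketch of the remaining steps, assuming the same Lucene 4.x FST API (illustrative, not necessarily this codec's exact write sequence):

    // after all (term, ord) pairs have been added in sorted order
    // (Builder.add requires sorted input):
    FST<Long> fst = builder.finish();   // may return null if nothing was added
    if (fst != null) {
      fst.save(data);                   // persist at the file pointer recorded in meta
    }

    // a term's ordinal can later be looked up directly from the FST:
    Long ordinal = Util.get(fst, new BytesRef("someTerm"));  // null if absent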


   
    System.out.println("  encode...");

    PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
    Builder<Long> fstBuilder = new Builder<Long>(FST.INPUT_TYPE.BYTE2, 0, 0, true, true, Integer.MAX_VALUE, fstOutput, null, true, PackedInts.DEFAULT, true, 15);
    IntsRef scratch = new IntsRef();
    long ord = -1; // first ord will be 0
    String lastValue = null;

    // build tokeninfo dictionary
    for (String[] entry : lines) {
      int next = dictionary.put(entry);
       
      if (next == offset) {
        System.out.println("Failed to process line: " + Arrays.toString(entry));
        continue;
      }
     
      String token = entry[0];
      if (!token.equals(lastValue)) {
        // new word to add to fst
        ord++;
        lastValue = token;
        scratch.grow(token.length());
        scratch.length = token.length();
        for (int i = 0; i < token.length(); i++) {
          scratch.ints[i] = (int) token.charAt(i);
        }
        fstBuilder.add(scratch, ord);
        // ...
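
The char-by-char copy above mirrors a helper in Lucene's FST utilities; because the builder was created with INPUT_TYPE.BYTE2, each UTF-16 code unit becomes one FST input label. A hedged equivalent using org.apache.lucene.util.fst.Util (Lucene 4.x API assumed):

        // Util.toUTF16 grows scratch, copies each UTF-16 code unit of token
        // into scratch.ints, and sets scratch.length accordingly:
        Util.toUTF16(token, scratch);
        fstBuilder.add(scratch, ord);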

      int[] ords = new int[maxDoc]; // let's assume one ordinal per-document as an initial size

      // this aggregator is limited to Integer.MAX_VALUE total ordinals.
      int totOrds = 0;
      final IntDecoder decoder = clp.createEncoder().createMatchingDecoder();
      final IntsRef values = new IntsRef(32);
      for (int docID = 0; docID < maxDoc; docID++) {
        offsets[docID] = totOrds;
        dv.get(docID, buf);
        if (buf.length > 0) {
          // this document has facets
          // ...
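
Inside that branch the decoder appends each document's ordinals into the reusable values buffer. A hedged sketch of the typical grow-and-append step (decodedOrdinal is a hypothetical local, not part of the original code):

          // append into a reusable IntsRef, amortizing allocation:
          if (values.length == values.ints.length) {
            values.grow(values.length + 1);   // ArrayUtil-style oversizing underneath
          }
          values.ints[values.length++] = decodedOrdinal;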

  /**
   * @return true iff the current candidate document has a non-excluded
   *         required node.
   */
  private boolean toNonExcludedNode() throws IOException {
    IntsRef reqNode = reqScorer.node(); // may be excluded
    IntsRef exclNode = exclScorer.node();

    int comparison;
    while ((comparison = NodeUtils.compare(reqNode, exclNode)) >= 0) {
      // if node equal, advance to next node in reqScorer
      if (comparison == 0 && !reqScorer.nextNode()) {
        // ...
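
NodeUtils.compare is SIREn-specific and understands sentinel node values; for plain node labels, IntsRef's own compareTo already provides the underlying lexicographic order. A small illustration with hypothetical labels:

    IntsRef a = new IntsRef(new int[]{0, 1}, 0, 2);     // node path "0.1"
    IntsRef b = new IntsRef(new int[]{0, 1, 3}, 0, 3);  // node path "0.1.3"
    System.out.println(a.compareTo(b) < 0);             // true: a proper prefix sorts first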

  @Test
  public void testIncompleteFrame() throws IOException {
    final BlockCompressor compressor = new AForBlockCompressor();

    final IntsRef input = new IntsRef(64);
    final BytesRef output = new BytesRef(compressor.maxCompressedSize(64));

    // fill first part with 1
    for (int i = 0; i < 33; i++) {
      input.ints[i] = 1;
      // ...
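
Presumably the test then fills the remainder of the frame with a different value before compressing, so the frame boundary falls inside the block. A hedged continuation, assuming the compress(IntsRef input, BytesRef output) shape suggested by the two buffers above:

    // hypothetical continuation: fill the rest with 0 and compress the block
    for (int i = 33; i < 64; i++) {
      input.ints[i] = 0;
    }
    input.length = 64;
    compressor.compress(input, output);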

    BytesRef docCompressedBuffer;
    BytesRef nodFreqCompressedBuffer;

    public DocsFreqBlockWriter() {
      // ensure that the input buffers have the minimum size required
      docBuffer = new IntsRef(this.getMinimumBufferSize(maxBlockSize, docCompressor.getWindowSize()));
      nodFreqBuffer = new IntsRef(this.getMinimumBufferSize(maxBlockSize, freqCompressor.getWindowSize()));

      // determine max size of compressed buffer to avoid overflow
      int size = docCompressor.maxCompressedSize(maxBlockSize);
      docCompressedBuffer = new BytesRef(size);
      // ...

    return scorer.doc();
  }

  @Override
  public IntsRef node() {
    final IntsRef node = scorer.node();
    // truncate the node label to the ancestor level only if it is not a sentinel value
    if (node.length > ancestorLevel &&
        node.ints[node.offset] != -1 &&
        node != DocsAndNodesIterator.NO_MORE_NOD) {
      node.length = ancestorLevel;
      // ...
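
Truncation here is O(1): shrinking length rewrites no data, so the returned IntsRef is simply an ancestor view over the same backing array. The same idea in isolation:

    IntsRef node = new IntsRef(new int[]{0, 4, 7}, 0, 3);  // node path "0.4.7"
    node.length = 2;  // now views the ancestor "0.4"; the backing array is untouched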

    BytesRef termFreqCompressedBuffer;

    public NodBlockWriter() {
      // ensure that the input buffers have the minimum size required
      // maxBlockSize is just used as a minimum initial capacity for the buffers
      nodLenBuffer = new IntsRef(this.getMinimumBufferSize(maxBlockSize, nodCompressor.getWindowSize()));
      nodBuffer = new IntsRef(this.getMinimumBufferSize(maxBlockSize, nodCompressor.getWindowSize()));
      termFreqBuffer = new IntsRef(this.getMinimumBufferSize(maxBlockSize, nodCompressor.getWindowSize()));

      // init of the compressed buffers
      nodLenCompressedBuffer = new BytesRef();
      nodCompressedBuffer = new BytesRef();
      termFreqCompressedBuffer = new BytesRef();
      // ...

    // we always receive node ids in the payload
    assert payload != null;

    // decode payload
    sirenPayload.decode(payload);
    final IntsRef node = sirenPayload.getNode();

    // check if we received the same node
    // TODO: we pay the cost of decoding the node before testing the equality
    // we could instead directly compute the node hash based on the byte array
    final int nodeHash = node.hashCode();
    if (lastNodeHash != nodeHash) { // if different node
      // add term freq for previous node if not first payload.
      if (lastNodeHash != Long.MAX_VALUE) {
        this.addTermFreqInNode();
      }
      // ...
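
The hash-based change detection above works because IntsRef.hashCode, like equals, is computed only over the active slice ints[offset .. offset + length), so identical node labels hash identically regardless of which backing array holds them:

      IntsRef n1 = new IntsRef(new int[]{9, 9, 2, 5}, 2, 2);  // slice {2, 5}
      IntsRef n2 = new IntsRef(new int[]{2, 5}, 0, 2);        // slice {2, 5}
      System.out.println(n1.equals(n2) && n1.hashCode() == n2.hashCode());  // true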

      final int nodeFreq = postingsEnum.nodeFreqInDoc();
      docWriter.writeNodeFreq(nodeFreq);

      while (postingsEnum.nextNode()) {
        final IntsRef node = postingsEnum.node();
        nodWriter.write(node);

        final int termFreqInNode = postingsEnum.termFreqInNode();
        nodWriter.writeTermFreq(termFreqInNode);
        // ...
