Package org.terrier.structures

Examples of org.terrier.structures.DocumentIndexEntry
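org.terrier.structures.DocumentIndexEntry is the per-document record held in Terrier's document index. It stores the document's length in tokens, the number of entries (distinct terms) in the document's direct-index postings list, and a pointer (data-file number plus a byte/bit offset) locating those postings in the bit-compressed direct file. BasicDocumentIndexEntry, SimpleDocumentIndexEntry and FieldDocumentIndexEntry are the concrete variants that appear in the examples below.

A minimal sketch of the accessors, restricted to calls that occur in the snippets on this page; the values and casts are illustrative:

      // describe a document of 523 tokens containing 102 distinct terms
      DocumentIndexEntry die = new BasicDocumentIndexEntry();
      die.setDocumentLength(523);
      die.setNumberOfEntries(102);
      // locate the document's postings: data file 0, starting at byte 0, bit 0
      die.setFileNumber((byte) 0);
      die.setOffset(0L, (byte) 0);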


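The first example is from Terrier's Hadoop indexer: it merges the ".pointers" file written by each reduce task into the document index. With a single reducer the output file is simply renamed; with a modest number of reducers each part becomes a separate numbered data file (note setFileNumber); with many reducers the parts are concatenated and each pointer is rebased by the running offset of the final file.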
      String outputPrefix = "-0";
      DataInputStream currentStream = new DataInputStream(Files.openFileStream(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers"));
      //logger.info("Adding pointers to the document index");
      while(diis.hasNext())
      {
        DocumentIndexEntry die = diis.next();
        pointer.readFields(currentStream);
        DocumentIndexEntry newDIentry = fields
          ? new FieldDocumentIndexEntry(die)
          : new BasicDocumentIndexEntry(die);
        newDIentry.setOffset(pointer);
        newDIentry.setNumberOfEntries(pointer.getNumberOfEntries());
        dios.addEntryToBuffer(newDIentry);
      }
      //logger.info("Renaming reducer output as direct file");
      Files.delete(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + BitIn.USUAL_EXTENSION);
      Files.rename(
          index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + BitIn.USUAL_EXTENSION,
          index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + BitIn.USUAL_EXTENSION);
      currentStream.close();
      Files.delete(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers");
    }
    else if (numberOfReducers <= numberOfReduceTaskLimits)
    {
      //logger.info("Merging direct index pointers from "+ numberOfReducers + " reducers");
      final int partitionSize = (int) Math.ceil((double) index.getCollectionStatistics().getNumberOfDocuments() / (double) numberOfReducers);
      for(byte reduce = 0; reduce < numberOfReducers; reduce++)
      {
        //logger.info("Merging in pointers from reduce task " + reduce);
        String outputPrefix = "-" + reduce;
        DataInputStream currentStream = new DataInputStream(Files.openFileStream(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers"));
        for(int docOffset = 0; docOffset < partitionSize && diis.hasNext(); docOffset++)
        {
          DocumentIndexEntry die = diis.next();
          pointer.readFields(currentStream);
          DocumentIndexEntry newDIentry = fields
            ? new FieldDocumentIndexEntry(die)
            : new BasicDocumentIndexEntry(die);
          newDIentry.setOffset(pointer);
          newDIentry.setFileNumber(reduce);
          newDIentry.setNumberOfEntries(pointer.getNumberOfEntries());
          dios.addEntryToBuffer(newDIentry);
        }
        currentStream.close();
        Files.delete(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers");
        //logger.info("Renaming direct file part for reduce task " + reduce);
        String sourcePartDFfilename = index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + BitIn.USUAL_EXTENSION;
        String destPartDFfilename = index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + BitIn.USUAL_EXTENSION + reduce;
        Files.rename(sourcePartDFfilename, destPartDFfilename);
      }
      index.setIndexProperty("index." + targetStructureName + ".data-files", "" + numberOfReducers);
      index.flush();
      IndexUtil.close(diis);
    }
    else
    {
      //logger.info("Merging direct index output from "+ numberOfReducers + " reducers");
     
      final int partitionSize = (int) Math.ceil((double) index.getCollectionStatistics().getNumberOfDocuments() / (double) numberOfReducers);
      final OutputStream DFout = Files.writeFileStream(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + BitIn.USUAL_EXTENSION);
      long finalFileOffset = 0;
     
      for(int reduce = 0; reduce < numberOfReducers; reduce++)
      {
        //logger.info("Copying document index part for reduce task " + reduce);
        String outputPrefix = "-" + reduce;
        DataInputStream currentStream = new DataInputStream(Files.openFileStream(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers"));
        for(int docOffset = 0; docOffset < partitionSize && diis.hasNext(); docOffset++)
        {
          DocumentIndexEntry die = diis.next();
          pointer.readFields(currentStream);
          DocumentIndexEntry newDIentry = fields
            ? new FieldDocumentIndexEntry(die)
            : new BasicDocumentIndexEntry(die);
          newDIentry.setOffset(finalFileOffset + pointer.getOffset(), pointer.getOffsetBits());
          newDIentry.setNumberOfEntries(pointer.getNumberOfEntries());
          dios.addEntryToBuffer(newDIentry);
        }
        currentStream.close();
        Files.delete(index.getPath() + ApplicationSetup.FILE_SEPARATOR + index.getPrefix() + "." + targetStructureName + outputPrefix + ".pointers");
        //logger.info("Copying direct file part for reduce task " + reduce);


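Rebuilding a document index with direct-file offsets read from a side file: every existing entry is copied into a fresh BasicDocumentIndexEntry (or, when fieldCount > 0, presumably a field-aware entry in the truncated else branch) before the new pointer is applied.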
      //write the offsets to the DocumentIndex
      final DataInputStream dis = new DataInputStream(Files.openFileStream(offsetsFilename));
      final DocumentIndexBuilder dios = new DocumentIndexBuilder(index, "document-df");
      final Iterator<DocumentIndexEntry> docidInput = (Iterator<DocumentIndexEntry>)index.getIndexStructureInputStream("document");
     
      DocumentIndexEntry die = null;
      int docid = 0;
      while (docidInput.hasNext())
      {
        DocumentIndexEntry old = docidInput.next();
        if (fieldCount == 0)
        {
          die = new BasicDocumentIndexEntry(old);
        }
        else

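A postings-list holder can export its statistics directly as a DocumentIndexEntry: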
  /** Return a DocumentIndexEntry for this document */
  public DocumentIndexEntry getDocumentStatistics()
  {
    DocumentIndexEntry die = new BasicDocumentIndexEntry();
    die.setDocumentLength(this.getDocumentLength());
    die.setNumberOfEntries(this.getNumberOfPointers());
    return die;
  }

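In the single-pass indexer's document loop, that entry is buffered into the DocumentIndexBuilder, downgraded to a lighter SimpleDocumentIndexEntry when no fields are indexed: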
      numberOfDocsSinceCheck++;
      numberOfDocsSinceFlush++;
     
      checkFlush();
      mp.addTerms(termsInDocument, currentId);
      DocumentIndexEntry die = termsInDocument.getDocumentStatistics();
      docIndexBuilder.addEntryToBuffer((FieldScore.FIELDS_COUNT > 0) ? die : new SimpleDocumentIndexEntry(die));
      metaBuilder.writeDocumentEntry(docProperties);
      currentId++;
      numberOfDocuments++;
    }

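The classical two-pass indexer additionally attaches the BitIndexPointer returned when the document's postings are written to the direct index: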
    lexiconBuilder.addDocumentTerms(_termsInDocument);
    /* add doc postings to the direct index */
    BitIndexPointer dirIndexPost = directIndexBuilder.writePostings(_termsInDocument.getPostings2());
      //.addDocument(termsInDocument.getPostings());
    /* add doc to documentindex */
    DocumentIndexEntry die = _termsInDocument.getDocumentStatistics();
    die.setBitIndexPointer(dirIndexPost);
    docIndexBuilder.addEntryToBuffer(die);
    /* add doc metadata to index */
    metaBuilder.writeDocumentEntry(docProperties);
  }
 

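A near-identical variant of the same step from another indexer: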
    /* add words to lexicontree */
    lexiconBuilder.addDocumentTerms(_termsInDocument);
    /* add doc postings to the direct index */
    BitIndexPointer dirIndexPost = directIndexBuilder.writePostings(_termsInDocument.getPostings2());
    /* add doc to documentindex */
    DocumentIndexEntry die = _termsInDocument.getDocumentStatistics();
    die.setBitIndexPointer(dirIndexPost);
    docIndexBuilder.addEntryToBuffer(die);
    /* add doc metadata to index */
    metaBuilder.writeDocumentEntry(docProperties);
  }
 

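When merging two indices, entries from the first index pass through unchanged, while postings from the second index have their term ids remapped through termcodeHashmap and re-sorted before a new direct-file pointer is attached to each entry: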
      int sourceDocid = 0;
      //traversing the direct index, without any change
      while(docidInput1.hasNext())
      {
        BitIndexPointer pointerDF = emptyPointer;
        DocumentIndexEntry die = docidInput1.next();
        if (die.getDocumentLength() > 0)
        {
          pointerDF = dfOutput.writePostings(dfInput1.next());
        }
        die.setBitIndexPointer(pointerDF);
        docidOutput.addEntryToBuffer(die);
        metaBuilder.writeDocumentEntry(metaInput1.getAllItems(sourceDocid));
        sourceDocid++;
      }
      dfInput1.close();
      metaInput1.close();
      IndexUtil.close(docidInput1);
      final Iterator<DocumentIndexEntry> docidInput2 = (Iterator<DocumentIndexEntry>)srcIndex2.getIndexStructureInputStream("document");
      final PostingIndexInputStream dfInput2 = (PostingIndexInputStream)srcIndex2.getIndexStructureInputStream("direct");
      final MetaIndex metaInput2 = srcIndex2.getMetaIndex();
     
      sourceDocid = 0;
      while (docidInput2.hasNext())
      {
        DocumentIndexEntry die = docidInput2.next();
     
        BitIndexPointer pointerDF = emptyPointer;
        if (die.getDocumentLength() > 0)
        {
          final IterablePosting postings = dfInput2.next();
         
          List<Posting> postingList = new ArrayList<Posting>();
          while(postings.next() != IterablePosting.EOL)
          {
            final Posting p = postings.asWritablePosting();
            p.setId(termcodeHashmap.get(postings.getId()));
            postingList.add(p);
          }
          Collections.sort(postingList, new PostingIdComparator());
          pointerDF = dfOutput.writePostings(postingList.iterator());
        }
        die.setBitIndexPointer(pointerDF);
        docidOutput.addEntryToBuffer(die);
        metaBuilder.writeDocumentEntry(metaInput2.getAllItems(sourceDocid));
        sourceDocid++;
      }
      dfInput2.close();

     
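A simpler merge concatenates only the document indices and metadata, again substituting SimpleDocumentIndexEntry when no fields are present: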
      //traversing the first set of files, without any change
      while(docidInput1.hasNext())
      {
        metaInput1.hasNext();
        DocumentIndexEntry die = docidInput1.next();
        DocumentIndexEntry dieNew = (fieldCount > 0) ? die : new SimpleDocumentIndexEntry(die);
        docidOutput.addEntryToBuffer(dieNew);
        metaBuilder.writeDocumentEntry(metaInput1.next());
      }
     
      final Iterator<DocumentIndexEntry> docidInput2 = (Iterator<DocumentIndexEntry>)srcIndex2.getIndexStructureInputStream("document");
      final Iterator<String[]> metaInput2 = (Iterator<String[]>)srcIndex2.getIndexStructureInputStream("meta");
      //traversing the 2nd set of files, without any change
      while(docidInput2.hasNext())
      {
        metaInput2.hasNext();
        DocumentIndexEntry die = docidInput2.next();
        DocumentIndexEntry dieNew = (fieldCount > 0) ? die : new SimpleDocumentIndexEntry(die);
        docidOutput.addEntryToBuffer(dieNew);
        metaBuilder.writeDocumentEntry(metaInput2.next());
      }
     
      docidOutput.finishedCollections();

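Test code: each entry fetched from the DocumentIndex doubles as a BitIndexPointer for reading the document's direct-index postings, which are checked against the expected term frequencies, field frequencies and document lengths; the same checks are then repeated over a DirectIndexInputStream: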
      DirectIndex directIndex = index.getDirectIndex();
      assertNotNull(directIndex);
      // for each document
      for (int d = 0; d < dirTfs.length; d++) {
        DocumentIndexEntry de = documentIndex.getDocumentEntry(d);
        assertNotNull(de);
        ip = directIndex.getPostings((BitIndexPointer) de);
        FieldPosting fp = fieldsExpected ? (FieldPosting)ip : null;
        // for each term
        int t = 0;
        int countFoundTerms = 0;
        while (ip.next() != IterablePosting.EOL) {
          int termid = ip.getId();
          assertTrue(termid >= 0);
          String term = lexicon.getLexiconEntry(termid).getKey();
          assertNotNull(term);
          countFoundTerms++;
          assertTrue(dirTfs[d].containsKey(term));
          assertEquals(dirTfs[d].get(term), ip.getFrequency());
          assertEquals(doclens[d], ip.getDocumentLength());         
         
          if (fieldsExpected) {
            assertEquals(2, fp.getFieldFrequencies().length);
            for (int f = 0; f < 2; f++) {
              assertEquals(dirFfs[d].get(term)[f], fp.getFieldFrequencies()[f]);
            }
          }
          t++;
        }
        assertEquals(dirTfs[d].size(), countFoundTerms);
        ip.close();
      }
      // post-check
      assertEquals(IterablePosting.EOL, ip.next());

      /**
       * Test {@link IterablePosting} entries from a {@link DirectIndexInputStream}
       */
      bpiis = (BitPostingIndexInputStream) index.getIndexStructureInputStream("direct");
      assertNotNull(bpiis);
      // for each document
      for (int d = 0; d < dirTfs.length; d++) {
        assertTrue(bpiis.hasNext());
        ip = bpiis.next();
        assertNotNull(ip);
        FieldPosting fp = fieldsExpected ? (FieldPosting)ip : null;
        // for each term
        int t = 0;
        int countFoundTerms = 0;
        while (ip.next() != IterablePosting.EOL) {
          int termid = ip.getId();
          assertTrue(termid >= 0);
          String term = lexicon.getLexiconEntry(termid).getKey();
          assertNotNull(term);
          countFoundTerms++;
          assertTrue(dirTfs[d].containsKey(term));
          assertEquals(dirTfs[d].get(term), ip.getFrequency());
          assertEquals(doclens[d], ip.getDocumentLength());         
         
          if (fieldsExpected) {
            assertEquals(2, fp.getFieldFrequencies().length);
            for (int f = 0; f < 2; f++) {
              assertEquals(dirFfs[d].get(term)[f], fp.getFieldFrequencies()[f]);
            }
          }
          t++;
        }
        assertEquals(dirTfs[d].size(), countFoundTerms);
      }
      // post-check
      assertFalse(bpiis.hasNext());

      /**
       * Test posting array entries from a {@link DirectIndex}
       */
      // for each document
      for (int d = 0; d < dirTfs.length; d++) {
        DocumentIndexEntry de = documentIndex.getDocumentEntry(d);
        assertNotNull(de);
       
        int[][] terms = directIndex.getTerms(de);
       
        if (!fieldsExpected) {

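Finally, iterating the document index as a stream and verifying each entry's document length: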
    //check index as stream
    Iterator<DocumentIndexEntry> iDie = (Iterator<DocumentIndexEntry>) index.getIndexStructureInputStream("document");
    assertNotNull("Failed to get a document inputstream", iDie);
    while(iDie.hasNext())
    {
      DocumentIndexEntry die = iDie.next();
      docid++;
      //System.out.println(die.getDocumentLength());
      assertEquals("Document lengths for docid "+ docid + " dont match", lengths[docid], die.getDocumentLength());
    }
    //check docid is as large as expected
    assertEquals("Metaindex as stream didnt have expected number of entries", lengths.length -1, docid);
    IndexUtil.close(iDie);
   
