Package org.apache.lucene.index

Examples of org.apache.lucene.index.IndexWriter
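The snippets on this page are drawn from Lucene's test suite and use the pre-4.0 API, where an IndexWriter is constructed directly from a Directory, an Analyzer, a create flag, and a MaxFieldLength limit. As a quick orientation before the test code below, here is a minimal, self-contained sketch of the same create, add, close, then search pattern; the class name, field name, and query text are illustrative only and do not come from the examples that follow.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class IndexWriterExample {
  public static void main(String[] args) throws Exception {
    RAMDirectory directory = new RAMDirectory();   // in-memory index, as in the tests below

    // true = create a new index, overwriting anything already in the directory
    IndexWriter writer = new IndexWriter(directory,
        new StandardAnalyzer(Version.LUCENE_CURRENT), true,
        IndexWriter.MaxFieldLength.LIMITED);

    Document doc = new Document();
    doc.add(new Field("content", "the quick brown fox",
        Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(doc);
    writer.close();                                // commit and release the write lock

    // Open a read-only searcher over the directory the writer just populated.
    IndexSearcher searcher = new IndexSearcher(directory, true);
    TopDocs hits = searcher.search(new TermQuery(new Term("content", "quick")), 10);
    System.out.println("hits: " + hits.totalHits);
    searcher.close();
  }
}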


  @Override
  protected void setUp() throws Exception {
    super.setUp();
    RAMDirectory directory = new RAMDirectory();
    PayloadAnalyzer analyzer = new PayloadAnalyzer();
    IndexWriter writer
      = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setSimilarity(similarity);
    //writer.infoStream = System.out;
    for (int i = 0; i < 1000; i++) {
      Document doc = new Document();
      doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
      String txt = English.intToEnglish(i) + ' ' + English.intToEnglish(i + 1);
      doc.add(new Field("field2",  txt, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    writer.optimize();
    writer.close();

    searcher = new IndexSearcher(directory, true);
    searcher.setSimilarity(similarity);
  }


 
  @Override
  public void setUp() throws Exception {
    super.setUp();
    RAMDirectory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < docFields.length; i++) {
      Document doc = new Document();
      doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    writer.close();
    searcher = new IndexSearcher(directory, true);
  }

 
  // create an index of all the documents, or just the x, or just the y documents
  private Searcher getIndex (boolean even, boolean odd)
  throws IOException {
    RAMDirectory indexStore = new RAMDirectory ();
    IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setMaxBufferedDocs(2);
    writer.setMergeFactor(1000);
    for (int i=0; i<data.length; ++i) {
      if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
        Document doc = new Document();
        doc.add (new Field ("tracer",   data[i][0], Field.Store.YES, Field.Index.NO));
        doc.add (new Field ("contents", data[i][1], Field.Store.NO, Field.Index.ANALYZED));
        if (data[i][2] != null) doc.add (new Field ("int",      data[i][2], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][3] != null) doc.add (new Field ("float",    data[i][3], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][4] != null) doc.add (new Field ("string",   data[i][4], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][5] != null) doc.add (new Field ("custom",   data[i][5], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][6] != null) doc.add (new Field ("i18n",     data[i][6], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][7] != null) doc.add (new Field ("long",     data[i][7], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][8] != null) doc.add (new Field ("double",     data[i][8], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][9] != null) doc.add (new Field ("short",     data[i][9], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][10] != null) doc.add (new Field ("byte",     data[i][10], Field.Store.NO, Field.Index.NOT_ANALYZED));
        if (data[i][11] != null) doc.add (new Field ("parser",     data[i][11], Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.setBoost(2); // produce some scores above 1.0
        writer.addDocument (doc);
      }
    }
    //writer.optimize ();
    writer.close ();
    IndexSearcher s = new IndexSearcher (indexStore, true);
    s.setDefaultFieldSortScoring(true, true);
    return s;
  }

  private Searcher getFullIndex() throws IOException {
    return getIndex (true, true);
  }
 
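  // Build an index of NUM_STRINGS documents whose "string"/"string2" fields hold random
  // character data, flushing with a randomized maxBufferedDocs setting on each iteration.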
  private IndexSearcher getFullStrings() throws CorruptIndexException, LockObtainFailedException, IOException {
    RAMDirectory indexStore = new RAMDirectory ();
    IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.setMaxBufferedDocs(4);
    writer.setMergeFactor(97);
    for (int i=0; i<NUM_STRINGS; i++) {
        Document doc = new Document();
        String num = getRandomCharString(getRandomNumber(2, 8), 48, 52);
        doc.add (new Field ("tracer", num, Field.Store.YES, Field.Index.NO));
        //doc.add (new Field ("contents", Integer.toString(i), Field.Store.NO, Field.Index.ANALYZED));
        doc.add (new Field ("string", num, Field.Store.NO, Field.Index.NOT_ANALYZED));
        String num2 = getRandomCharString(getRandomNumber(1, 4), 48, 50);
        doc.add (new Field ("string2", num2, Field.Store.NO, Field.Index.NOT_ANALYZED));
        doc.add (new Field ("tracer2", num2, Field.Store.YES, Field.Index.NO));
        doc.setBoost(2); // produce some scores above 1.0
        writer.setMaxBufferedDocs(getRandomNumber(2, 12));
        writer.addDocument (doc);
     
    }
    //writer.optimize ();
    //System.out.println(writer.getSegmentCount());
    writer.close ();
    return new IndexSearcher (indexStore, true);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    RAMDirectory directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < docFields.length; i++) {
      Document doc = new Document();
      doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }
    writer.close();
    searcher = new IndexSearcher(directory, true);
  }


  // LUCENE-1404
  public void testNPESpanQuery() throws Throwable {
    final Directory dir = new MockRAMDirectory();
    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), IndexWriter.MaxFieldLength.LIMITED);

    // Add documents
    addDoc(writer, "1", "the big dogs went running to the market");
    addDoc(writer, "2", "the cat chased the mouse, then the cat ate the mouse quickly");
   
    // Commit
    writer.close();

    // Get searcher
    final IndexReader reader = IndexReader.open(dir, true);
    final IndexSearcher searcher = new IndexSearcher(reader);

    @Override
    protected void setUp() throws Exception {
        super.setUp();

        // create test index
        mDirectory = new RAMDirectory();
        final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
        addDocument(writer, "1", "I think it should work.");
        addDocument(writer, "2", "I think it should work.");
        addDocument(writer, "3", "I think it should work.");
        addDocument(writer, "4", "I think it should work.");
        writer.close();
        searcher = new IndexSearcher(mDirectory, true);
    }

    @Override
    protected void setUp() throws Exception {
        super.setUp();

        // create test index
        final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
        addDocument(writer, "A", "Should we, could we, would we?");
        addDocument(writer, "B", "It should.  Should it?");
        addDocument(writer, "C", "It shouldn't.");
        addDocument(writer, "D", "Should we, should we, should we.");
        writer.close();

        // re-open the searcher since we added more docs
        searcher2 = new IndexSearcher(mDirectory, true);
    }

    if (tempDir == null)
      throw new IOException("java.io.tmpdir undefined, cannot run test");
    indexDir = new File(tempDir, "RAMDirIndex");
   
    Directory dir = FSDirectory.open(indexDir);
    IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    // add some documents
    Document doc = null;
    for (int i = 0; i < docsToAdd; i++) {
      doc = new Document();
      doc.add(new Field("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
      writer.addDocument(doc);
    }
    assertEquals(docsToAdd, writer.maxDoc());
    writer.close();
    dir.close();
  }

     
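    // Copy an existing on-disk index into a MockRAMDirectory, then have several threads
    // append documents concurrently through a single shared IndexWriter.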
    Directory dir = FSDirectory.open(indexDir);
    final MockRAMDirectory ramDir = new MockRAMDirectory(dir);
    dir.close();
   
    final IndexWriter writer  = new IndexWriter(ramDir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
    writer.optimize();
   
    assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
   
    Thread[] threads = new Thread[numThreads];
    for (int i=0; i<numThreads; i++) {
      final int num = i;
      threads[i] = new Thread(){
        @Override
        public void run() {
          for (int j=1; j<docsPerThread; j++) {
            Document doc = new Document();
            doc.add(new Field("sizeContent", English.intToEnglish(num*docsPerThread+j).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            try {
              writer.addDocument(doc);
            } catch (IOException e) {
              throw new RuntimeException(e);
            }
          }
        }
      };
    }
    for (int i=0; i<numThreads; i++)
      threads[i].start();
    for (int i=0; i<numThreads; i++)
      threads[i].join();

    writer.optimize();
    assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
   
    writer.close();
  }
