Examples of Directory


Examples of org.apache.lucene.store.Directory

   
    // Read the text.txt file located in the same directory as this class
    String content = ContentReader.readText(Chinese.class);

    // What follows is the standard Lucene code for building an index and searching it
    Directory ramDir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(ramDir, analyzer, MaxFieldLength.UNLIMITED);
    Document doc = new Document();
    Field fd = new Field(FIELD_NAME, content, Field.Store.YES,
        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
    doc.add(fd);
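
    The snippet above stops before the document is added to the index. A minimal sketch of how
    such an example typically continues, assuming Lucene 3.x, the analyzer and FIELD_NAME from
    the snippet, and a placeholder query term ("lucene") that is not part of the original:

    writer.addDocument(doc);
    writer.close();

    // Search the in-memory index that was just built.
    IndexSearcher searcher = new IndexSearcher(ramDir, true); // read-only searcher
    TopDocs hits = searcher.search(new TermQuery(new Term(FIELD_NAME, "lucene")), 10);
    for (ScoreDoc sd : hits.scoreDocs) {
        Document hitDoc = searcher.doc(sd.doc);
        System.out.println(hitDoc.get(FIELD_NAME));
    }
    searcher.close();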

Examples of org.apache.lucene.store.Directory

import org.apache.lucene.util._TestUtil;

public class TestIndexReader extends LuceneTestCase {
   
    public void testCommitUserData() throws Exception {
      Directory d = newDirectory();

      Map<String,String> commitUserData = new HashMap<String,String>();
      commitUserData.put("foo", "fighters");
     
      // set up writer
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random))
      .setMaxBufferedDocs(2));
      for(int i=0;i<27;i++)
        addDocumentWithFields(writer);
      writer.close();
     
      IndexReader r = IndexReader.open(d, false);
      r.deleteDocument(5);
      r.flush(commitUserData);
      r.close();
     
      SegmentInfos sis = new SegmentInfos();
      sis.read(d);
      IndexReader r2 = IndexReader.open(d, false);
      IndexCommit c = r.getIndexCommit();
      assertEquals(c.getUserData(), commitUserData);

      assertEquals(sis.getCurrentSegmentFileName(), c.getSegmentsFileName());

      assertTrue(c.equals(r.getIndexCommit()));

      // Change the index
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(
              OpenMode.APPEND).setMaxBufferedDocs(2));
      for(int i=0;i<7;i++)
        addDocumentWithFields(writer);
      writer.close();

      IndexReader r3 = r2.reopen();
      assertFalse(c.equals(r3.getIndexCommit()));
      assertFalse(r2.getIndexCommit().isOptimized());
      r3.close();

      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
        new MockAnalyzer(random))
        .setOpenMode(OpenMode.APPEND));
      writer.optimize();
      writer.close();

      r3 = r2.reopen();
      assertTrue(r3.getIndexCommit().isOptimized());
      r2.close();
      r3.close();
      d.close();
    }
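
    For comparison, commit user data can also be attached on the writer side instead of through
    IndexReader.flush(). A minimal sketch under the same assumptions as the test above (Lucene 3.x
    LuceneTestCase helpers such as newDirectory, newIndexWriterConfig and addDocumentWithFields):

    public void testCommitUserDataViaWriter() throws Exception {
      Directory d = newDirectory();
      Map<String,String> userData = new HashMap<String,String>();
      userData.put("foo", "fighters");

      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.commit(userData);             // the user data is stored with this commit
      writer.close();

      IndexReader r = IndexReader.open(d, true);
      assertEquals(userData, r.getIndexCommit().getUserData());
      r.close();
      d.close();
    }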

Examples of org.apache.lucene.store.Directory

      r3.close();
      d.close();
    }
   
    public void testIsCurrent() throws Exception {
      Directory d = newDirectory();
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.close();
      // set up reader:
      IndexReader reader = IndexReader.open(d, false);
      assertTrue(reader.isCurrent());
      // modify index by adding another document:
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      // re-create index:
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      reader.close();
      d.close();
    }
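
    A stale reader does not have to be thrown away: IndexReader.reopen() returns a fresh, current
    reader once the index has changed. A minimal sketch, under the same assumptions as the tests above:

    public void testReopenAfterChange() throws Exception {
      Directory d = newDirectory();
      IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
          TEST_VERSION_CURRENT, new MockAnalyzer(random)));
      addDocumentWithFields(writer);
      writer.close();

      IndexReader reader = IndexReader.open(d, false);
      assertTrue(reader.isCurrent());

      // modify the index behind the reader's back
      writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
          new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
      addDocumentWithFields(writer);
      writer.close();

      assertFalse(reader.isCurrent());
      IndexReader refreshed = reader.reopen();   // returns a new reader that sees the change
      assertTrue(refreshed != reader);
      assertTrue(refreshed.isCurrent());
      reader.close();
      refreshed.close();
      d.close();
    }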

Examples of org.apache.lucene.store.Directory

    /**
     * Tests the IndexReader.getFieldNames implementation
     * @throws Exception on error
     */
    public void testGetFieldNames() throws Exception {
        Directory d = newDirectory();
        // set up writer
        IndexWriter writer = new IndexWriter(
            d,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        );

        Document doc = new Document();
        doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
        doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
        doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
        writer.addDocument(doc);

        writer.close();
        // set up reader
        IndexReader reader = IndexReader.open(d, false);
        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        reader.close();
        // add more documents
        writer = new IndexWriter(
            d,
            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
                setOpenMode(OpenMode.APPEND).
                setMergePolicy(newLogMergePolicy())
        );
        // want to get some more segments here
        int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
          doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED));
          doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO));
          doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        // new fields are in some different segments (we hope)
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
          doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED));
          doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO));
          doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        // new termvector fields
        for (int i = 0; i < 5*mergeFactor; i++) {
          doc = new Document();
          doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
          doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
          doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
          doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
          doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
          writer.addDocument(doc);
        }
       
        writer.close();
        // verify fields again
        reader = IndexReader.open(d, false);
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertEquals(13, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unindexed2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));
       
        // verify that only indexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
        assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));
       
        // verify that only unindexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
        assertEquals(2, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unindexed2"));
               
        // verify index term vector fields 
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
        assertEquals(1, fieldNames.size());    // 1 field has term vector only
        assertTrue(fieldNames.contains("termvector"));
       
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
        assertEquals(1, fieldNames.size());    // only tvposition stores positions-only term vectors
        assertTrue(fieldNames.contains("tvposition"));
       
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
        assertEquals(1, fieldNames.size());    // only tvoffset stores offsets-only term vectors
        assertTrue(fieldNames.contains("tvoffset"));
               
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
        assertEquals(1, fieldNames.size());    // only tvpositionoffset stores positions and offsets
        assertTrue(fieldNames.contains("tvpositionoffset"));
        reader.close();
        d.close();
    }
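
    Outside of tests, getFieldNames() is mostly useful for inspecting an index you did not build
    yourself. A minimal sketch, assuming Lucene 3.x and a Directory d that already contains an index:

    IndexReader reader = IndexReader.open(d, true);
    for (String name : reader.getFieldNames(IndexReader.FieldOption.INDEXED)) {
        System.out.println("indexed field: " + name);
    }
    for (String name : reader.getFieldNames(IndexReader.FieldOption.UNINDEXED)) {
        System.out.println("unindexed (stored-only) field: " + name);
    }
    reader.close();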

Examples of org.apache.lucene.store.Directory

        reader.close();
        d.close();
    }

  public void testTermVectors() throws Exception {
    Directory d = newDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(
        d,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
            setMergePolicy(newLogMergePolicy())
    );
    // want to get some more segments here
    // new termvector fields
    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
    for (int i = 0; i < 5 * mergeFactor; i++) {
      Document doc = new Document();
        doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
        doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
        doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
        doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
        doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));

        writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(d, false);
    FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
    reader.getTermFreqVector(0, mapper);
    Map<String,SortedSet<TermVectorEntry>> map = mapper.getFieldToTerms();
    assertTrue("map is null and it shouldn't be", map != null);
    assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
    Set<TermVectorEntry> set = map.get("termvector");
    for (Iterator<TermVectorEntry> iterator = set.iterator(); iterator.hasNext();) {
      TermVectorEntry entry =  iterator.next();
      assertTrue("entry is null and it shouldn't be", entry != null);
      if (VERBOSE) System.out.println("Entry: " + entry);
    }
    reader.close();
    d.close();
  }
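
  A single field's term vector can also be read directly, without a TermVectorMapper. A minimal
  sketch, assuming an index like the one built above, where the field "termvector" stores term vectors:

  IndexReader reader = IndexReader.open(d, true);
  TermFreqVector tfv = reader.getTermFreqVector(0, "termvector");
  String[] terms = tfv.getTerms();
  int[] freqs = tfv.getTermFrequencies();
  for (int i = 0; i < terms.length; i++) {
    System.out.println(terms[i] + " occurs " + freqs[i] + " time(s) in doc 0");
  }
  reader.close();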

Examples of org.codehaus.mojo.appassembler.model.Directory

            Object object = it.next();

            if ( object instanceof Directory )
            {
                Directory directory = (Directory) object;

                if ( directory.getRelativePath().charAt( 0 ) != '/' )
                {
                    classpathBuffer.append( getBasedir() ).append( getSeparator() );
                }
            }
            else if ( object instanceof Dependency )

Examples of org.entityfs.Directory

  }

  @Override
  public JavaFileObject getJavaFileForOutput(JavaFileManager.Location loc, String className, JavaFileObject.Kind kind, FileObject sibling)
  {
    Directory dir;
    int pos = className.lastIndexOf('.');
    if (pos >= 0)
    {
      dir = Directories.putIfAbsentDirectory(m_outDir, new RelativeLocation(className.substring(0, pos).replace('.', '/')));
      className = className.substring(pos + 1);

Examples of org.evolizer.model.resources.entities.fs.Directory

     * @param path
     *            the Path of the directory to create
     * @return the {@link Directory} created
     */
    private Directory createDirectory(String path) {
        Directory directory = fDirectories.get(path);
        if (directory == null) {
            directory = new Directory(path, null);
            Directory parent = null;
            if (path != null) {
                String parentPath = this.getParentDirectoryPath(path);
                if (parentPath != null) {
                    parent = fDirectories.get(parentPath);
                    if (parent == null) {
                        parent = createDirectory(parentPath);
                    }
                    directory.setParentDirectory(parent);
                    parent.add(directory);
                }
            }
            fDirectories.put(path, directory);
            LOGGER.debug("Created Directory [" + directory + "]");
        }
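
    The method above recursively creates and caches the parent chain of every directory path it
    sees. The same get-or-create pattern, reduced to a self-contained sketch with a hypothetical
    Dir class (the real evolizer Directory entity carries persistence state not shown here):

    import java.util.HashMap;
    import java.util.Map;

    class Dir {
        final String path;
        Dir parent;
        Dir(String path) { this.path = path; }
    }

    class DirRegistry {
        private final Map<String, Dir> dirs = new HashMap<String, Dir>();

        Dir getOrCreate(String path) {
            Dir dir = dirs.get(path);
            if (dir == null) {
                dir = new Dir(path);
                int slash = path.lastIndexOf('/');
                if (slash > 0) {
                    // e.g. "a/b/c" -> parent "a/b", created on demand
                    dir.parent = getOrCreate(path.substring(0, slash));
                }
                dirs.put(path, dir);
            }
            return dir;
        }
    }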

Examples of org.geoserver.importer.Directory

        if (!uploadDir.exists()) {
            throw new RuntimeException("Unable to create directory for file upload");
        }

        // pass off the uploaded file to the importer
        Directory dir = new Directory(uploadDir);
        dir.accept(files.next());

        return doImport(dir, ws);
    }

Examples of org.gradle.api.tasks.Directory

    }

    public Directory dir(String path) {
        String[] pathElements = path.split("/");
        String name = "";
        Directory dirTask = null;
        for (String pathElement : pathElements) {
            name += name.length() != 0 ? "/" + pathElement : pathElement;
            Task task = taskContainer.findByName(name);
            if (task instanceof Directory) {
                dirTask = (Directory) task;