Examples of AddLuceneWork


Examples of org.hibernate.search.backend.AddLuceneWork
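
AddLuceneWork is the LuceneWork subtype that Hibernate Search uses to represent adding a single entity's Lucene Document to an index. The excerpts below are drawn from several versions of the Hibernate Search codebase and its tests, each cut down to the region where an AddLuceneWork is created.

In this first excerpt, a factory method builds the entity's Document and then picks a constructor based on whether any per-field analyzer overrides were collected into fieldToAnalyzerMap: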

  public AddLuceneWork createAddWork(Class<T> entityClass, T entity, Serializable id, String idInString, InstanceInitializer sessionInitializer, ConversionContext conversionContext) {
    Map<String, String> fieldToAnalyzerMap = new HashMap<String, String>();
    Document doc = getDocument( entity, id, fieldToAnalyzerMap, sessionInitializer, conversionContext );
    final AddLuceneWork addWork;
    if ( fieldToAnalyzerMap.isEmpty() ) {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc );
    }
    else {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, fieldToAnalyzerMap );
    }
    return addWork;
  }
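
To make the pattern concrete, here is a minimal, self-contained sketch. It is not taken from the indexed sources above: the Book entity (an Integer id plus a String title) is hypothetical, and the Lucene 3.x Field API is assumed, matching the era of most snippets on this page.

  import org.apache.lucene.document.Document;
  import org.apache.lucene.document.Field;
  import org.hibernate.search.backend.AddLuceneWork;

  public class AddLuceneWorkSketch {

    // Hypothetical entity, used only for this sketch.
    static class Book {
      private final Integer id;
      private final String title;
      Book(Integer id, String title) { this.id = id; this.title = title; }
      Integer getId() { return id; }
      String getTitle() { return title; }
    }

    // Build the Document by hand, then wrap it the way the factory
    // methods on this page do. With no per-field analyzer overrides,
    // the constructor without a fieldToAnalyzerMap is sufficient.
    public static AddLuceneWork sketchAddWork(Book book) {
      Document doc = new Document();
      doc.add( new Field( "id", String.valueOf( book.getId() ),
          Field.Store.YES, Field.Index.NOT_ANALYZED ) );
      doc.add( new Field( "title", book.getTitle(),
          Field.Store.NO, Field.Index.ANALYZED ) );
      return new AddLuceneWork( book.getId(), String.valueOf( book.getId() ),
          Book.class, doc );
    }
  }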

Examples of org.hibernate.search.backend.AddLuceneWork
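
An older version of the same factory: the extra isBatch flag is forwarded to the AddLuceneWork constructors to mark work produced during batch indexing: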

  public AddLuceneWork createAddWork(Class<T> entityClass, T entity, Serializable id, String idInString, boolean isBatch) {
    Map<String, String> fieldToAnalyzerMap = new HashMap<String, String>();
    Document doc = getDocument( entity, id, fieldToAnalyzerMap );
    AddLuceneWork addWork;
    if ( fieldToAnalyzerMap.isEmpty() ) {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, isBatch );
    }
    else {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, fieldToAnalyzerMap, isBatch );
    }
    return addWork;
  }

Examples of org.hibernate.search.backend.AddLuceneWork
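
A test fixture that assembles a Document field by field (using the Lucene 2.x TOKENIZED/UN_TOKENIZED constants), wraps it in an AddLuceneWork, and queues it; the JNDI InitialContext that follows suggests the queue is about to be sent to a JMS backend: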

    doc.add( field );
    field = new Field("id", "1", Field.Store.YES, Field.Index.UN_TOKENIZED );
    doc.add( field );
    field = new Field("logo", ts.getLogo(), Field.Store.NO, Field.Index.TOKENIZED );
    doc.add( field );
    LuceneWork luceneWork = new AddLuceneWork(ts.getId(), String.valueOf( ts.getId() ), ts.getClass(), doc );
    List<LuceneWork> queue = new ArrayList<LuceneWork>();
    queue.add( luceneWork );

    //send the queue
    InitialContext context = new InitialContext();

Examples of org.hibernate.search.backend.AddLuceneWork
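
The analyzer-map-aware factory again, here receiving a ClassNavigator (as sessionInitializer) that is passed through to getDocument when the entity's Document is built: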

  public AddLuceneWork createAddWork(Class<T> entityClass, T entity, Serializable id, String idInString, ClassNavigator sessionInitializer) {
    Map<String, String> fieldToAnalyzerMap = new HashMap<String, String>();
    Document doc = getDocument( entity, id, fieldToAnalyzerMap, sessionInitializer );
    final AddLuceneWork addWork;
    if ( fieldToAnalyzerMap.isEmpty() ) {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc );
    }
    else {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, fieldToAnalyzerMap );
    }
    return addWork;
  }

Examples of org.hibernate.search.backend.AddLuceneWork
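
A private variant of the isBatch-aware factory shown earlier; apart from its visibility it is identical: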

  private AddLuceneWork createAddWork(Class<T> entityClass, T entity, Serializable id, String idInString, boolean isBatch) {
    Map<String, String> fieldToAnalyzerMap = new HashMap<String, String>();
    Document doc = getDocument( entity, id, fieldToAnalyzerMap );
    AddLuceneWork addWork;
    if ( fieldToAnalyzerMap.isEmpty() ) {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, isBatch );
    }
    else {
      addWork = new AddLuceneWork( id, idInString, entityClass, doc, fieldToAnalyzerMap, isBatch );
    }
    return addWork;
  }

Examples of org.hibernate.search.backend.AddLuceneWork
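
Another test fixture, this time mixing an analyzed string field with a numeric DoubleField before queuing the AddLuceneWork: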

    doc.add( field );
    field = new Field( "logo", shirt.getLogo(), Field.Store.NO, Field.Index.ANALYZED );
    doc.add( field );
    DoubleField numField = new DoubleField( "length", shirt.getLength(), Field.Store.NO );
    doc.add( numField );
    LuceneWork luceneWork = new AddLuceneWork(
        shirt.getId(), String.valueOf( shirt.getId() ), shirt.getClass(), doc
    );
    List<LuceneWork> queue = new ArrayList<LuceneWork>();
    queue.add( luceneWork );
    return queue;

Examples of org.hibernate.search.backend.AddLuceneWork
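
A test that saves an entity, grabs the processed queue from a LeakingLuceneBackend, and inspects the resulting AddLuceneWork's Document to check which fields store norms: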

    fullTextSession.save( test );
    tx.commit();

    List<LuceneWork> processedQueue = LeakingLuceneBackend.getLastProcessedQueue();
    assertEquals( 1, processedQueue.size() );
    AddLuceneWork addLuceneWork = (AddLuceneWork) processedQueue.get( 0 );
    Document doc = addLuceneWork.getDocument();

    IndexableField implicitNormField = doc.getField( "withNormsImplicit" );
    assertFalse( "norms should be stored for this field", implicitNormField.fieldType().omitNorms() );

    IndexableField explicitNormField = doc.getField( "withNormsExplicit" );

Examples of org.hibernate.search.backend.AddLuceneWork
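
A helper that fabricates placeholder work: two AddLuceneWork instances wrapping empty Documents, handy wherever a non-empty queue is needed: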

  private List<LuceneWork> makeSomeWork() {
    List<LuceneWork> list = new LinkedList<>();
    //just some random data:
    list.add( new AddLuceneWork( Integer.valueOf( 5 ), "id:5", Book.class, new Document() ) );
    list.add( new AddLuceneWork( Integer.valueOf( 6 ), "id:6", Book.class, new Document() ) );
    return list;
  }

Examples of org.hibernate.search.backend.AddLuceneWork
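
Deserialization-side code: the entity class is resolved by name through ClassLoaderHelper, and a new AddLuceneWork is assembled from the transported id, Document, and field-to-analyzer map: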

    Class<?> entityClass = ClassLoaderHelper.classForName(
        entityClassName,
        "entity class",
        searchFactory.getServiceManager()
    );
    LuceneWork result = new AddLuceneWork(
        id,
        objectIdInString( entityClass, id, conversionContext ),
        entityClass,
        getLuceneDocument(),
        fieldToAnalyzerMap
    );

Examples of org.hibernate.search.backend.AddLuceneWork
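
The longest excerpt appears to come from the work-serialization tests (note AvroTestHelpers and CopyTokenStream): it builds Documents covering numeric, plain string, binary, reader-backed, and token-stream fields so that AddLuceneWork and UpdateLuceneWork instances can be round-tripped: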

    numField = new LongField( "long", 23L, Store.NO );
    doc.add( numField );

    Map<String, String> analyzers = new HashMap<String, String>();
    analyzers.put( "godo", "ngram" );
    works.add( new AddLuceneWork( 123, "123", RemoteEntity.class, doc, analyzers ) );

    doc = new Document();
    Field field = new Field(
        "StringF",
        "String field",
        Field.Store.YES,
        Field.Index.ANALYZED,
        Field.TermVector.WITH_OFFSETS
    );
//    field.setOmitNorms( true );
//    field.setOmitTermFreqAndPositions( true );
    field.setBoost( 3f );
    doc.add( field );

    field = new Field(
        "StringF2",
        "String field 2",
        Field.Store.YES,
        Field.Index.ANALYZED,
        Field.TermVector.WITH_OFFSETS
    );
    doc.add( field );

    byte[] array = new byte[4];
    array[0] = 2;
    array[1] = 5;
    array[2] = 5;
    array[3] = 8;
    field = new Field( "binary", array, 0, array.length );
    doc.add( field );

    SerializableStringReader reader = new SerializableStringReader();
    field = new Field( "ReaderField", reader, Field.TermVector.WITH_OFFSETS );
    doc.add( field );

    List<List<AttributeImpl>> tokens = AvroTestHelpers.buildTokenSteamWithAttributes();

    CopyTokenStream tokenStream = new CopyTokenStream( tokens );
    field = new Field( "tokenstream", tokenStream, Field.TermVector.WITH_POSITIONS_OFFSETS );
//    field.setOmitNorms( true );
//    field.setOmitTermFreqAndPositions( true );
    field.setBoost( 3f );
    doc.add( field );

    works.add( new UpdateLuceneWork( 1234, "1234", RemoteEntity.class, doc ) );
    works.add( new AddLuceneWork( 125, "125", RemoteEntity.class, new Document() ) );
    return works;
  }