// a delete for this entity is already queued; adding another would be redundant
if ( duplicateDelete ) return;
String idInString = idBridge.objectToString( id ); // string form of the id, as stored in the index
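// ADD: index a new entity; build its Lucene Document and queue the addition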
if ( workType == WorkType.ADD ) {
Document doc = getDocument( entity, id );
queue.add( new AddLuceneWork( id, idInString, entityClass, doc ) );
}
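// DELETE / PURGE: remove the entity's document from the index; no Document is built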
else if ( workType == WorkType.DELETE || workType == WorkType.PURGE ) {
queue.add( new DeleteLuceneWork( id, idInString, entityClass ) );
}
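// PURGE_ALL: remove every document of this entity type from the index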
else if ( workType == WorkType.PURGE_ALL ) {
queue.add( new PurgeAllLuceneWork( entityClass ) );
}
else if ( workType == WorkType.UPDATE || workType == WorkType.COLLECTION ) {
Document doc = getDocument( entity, id );
/*
 * Even with Lucene 2.1, using the indexWriter to update is not an option:
 * we can only delete by term, and the index does not have a term that
 * uniquely identifies the entry.
 * Essentially the optimization we do here is the same one Lucene performs
 * internally; the only extra cost is opening the index twice (once to
 * delete, once to add).
 */
queue.add( new DeleteLuceneWork( id, idInString, entityClass ) );
queue.add( new AddLuceneWork( id, idInString, entityClass, doc ) );
}
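// INDEX: same delete + add as an update; the extra boolean presumably marks the work as part of a batch indexing run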
else if ( workType == WorkType.INDEX ) {
Document doc = getDocument( entity, id );
queue.add( new DeleteLuceneWork( id, idInString, entityClass ) );
queue.add( new AddLuceneWork( id, idInString, entityClass, doc, true ) );
}
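// any other WorkType is unexpected here and indicates a bug in the caller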
else {
throw new AssertionFailure( "Unknown WorkType: " + workType );
}
}
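
/*
 * A sketch, not current behavior: if the index did store a single term that
 * uniquely identified each entry (say, a hypothetical "uniqueKey" field
 * combining entity class and id), the delete + add pairs above could be
 * collapsed into one call on Lucene 2.1+:
 *
 *     indexWriter.updateDocument( new Term( "uniqueKey", entityClass.getName() + '#' + idInString ), doc );
 *
 * updateDocument() performs the same delete-by-term followed by an add, but
 * within a single writer session, avoiding the double file opening noted in
 * the comment above.
 */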