// No point going any further.
return results;
}
// Setup the converter
// NOTE(review): the converter is taken from the FIRST index only — this assumes
// every index in 'indexes' shares the same entry converter; confirm with callers.
LuceneEntryConverter converter = null;
RepositoryContentIndex index = (RepositoryContentIndex) indexes.get( 0 );
converter = index.getEntryConverter();
// Process indexes into an array of Searchables.
List searchableList = new ArrayList( indexes );
CollectionUtils.transform( searchableList, searchableTransformer );
Searchable searchables[] = new Searchable[searchableList.size()];
searchableList.toArray( searchables );
// NOTE(review): MultiSearcher is a closeable resource; its close() is not
// visible in this chunk — confirm it is released in a finally block below.
MultiSearcher searcher = null;
try
{
// Create a multi-searcher for looking up the information.
searcher = new MultiSearcher( searchables );
// Perform the search.
Hits hits = searcher.search( specificQuery );
int hitCount = hits.length();
// Now process the limits.
results.setLimits( limits );
results.setTotalHits( hitCount );
// Paging window: fetch up to 'pageSize' hits starting at page * pageSize.
int fetchCount = limits.getPageSize();
int offset = ( limits.getSelectedPage() * limits.getPageSize() );
if ( limits.getSelectedPage() == SearchResultLimits.ALL_PAGES )
{
// ALL_PAGES: return every available hit from the beginning.
fetchCount = hitCount;
offset = 0;
}
// Goto offset.
if ( offset < hitCount )
{
// only process if the offset is within the hit count.
// NOTE(review): off-by-one — 'i <= fetchCount' iterates fetchCount + 1
// times, so a page can return pageSize + 1 hits (the guard inside the loop
// only caps at the TOTAL hit count, not the page size). Likely should be
// 'i < fetchCount'; confirm against SearchResults expectations before fixing.
for ( int i = 0; i <= fetchCount; i++ )
{
// Stop fetching if we are past the total # of available hits.
if ( offset + i >= hitCount )
{
break;
}
try
{
// Convert each Lucene Document back into a repository content record.
Document doc = hits.doc( offset + i );
LuceneRepositoryContentRecord record = converter.convert( doc );
results.addHit( record );
}
catch ( java.text.ParseException e )
{
// Best-effort: skip documents that cannot be converted instead of
// failing the entire search; the warning preserves the cause.
getLogger().warn( "Unable to parse document into record: " + e.getMessage(), e );