// Normalize the incoming page name; presumably also validates that the
// page exists (name suggests a check) -- TODO confirm against the helper.
pagename = parsePageCheckCondition( pagename );
WikiPage page = m_engine.getPage( pagename );
String pagedata = m_engine.getPureText( page );
// One collector per link category produced during rendering:
// internal wiki links, external URLs, and attachment links respectively.
LinkCollector localCollector = new LinkCollector();
LinkCollector extCollector = new LinkCollector();
LinkCollector attCollector = new LinkCollector();
WikiContext context = new WikiContext( m_engine, page );
// Force absolute URLs -- NOTE(review): presumably because RPC clients
// resolve links outside this server's context.
context.setVariable( WikiEngine.PROP_REFSTYLE, "absolute" );
// Render the page text; the HTML return value is discarded -- we only
// want the links gathered into the three collectors as a side effect.
m_engine.textToHTML( context,
pagedata,
localCollector,
extCollector,
attCollector );
// Vector/Hashtable (rather than List/Map) keeps the result directly
// serializable by the XML-RPC layer.
Vector<Hashtable<String, String>> result = new Vector<Hashtable<String, String>>();
//
//  Add local links.  Each link becomes a Hashtable with "page",
//  "type" and "href" keys, as expected by the XML-RPC client.
//
//  Enhanced for-loop replaces the previous raw-type Iterator + cast.
//
for( Object o : localCollector.getLinks() )
{
    String link = (String) o;
    Hashtable<String, String> ht = new Hashtable<String, String>();
    ht.put( "page", toRPCString( link ) );
    ht.put( "type", LINK_LOCAL );

    //
    //  FIXME: The link format should really be queried from the
    //  renderer itself, and should carry existence information;
    //  the current LinkCollector interface forces this check here.
    //
    //  Existing pages link to the view URL; missing pages link to
    //  the edit URL (the usual "create this page" behavior).
    //
    if( m_engine.pageExists( link ) )
    {
        ht.put( "href", context.getURL( WikiContext.VIEW, link ) );
    }
    else
    {
        ht.put( "href", context.getURL( WikiContext.EDIT, link ) );
    }

    result.add( ht );
}
//
//  Add links to inline attachments.  Each entry gets the same
//  Hashtable structure ("page"/"type"/... keys) built for local links.
//
for( Iterator i = attCollector.getLinks().iterator(); i.hasNext(); )
{
// Raw-type iterator; the collector's link entries are cast to String.
String link = (String) i.next();
Hashtable<String, String> ht = new Hashtable<String, String>();