  {
  // heads that carry literal tuples (for example, from a VALUES expression)
  // are materialized first so they can be read as a regular source
  if( head.tuples != null )
    writeValuesTuple( platformBroker, head );
  }
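
// Create a LingualFlowFactory for the current branch and record the Volcano
// planner's SQL plan; the given properties control whether/where the plan is written.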
LingualFlowFactory flowFactory = platformBroker.getFlowFactory( branch );
Optiq.writeSQLPlan( properties, flowFactory.getName(), getVolcanoPlanner() );
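
// Register each head of the branch as a flow source, together with any jar
// paths its table needs on the classpath.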
for( Ref head : branch.heads.keySet() )
  {
  TableDef tableDefFor = getTableDefFor( platformBroker, head );
  String[] jarPath = ClassLoaderUtil.getJarPaths( platformBroker, tableDefFor );
  flowFactory.addSource( head.name, tableDefFor, jarPath );
  }
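
// Wire the sink: a branch that ends in a known table (tailTableDef) writes into
// that table directly; otherwise the results land in a newly created result resource.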
FlowListener flowListener = null;

if( branch.tailTableDef != null )
  {
  TableDef tableDef = branch.tailTableDef;
  String[] jarPath = ClassLoaderUtil.getJarPaths( platformBroker, tableDef );
  flowFactory.addSink( tableDef.getName(), tableDef, jarPath );
  }
else
  {
  Resource<Protocol, Format, SinkMode> resource = createResultResource( platformBroker, flowFactory );
  flowFactory.addSink( branch.current.getName(), resource );

  // if a result schema is available, register the result table with it once the flow completes
  if( platformBroker.hasResultSchemaDef() )
    flowListener = new AddResultTableListener( platformBroker, flowFactory.getLingualConnection() );
  }
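
// Resolve (and record in the properties) the path where the generated flow plan
// will be written.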
String flowPlanPath = setFlowPlanPath( properties, flowFactory.getName() );
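
// Build a ClassLoader spanning the jars gathered for this flow's sources and sinks, if any.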
ClassLoader jarLoader = ClassLoaderUtil.getJarClassLoader( platformBroker, flowFactory );
// set the context ClassLoader _before_ creating the flow so that all classes can be
// loaded into the JobConf object (in the case of the Hadoop platform)
if( jarLoader != null )
  Thread.currentThread().setContextClassLoader( jarLoader );
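
// Plan and create the Flow; Cascading throws a PlannerException if the
// pipe assembly cannot be planned.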
Flow flow;

try
  {
  flow = flowFactory.create();
  }
catch( PlannerException exception )
  {
  LOG.error( "planner failed", exception );