    completedPartitionThreads = new ArrayBlockingQueue<Boolean>(numOfPartitions);
}
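//channel through which partitions send intermediate collector data and their final PartitionExecutionImpl back to this thread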
collectorDataQueue = new LinkedBlockingQueue<Serializable>();
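//create, configure, and submit a runner for each partition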
for (int i = 0; i < numOfPartitions; i++) {
    final PartitionExecutionImpl partitionExecution = isRestartNotOverride ? abortedPartitionExecutionsFromPrevious.get(i) : null;
    final int partitionIndex = isRestartNotOverride ? partitionExecution.getPartitionId() : i;
    final AbstractRunner<StepContextImpl> runner1;
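    //clone the step context so each partition works on its own copy of the step and step execution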
    final StepContextImpl stepContext1 = batchContext.clone();
    final Step step1 = stepContext1.getStep();
    final PartitionExecutionImpl partitionExecution1 = (PartitionExecutionImpl) stepContext1.getStepExecution();
    final PropertyResolver resolver = new PropertyResolver();
    if (partitionIndex >= 0 && partitionIndex < partitionProperties.length) {
        resolver.setPartitionPlanProperties(partitionProperties[partitionIndex]);
        //associate the partition represented by this PartitionExecutionImpl with this index into the partition
        //properties, so that if this partition fails or is stopped, a restart can select the same partition properties
        partitionExecution1.setPartitionId(partitionIndex);
    } else {
        //some partitioned steps may not have any partition properties
        partitionExecution1.setPartitionId(i);
    }
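    //substitute partition plan properties referenced in this partition's copy of the step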
    resolver.setResolvePartitionPlanProperties(true);
    resolver.resolve(step1);
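    //on restart without override, carry over persistent user data and reader/writer checkpoints from the previous run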
    if (isRestartNotOverride) {
        stepContext1.setPersistentUserData(partitionExecution.getPersistentUserData());
        stepContext1.getStepExecution().setReaderCheckpointInfo(partitionExecution.getReaderCheckpointInfo());
        stepContext1.getStepExecution().setWriterCheckpointInfo(partitionExecution.getWriterCheckpointInfo());
    }
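    //restarting with override="true" discards previous partition results, so give the PartitionReducer a chance to roll back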
    if (isRestart && isOverride && reducer != null) {
        reducer.rollbackPartitionedStep();
    }
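    //a step contains either a batchlet or a chunk; choose the matching runner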
    final Chunk ch = step1.getChunk();
    if (ch == null) {
        runner1 = new BatchletRunner(stepContext1, enclosingRunner, this, step1.getBatchlet());
    } else {
        runner1 = new ChunkRunner(stepContext1, enclosingRunner, this, ch);
    }
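    //limit concurrency to numOfThreads: block until a running partition signals completion before submitting another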
    if (i >= numOfThreads) {
        completedPartitionThreads.take();
    }
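    //record the partition execution in the job repository, then hand the runner to the batch environment for execution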
    jobContext.getJobRepository().addPartitionExecution(stepExecution, partitionExecution1);
    jobContext.getBatchEnvironment().submitTask(runner1);
}
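
//consolidate results from all partitions; a FAILED partition takes precedence over STOPPED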
BatchStatus consolidatedBatchStatus = BatchStatus.STARTED;
final List<PartitionExecutionImpl> fromAllPartitions = new ArrayList<PartitionExecutionImpl>();
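//begin a transaction on this thread; the PartitionAnalyzer callbacks below run within its scope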
tm.begin();
try {
    while (fromAllPartitions.size() < numOfPartitions) {
        final Serializable data = collectorDataQueue.take();
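        //a PartitionExecutionImpl signals a terminated partition; any other payload is intermediate collector data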
        if (data instanceof PartitionExecutionImpl) {
            final PartitionExecutionImpl s = (PartitionExecutionImpl) data;
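            //for chunk-type steps, fold this partition's metrics into the parent step's metrics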
            if (step.getChunk() != null) {
                stepExecution.getStepMetrics().addStepMetrics(s.getStepMetrics());
            }
            //save status and data for the terminated partition
            jobContext.getJobRepository().savePersistentData(jobContext.getJobExecution(), s);
            fromAllPartitions.add(s);
            final BatchStatus bs = s.getBatchStatus();
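            //propagate failure or stop: FAILED wins over STOPPED, and the first partition exception is surfaced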
            if (bs == BatchStatus.FAILED || bs == BatchStatus.STOPPED) {
                if (consolidatedBatchStatus != BatchStatus.FAILED) {
                    consolidatedBatchStatus = bs;
                }
                if (batchContext.getException() == null && s.getException() != null) {
                    batchContext.setException(s.getException());
                }
            }
            if (analyzer != null) {
                analyzer.analyzeStatus(bs, s.getExitStatus());
            }
        } else if (analyzer != null) {
            analyzer.analyzeCollectorData(data);
        }
    }