return referenceModelForNoVariation(originalActiveRegion, false);
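// Trim the assembly result down to the callable region when the trimmer requested it; otherwise genotype the untrimmed result.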
final AssemblyResultSet assemblyResult =
trimmingResult.needsTrimming() ? untrimmedAssemblyResult.trimTo(trimmingResult.getCallableRegion()) : untrimmedAssemblyResult;
final ActiveRegion regionForGenotyping = assemblyResult.getRegionForGenotyping();
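// regionForGenotyping is the (possibly trimmed) active region whose reads are evaluated and genotyped below.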
// filter out reads from genotyping which fail mapping-quality-based criteria
//TODO - why don't we do this before any assembly is done? Why not just once at the beginning of this method,
//TODO - on the originalActiveRegion?
//TODO - if you move this up you may also need to change referenceModelForNoVariation,
//TODO - which also filters reads.
final Collection<GATKSAMRecord> filteredReads = filterNonPassingReads( regionForGenotyping );
final Map<String, List<GATKSAMRecord>> perSampleFilteredReadList = splitReadsBySample( filteredReads );
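// Keep the filtered reads around, split by sample; they are handed to the genotyping engine below
// (presumably so downstream annotations can still account for the reads removed here).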
// abort early if something is out of the acceptable range
// TODO: is this ever true at this point? Perhaps in GGA (genotype-given-alleles) mode. Need to check.
if( ! assemblyResult.isVariationPresent() && ! disableOptimizations)
return referenceModelForNoVariation(originalActiveRegion, false);
// This is certainly not true when gVCF mode is on.
if (dontGenotype) return NO_CALLS; // user requested we not proceed
// TODO: is this ever true at this point? Perhaps in GGA (genotype-given-alleles) mode. Need to check.
if( regionForGenotyping.size() == 0 && ! disableOptimizations) {
// no reads remain after filtering so nothing else to do!
return referenceModelForNoVariation(originalActiveRegion, false);
}
// evaluate each sample's reads against all haplotypes
//logger.info("Computing read likelihoods with " + assemblyResult.regionForGenotyping.size() + " reads");
final List<Haplotype> haplotypes = assemblyResult.getHaplotypeList();
final Map<String,List<GATKSAMRecord>> reads = splitReadsBySample( regionForGenotyping.getReads() );
// Calculate the likelihoods: CPU intensive part.
final ReadLikelihoods<Haplotype> readLikelihoods =
likelihoodCalculationEngine.computeReadLikelihoods(assemblyResult, samplesList, reads);
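// readLikelihoods holds, per sample, the likelihood of each read given each candidate haplotype.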
// Realign reads to their best haplotype.
final Map<GATKSAMRecord,GATKSAMRecord> readRealignments = realignReadsToTheirBestHaplotype(readLikelihoods, assemblyResult.getPaddedReferenceLoc());
readLikelihoods.changeReads(readRealignments);
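// changeReads swaps each original read for its realigned counterpart inside the likelihoods container,
// so downstream genotyping and annotation work with the haplotype-aligned reads.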
// Note: we used to subset down at this point to only the "best" haplotypes in all samples for genotyping, but there
// was a bad interaction between that selection and the marginalization that happens over each event when computing
// GLs. In particular, for samples that are heterozygous non-reference (B/C) the marginalization for B treats the
// haplotype containing C as reference (and vice versa). Now this is fine if all possible haplotypes are included
// in the genotyping, but we lose information if we select down to a few haplotypes. [EB]
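// Genotype the region: the engine marginalizes the read-by-haplotype likelihoods into per-event genotype
// likelihoods and emits the resulting variant calls.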
final HaplotypeCallerGenotypingEngine.CalledHaplotypes calledHaplotypes = genotypingEngine.assignGenotypeLikelihoods(
haplotypes,
readLikelihoods,
perSampleFilteredReadList,
assemblyResult.getFullReferenceWithPadding(),
assemblyResult.getPaddedReferenceLoc(),
regionForGenotyping.getLocation(),
getToolkit().getGenomeLocParser(),
metaDataTracker,
(consensusMode ? Collections.<VariantContext>emptyList() : givenAlleles),
emitReferenceConfidence());
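// calledHaplotypes bundles the emitted variant calls together with the haplotypes actually used in those calls.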