        serializationFactory));
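    // A grouped split that spans a single rack carries a rack hint instead
    // of host locations; prefer the rack-level hint when it is present.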
    String rack =
        ((org.apache.hadoop.mapreduce.split.TezGroupedSplit) split).getRack();
    if (rack == null) {
      if (split.getLocations() != null) {
        locationHints.add(new TaskLocationHint(
            new HashSet<String>(Arrays.asList(split.getLocations())), null));
      } else {
        locationHints.add(new TaskLocationHint(null, null));
      }
    } else {
      locationHints.add(new TaskLocationHint(null,
          Collections.singleton(rack)));
    }
  }
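  // Keep the serialized splits and their location hints in memory
  // (InputSplitInfoMem) rather than writing them out as split files.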
  inputSplitInfo = new InputSplitInfoMem(splitsBuilder.build(),
      locationHints, splits.length);
} else {
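  // Old (mapred) API: same grouping and location-hint logic as above,
  // using the mapred split classes.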
LOG.info("Grouping mapred api input splits");
org.apache.hadoop.mapred.InputSplit[] splits = MRHelpers
.generateOldSplits(jobConf, realInputFormatName,
rootInputContext.getNumTasks());
List<TaskLocationHint> locationHints = Lists
.newArrayListWithCapacity(splits.length);
MRSplitsProto.Builder splitsBuilder = MRSplitsProto.newBuilder();
for (org.apache.hadoop.mapred.InputSplit split : splits) {
splitsBuilder.addSplits(MRHelpers.createSplitProto(split));
    String rack =
        ((org.apache.hadoop.mapred.split.TezGroupedSplit) split).getRack();
    if (rack == null) {
      if (split.getLocations() != null) {
        locationHints.add(new TaskLocationHint(
            new HashSet<String>(Arrays.asList(split.getLocations())), null));
      } else {
        locationHints.add(new TaskLocationHint(null, null));
      }
    } else {
      locationHints.add(new TaskLocationHint(null,
          Collections.singleton(rack)));
    }
  }
  inputSplitInfo = new InputSplitInfoMem(splitsBuilder.build(),
      locationHints, splits.length);