// Build one TaskLocationHint per grouped split: prefer the rack recorded by
// the split grouper when present, otherwise fall back to the split's host
// locations; with neither, emit an empty hint.
for (org.apache.hadoop.mapreduce.InputSplit groupedSplit : splits) {
  String rack =
      ((org.apache.hadoop.mapreduce.split.TezGroupedSplit) groupedSplit).getRack();
  if (rack != null) {
    // Rack-level hint only — no specific host set.
    locationHints.add(new TaskLocationHint(null, Collections.singleton(rack)));
  } else if (groupedSplit.getLocations() != null) {
    // Host-level hint built from the split's reported locations.
    locationHints.add(new TaskLocationHint(
        new HashSet<String>(Arrays.asList(groupedSplit.getLocations())), null));
  } else {
    // No placement information available for this split.
    locationHints.add(new TaskLocationHint(null, null));
  }
}
inputSplitInfo = new InputSplitInfoMem(splits, locationHints, splits.length, null, conf);
} else {
  // Old (mapred) API path: generate grouped splits, then derive one
  // TaskLocationHint per split — rack hint if the grouper recorded a rack,
  // otherwise the split's host locations, otherwise an empty hint.
  LOG.info("Grouping mapred api input splits");
  org.apache.hadoop.mapred.InputSplit[] oldSplits =
      MRHelpers.generateOldSplits(jobConf, realInputFormatName, numTasks);
  List<TaskLocationHint> hints = Lists.newArrayListWithCapacity(oldSplits.length);
  for (org.apache.hadoop.mapred.InputSplit oldSplit : oldSplits) {
    String rack =
        ((org.apache.hadoop.mapred.split.TezGroupedSplit) oldSplit).getRack();
    if (rack != null) {
      // Rack-level hint only — no specific host set.
      hints.add(new TaskLocationHint(null, Collections.singleton(rack)));
    } else if (oldSplit.getLocations() != null) {
      // Host-level hint built from the split's reported locations.
      hints.add(new TaskLocationHint(
          new HashSet<String>(Arrays.asList(oldSplit.getLocations())), null));
    } else {
      // No placement information available for this split.
      hints.add(new TaskLocationHint(null, null));
    }
  }
  inputSplitInfo = new InputSplitInfoMem(oldSplits, hints, oldSplits.length, null, conf);
}