        init((ShardInputSplit) split, compatContext.getConfiguration(), compatContext);
    }
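    /**
     * Prepares the reader for the given shard split: overlays the split settings on top of the job
     * configuration, creates the scroll reader and REST client for the target shard and sets up the heart-beat.
     */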
    void init(ShardInputSplit esSplit, Configuration cfg, Progressable progressable) {
        // work on a copy of the job settings, overlaid with the split settings (which carry the target node host/port)
        Settings settings = HadoopSettingsManager.loadFrom(cfg).copy().load(esSplit.settings);
        if (log.isTraceEnabled()) {
            log.trace(String.format("Init shard reader from cfg %s", HadoopCfgUtils.asProperties(cfg)));
            log.trace(String.format("Init shard reader w/ settings %s", esSplit.settings));
        }

        this.esSplit = esSplit;
        // initialize the mapping/scroll reader
        InitializationUtils.setValueReaderIfNotSet(settings, WritableValueReader.class, log);
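        // describe the partition (target node, shard id, mapping and serialized settings) and let RestService build a reader scoped to it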
        PartitionDefinition part = new PartitionDefinition(esSplit.nodeIp, esSplit.httpPort, esSplit.nodeName, esSplit.nodeId,
                esSplit.shardId, settings.save(), esSplit.mapping);
        PartitionReader partitionReader = RestService.createReader(settings, part, log);
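        // keep the pieces needed while consuming the split: the scroll parser, the REST client and the query run against the shard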
        this.scrollReader = partitionReader.scrollReader;
        this.client = partitionReader.client;
        this.queryBuilder = partitionReader.queryBuilder;
        // heart-beat - report progress through the Progressable so the task is not timed out during long scrolls
        beat = new HeartBeat(progressable, cfg, settings.getHeartBeatLead(), log);
        this.progressable = progressable;
        if (log.isDebugEnabled()) {
            log.debug(String.format("Initializing RecordReader for [%s]", esSplit));