return percolate(new DocAndQueryRequest(request.doc(), query));
}
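
// Percolate the parsed document against the registered queries: index the
// doc's fields into a one-document, in-memory index, then run each query
// against it in turn.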
public Response percolate(DocAndQueryRequest request) throws ElasticSearchException {
// first, index the (already parsed) source doc into a MemoryIndex
final CustomMemoryIndex memoryIndex = new CustomMemoryIndex();
// TODO: only the root doc's fields are indexed here, so percolation does not support nested docs...
for (Fieldable field : request.doc().rootDoc().getFields()) {
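// stored-only fields can never match a query, so skip anything not indexed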
if (!field.isIndexed()) {
continue;
}
// no need to index the UID field
if (field.name().equals(UidFieldMapper.NAME)) {
continue;
}
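// prefer a pre-built token stream if the field carries one; otherwise fall
// back to analyzing its reader value, and finally its raw string value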
TokenStream tokenStream = field.tokenStreamValue();
if (tokenStream != null) {
memoryIndex.addField(field.name(), tokenStream, field.getBoost());
} else {
Reader reader = field.readerValue();
if (reader != null) {
try {
memoryIndex.addField(field.name(),
        request.doc().analyzer().reusableTokenStream(field.name(), reader),
        field.getBoost() * request.doc().rootDoc().getBoost());
} catch (IOException e) {
throw new MapperParsingException("Failed to analyze field [" + field.name() + "]", e);
}
} else {
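// no reader either, so analyze the raw string value on the fly; as in the
// reader branch, the doc-level boost is folded into the field boost here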
String value = field.stringValue();
if (value != null) {
try {
memoryIndex.addField(field.name(),
        request.doc().analyzer().reusableTokenStream(field.name(), new FastStringReader(value)),
        field.getBoost() * request.doc().rootDoc().getBoost());
} catch (IOException e) {
throw new MapperParsingException("Failed to analyze field [" + field.name() + "]", e);
}
}
}
}
}
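// the in-memory index now holds the single doc; open a searcher over it so
// each percolate query can be executed against it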
final IndexSearcher searcher = memoryIndex.createSearcher();
List<String> matches = new ArrayList<String>();
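// no query given: test the doc against every registered query, using the
// ExistsCollector to record whether the single doc matched at all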
if (request.query() == null) {
Lucene.ExistsCollector collector = new Lucene.ExistsCollector();
for (Map.Entry<String, Query> entry : queries.entrySet()) {