ObjectWriter jsonWriter = objectMapper.writerWithDefaultPrettyPrinter();
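// Standard ANTLR 4 pipeline: raw characters -> lexer -> token stream -> parser.
// (Newer ANTLR releases deprecate ANTLRInputStream in favor of CharStreams.fromString.)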
CharStream stream = new ANTLRInputStream(sql);
DruidSQLLexer lexer = new DruidSQLLexer(stream);
TokenStream tokenStream = new CommonTokenStream(lexer);
DruidSQLParser parser = new DruidSQLParser(tokenStream);
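// Make error reporting explicit: drop any registered listeners and attach the
// console listener, so syntax errors from either stage land on stderr.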
lexer.removeErrorListeners();
parser.removeErrorListeners();
lexer.addErrorListener(ConsoleErrorListener.INSTANCE);
parser.addErrorListener(ConsoleErrorListener.INSTANCE);
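// Parse from the grammar's top-level 'query' rule; treat any syntax error as fatal.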
try {
DruidSQLParser.QueryContext queryContext = parser.query();
if(parser.getNumberOfSyntaxErrors() > 0) throw new IllegalStateException("failed to parse query");
// parser.setBuildParseTree(true); // parse trees are already built by default in ANTLR 4
// System.err.println(queryContext.toStringTree(parser));
} catch(Exception e) {
String msg = e.getMessage();
// Print the message when there is one, falling back to the exception itself.
System.err.println(msg != null ? msg : e);
System.exit(1);
}
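// A statement with no GROUP BY dimensions maps to a Druid timeseries query;
// one with dimensions maps to a groupBy query. typeRef tells Jackson how to
// deserialize the corresponding result rows.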
final Query query;
final TypeReference<?> typeRef;
boolean groupBy = false;
if(parser.groupByDimensions.isEmpty()) {
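// Timeseries: one aggregated result row per granularity bucket.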
query = Druids.newTimeseriesQueryBuilder()
.dataSource(parser.getDataSource())
.aggregators(new ArrayList<AggregatorFactory>(parser.aggregators.values()))
.postAggregators(parser.postAggregators)
.intervals(parser.intervals)
.granularity(parser.granularity)
.filters(parser.filter)
.build();
typeRef = new TypeReference<List<Result<TimeseriesResultValue>>>(){};
} else {
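// GroupBy: aggregated rows keyed by the selected dimensions.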
query = GroupByQuery.builder()
.setDataSource(parser.getDataSource())
.setAggregatorSpecs(new ArrayList<AggregatorFactory>(parser.aggregators.values()))
.setPostAggregatorSpecs(parser.postAggregators)
.setInterval(parser.intervals)
.setGranularity(parser.granularity)
.setDimFilter(parser.filter)