* second failure -- the insistentAppend wrapping it ensures that it will
* continue retrying while the sink is open.
*/
static CommonTree substDFOChainsNoLet(String sink, List<String> collectors)
throws RecognitionException, FlumeSpecException {
PatternMatch dfoPat = recursive(var("dfo", FlumePatterns.sink(AUTO_DFO)));
CommonTree sinkTree = FlumeBuilder.parseSink(sink);
Map<String, CommonTree> dfoMatches = dfoPat.match(sinkTree);
// no autoDFOChain present; nothing to translate.
if (dfoMatches == null) {
return sinkTree;
}
while (dfoMatches != null) {
// found an autoDFOChain, replace it with the failover chain.
CommonTree dfoTree = dfoMatches.get("dfo");
// Each logical sink is wrapped in lazyOpen so it is opened lazily (individually).
CommonTree dfoPrimaryChain = buildFailChainAST(
"{ lazyOpen => logicalSink(\"%s\") }", collectors);
// If there are no collectors, fall back to a sink that always fails.
if (dfoPrimaryChain == null) {
dfoPrimaryChain = FlumeBuilder.parseSink("fail(\"no collectors\")");
}
// diskFailover's subsink must never give up, so we wrap it with an
// insistentAppend. But an append can fail if its subsink is not open, so
// we add a stubbornAppend (which closes and reopens the subsink) and retry
// opening the chain using insistentOpen.
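// For illustration, with P standing for the generated primary chain spec
// (FlumeSpecGen.genEventSink(dfoPrimaryChain)), the spec built below has
// the shape:
//   < P ? {diskFailover => { insistentAppend => { stubbornAppend =>
//       { insistentOpen => P } } } } >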
String dfo = "< " + FlumeSpecGen.genEventSink(dfoPrimaryChain)
+ " ? {diskFailover => "
+ "{ insistentAppend => { stubbornAppend => { insistentOpen =>"
+ FlumeSpecGen.genEventSink(dfoPrimaryChain) + " } } } } >";
CommonTree newDfoTree = FlumeBuilder.parseSink(dfo);
// substitute the new subtree for the matched autoDFOChain node.
int idx = dfoTree.getChildIndex();
CommonTree parent = dfoTree.parent;
if (parent == null) {
sinkTree = newDfoTree;
} else {
parent.replaceChildren(idx, idx, newDfoTree);
}
// pattern match again to find any remaining autoDFOChain occurrences.
dfoMatches = dfoPat.match(sinkTree);
}
return sinkTree;
}
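
/*
 * Illustrative usage sketch (not part of the original source): assuming
 * AUTO_DFO names the "autoDFOChain" sink and "c1"/"c2" are hypothetical
 * collectors, the translation could be exercised like this:
 *
 *   List<String> collectors = Arrays.asList("c1", "c2");
 *   CommonTree translated = substDFOChainsNoLet("autoDFOChain", collectors);
 *   String spec = FlumeSpecGen.genEventSink(translated);
 *   // spec now has the diskFailover/insistentAppend wrapping substituted
 *   // in place of each autoDFOChain occurrence.
 */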