            if ( r!=null ) r.isStartRule = false;
        }
    }
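
    /** Assign token types for this grammar's tokens: names listed in the
     *  tokens{} section, implicit types for nonfragment lexer rules, and
     *  aliases for string literals defined by rules like X : 'xxx' ;. Types
     *  are stored in the outermost grammar so imports share one token space.
     */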
    void assignLexerTokenTypes(Grammar g, List<GrammarAST> tokensDefs) {
        Grammar G = g.getOutermostGrammar(); // put in root, even if imported
        for (GrammarAST def : tokensDefs) {
            // tokens { id (',' id)* } so must check IDs not TOKEN_REF
            if ( Grammar.isTokenName(def.getText()) ) {
                G.defineTokenName(def.getText());
            }
        }

        /* Define token types for nonfragment rules which do not include a 'type(...)'
         * or 'more' lexer command.
         */
        for (Rule r : g.rules.values()) {
            if ( !r.isFragment() && !hasTypeOrMoreCommand(r) ) {
                G.defineTokenName(r.name);
            }
        }

        // FOR ALL X : 'xxx'; RULES, DEFINE 'xxx' AS TYPE X
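        // e.g., for WHILE : 'while' ; the literal 'while' becomes an alias for
        // token type WHILE; duplicate literal definitions are collected below.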
        List<Pair<GrammarAST,GrammarAST>> litAliases =
            Grammar.getStringLiteralAliasesFromLexerRules(g.ast);
        Set<String> conflictingLiterals = new HashSet<String>();
        if ( litAliases!=null ) {
            for (Pair<GrammarAST,GrammarAST> pair : litAliases) {
                GrammarAST nameAST = pair.a;
                GrammarAST litAST = pair.b;
                if ( !G.stringLiteralToTypeMap.containsKey(litAST.getText()) ) {
                    G.defineTokenAlias(nameAST.getText(), litAST.getText());
                }
                else {
                    // oops two literal defs in two rules (within or across modes).
                    conflictingLiterals.add(litAST.getText());
                }