// NOTE(review): fragment of test-fixture setup — `doc`, `fieldValue`, and `matched` (a field
// config/store flag) are declared outside this view; `Field`, `CannedTokenStream`, and `token`
// presumably come from Lucene test utilities — confirm against the enclosing method.
// The same value is indexed under several field names, each analyzed differently (see the
// analyzer map started below), so highlighter behavior can be compared across tokenizations.
doc.add( new Field( "field_super_exact", fieldValue, matched ) ); // Whitespace tokenized without toLower
doc.add( new Field( "field_characters", fieldValue, matched ) ); // Each letter is a token
doc.add( new Field( "field_tripples", fieldValue, matched ) ); // Every three letters is a token
doc.add( new Field( "field_sliced", fieldValue.substring( 0, // Sliced at 10 chars then analyzed just like field
// NOTE(review): possible off-by-one — `length() - 1` drops the LAST character whenever
// fieldValue is 11 chars or shorter, which contradicts "sliced at 10 chars" above.
// If the intent is "first min(10, length) chars", this should be Math.min( fieldValue.length(), 10 ).
// Left unchanged because downstream test expectations may rely on current behavior — TODO confirm.
Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
// Synthetic field: two tokens ("der" and "red") occupying the SAME position (position
// increments 1 then 0, both spanning offsets 0-3) — exercises overlapping-token handling.
doc.add( new Field( "field_der_red", new CannedTokenStream( // Hacky field containing "der" and "red" at pos = 0
token( "der", 1, 0, 3 ),
token( "red", 0, 0, 3 )
), matched ) );
// Per-field analyzer registry; TreeMap keeps field names sorted. (Pre-diamond explicit type
// arguments `new TreeMap<String, Analyzer>()` could be `new TreeMap<>()` on Java 7+.)
final Map<String, Analyzer> fieldAnalyzers = new TreeMap<String, Analyzer>();