    boolean hasMoreTokens = stream.incrementToken();

    fieldState.attributeSource = stream;

    OffsetAttribute offsetAttribute = (OffsetAttribute) fieldState.attributeSource.addAttribute(OffsetAttribute.class);
    PositionIncrementAttribute posIncrAttribute = (PositionIncrementAttribute) fieldState.attributeSource.addAttribute(PositionIncrementAttribute.class);
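
    // addAttribute returns the stream's single, reused attribute
    // instances: each incrementToken() call updates these same objects
    // in place rather than allocating new ones. The stream is expected
    // to have been reset before the first incrementToken() call above.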

    consumer.start(field);

    for(;;) {

      // If we hit an exception in stream.incrementToken below
      // (which is fairly common, eg if the analyzer
      // chokes on a given document), then it's
      // non-aborting and (above) this one document
      // will be marked as deleted, but still
      // consume a docID

      if (!hasMoreTokens) break;
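
      // Advance by this token's position increment. The decrement here is
      // paired with the increment after consumer.add() below, so that
      // fieldState.position holds the current token's position while the
      // consumer indexes it. allowMinus1Position preserves older behavior
      // in which a leading position increment of 0 could yield position
      // -1 instead of clamping at 0.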
      final int posIncr = posIncrAttribute.getPositionIncrement();
      fieldState.position += posIncr;
      if (allowMinus1Position || fieldState.position > 0) {
        fieldState.position--;
      }
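
      // A position increment of 0 means this token occupies the same
      // position as the previous one (eg an injected synonym); such
      // overlaps are counted so that length normalization can discount
      // them later.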
      if (posIncr == 0)
        fieldState.numOverlap++;

      boolean success = false;
      try {
        // If we hit an exception in here, we abort
        // all buffered documents since the last
        // flush, on the likelihood that the
        // internal state of the consumer is now
        // corrupt and should not be flushed to a
        // new segment:
        consumer.add();
        success = true;
      } finally {
        if (!success)
          docState.docWriter.setAborting();
      }
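
      // Re-advance past the current token so that the next token's
      // position increment is applied relative to this one.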
      fieldState.position++;
      offsetEnd = fieldState.offset + offsetAttribute.endOffset();

      if (++fieldState.length >= maxFieldLength) {
        if (docState.infoStream != null)
          docState.infoStream.println("maxFieldLength " + maxFieldLength + " reached for field " + fieldInfo.name + ", ignoring following tokens");
        break;
      }

      hasMoreTokens = stream.incrementToken();
    }
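
    // The loop above may exit early (truncation) or when the stream is
    // exhausted; either way the end-of-stream handling below runs, and
    // fieldState.offset accumulates so that offsets for any further
    // values of this same field continue from here.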
    // Trigger the stream to perform any end-of-stream operations, eg
    // recording its final offset (which can differ from the last token's
    // end offset if trailing characters were filtered out):
    stream.end();

    fieldState.offset += offsetAttribute.endOffset();
    anyToken = fieldState.length > startLength;

  } finally {
    stream.close();
  }
}
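
// For reference, a minimal sketch of the reset/incrementToken/end/close
// contract this loop follows, written as an outside caller would use it.
// Illustrative only: "analyzer" and "text" are assumed, and the casts
// match the attribute API style used above.
//
//   TokenStream ts = analyzer.tokenStream("body", new StringReader(text));
//   OffsetAttribute off = (OffsetAttribute) ts.addAttribute(OffsetAttribute.class);
//   PositionIncrementAttribute inc = (PositionIncrementAttribute) ts.addAttribute(PositionIncrementAttribute.class);
//   ts.reset();
//   while (ts.incrementToken()) {
//     // consume off.startOffset(), off.endOffset(), inc.getPositionIncrement()
//   }
//   ts.end();   // records end-of-stream state, eg the final offset
//   ts.close();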