assert discardPunctuation;
}
altToken = null;
}
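// Resolve the dictionary that produced this back-traced entry (known, unknown, or user).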
final Dictionary dict = getDict(backType);
if (backType == Type.USER) {
// Expand the phraseID we recorded into the actual
// segmentation:
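// lookupSegmentation returns an array whose first element is the base word
// id and whose remaining elements are the per-segment lengths, e.g. a user
// entry split into three 2-char segments yields {wordID, 2, 2, 2}.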
final int[] wordIDAndLength = userDictionary.lookupSegmentation(backID);
int wordID = wordIDAndLength[0];
int current = 0;
for (int j = 1; j < wordIDAndLength.length; j++) {
  final int len = wordIDAndLength[j];
  // Each segment gets a consecutive word id derived from the base id:
  pending.add(new Token(wordID + j - 1,
                        fragment,
                        current + offset,
                        len,
                        Type.USER,
                        current + backPos,
                        dict));
  if (VERBOSE) {
    System.out.println(" add USER token=" + pending.get(pending.size() - 1));
  }
  current += len;
}
// Reverse the tokens we just added, because when we
// serve them up from incrementToken we serve in
// reverse:
Collections.reverse(pending.subList(pending.size() - (wordIDAndLength.length - 1),
pending.size()));
backCount += wordIDAndLength.length - 1;
} else {
if (extendedMode && backType == Type.UNKNOWN) {
  // In EXTENDED mode we convert unknown words into
  // unigrams:
int unigramTokenCount = 0;
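// Walk the unknown word backwards so the unigrams land in pending already
// reversed, matching the reverse order in which incrementToken serves them;
// unlike the user-dictionary case above, no explicit reverse is needed.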
for (int i = length - 1; i >= 0; i--) {
  int charLen = 1;
  if (i > 0 && Character.isLowSurrogate(fragment[offset + i])) {
    // Back up to keep the surrogate pair together as one unigram:
    i--;
    charLen = 2;
  }
  if (!discardPunctuation || !isPunctuation(fragment[offset + i])) {
    pending.add(new Token(CharacterDefinition.NGRAM,
                          fragment,
                          offset + i,
                          charLen,
                          Type.UNKNOWN,
                          backPos + i,
                          unkDictionary));
    unigramTokenCount++;
  }
}
backCount += unigramTokenCount;
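// Single-token case: emit the token unless it is punctuation that should be
// discarded; the length == 0 check guards the fragment[offset] lookup below.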
} else if (!discardPunctuation || length == 0 || !isPunctuation(fragment[offset])) {
pending.add(new Token(backID,
fragment,
offset,
length,
backType,
backPos,
dict));
if (VERBOSE) {
System.out.println(" add token=" + pending.get(pending.size()-1));
}
backCount++;
} else {
if (VERBOSE) {
System.out.println(" skip punctuation token=" + new String(fragment, offset, length));
}
}
}
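// Remember this token's left id so the connection cost at the boundary with
// the previously backtraced text can be computed from it.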
lastLeftWordID = dict.getLeftId(backID);
pos = backPos;
bestIDX = nextBestIDX;
}
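// Record how far this backtrace reached; the next backtrace resumes here.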
lastBackTracePos = endPos;