/**
* Tokenizer function that converts a string into tokens.
* Can return either a synchronous array or an async iterable.
*
* @param text - The text to tokenize
* @returns Either an array of tokens or an async iterable of tokens
*/
export type Tokenizer = (text: string) => string[] | AsyncIterable<string>;
/**
* Split text by whitespace.
* For each word, attach at most one whitespace character from the whitespace
* immediately preceding it. Any extra whitespace becomes separate tokens.
*
* @param text - The text to tokenize
* @returns An array of tokens
*/
export const whitespaceTokenizer = (text: string): string[] => {
const tokens: string[] = [];
// Match words with optional preceding whitespace (captured in group 1)
const regex = /(\s*)(\S+)/gu;
let match: RegExpExecArray | null;
let lastIndex = 0;
while ((match = regex.exec(text)) !== null) {
const [, whitespace, word] = match;
const matchIndex = match.index;
Iif (!word) {
continue;
}
const whitespaceStr = whitespace ?? '';
const whitespaceLength = whitespaceStr.length;
// Process whitespace before the word
if (whitespaceLength > 0) {
// Add all but one whitespace character as separate tokens (before the word)
for (const char of whitespaceStr.slice(0, whitespaceLength - 1)) {
tokens.push(char);
}
// Attach the last whitespace character to the word
tokens.push(whitespaceStr[whitespaceLength - 1] + word);
} else {
tokens.push(word);
}
lastIndex = matchIndex + whitespaceLength + word.length;
}
// Add any trailing whitespace as separate tokens
if (lastIndex < text.length) {
const trailing = text.slice(lastIndex);
for (const char of trailing) {
tokens.push(char);
}
}
return tokens;
};