kernel-language-model-service/src/test-utils/queue model.ts

Statements: 100% (8/8)
Branches: 100% (6/6)
Functions: 100% (7/7)
Lines: 100% (7/7)


import { objectResponseFormatter } from './response.ts';
import type { ResponseFormatter } from './response.ts';
import type { Tokenizer } from './tokenizer.ts';
import { whitespaceTokenizer } from './tokenizer.ts';
import {
  makeAbortableAsyncIterable,
  makeEmptyStreamWithAbort,
  mapAsyncIterable,
  normalizeToAsyncIterable,
} from './utils.ts';
import type { StreamWithAbort } from './utils.ts';
import type { LanguageModel, ModelInfo } from '../../types.ts';
 
/**
 * Queue-based language model with helper methods for configuring responses.
 * Responses are queued and consumed by sample() calls.
 *
 * @template Response - The type of response generated by the model
 */
export type QueueLanguageModel<Response extends object> =
  // QueueLanguageModel does not support any sample options
  LanguageModel</* Options = */ unknown, Response> & {
    /**
     * Pushes a streaming response to the queue for the next sample() call.
     * The text will be tokenized and streamed token by token.
     *
     * @param text - The complete text to stream
     */
    push: (text: string) => void;
  };
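
// Illustrative queue contract (an added sketch, not from the original API
// docs): each push() enqueues one streaming response and each sample() call
// dequeues one, so pushes and samples pair up first-in, first-out. Sampling
// from an empty queue yields an empty stream via makeEmptyStreamWithAbort.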
 
/**
 * Make a queue-based language model instance.
 *
 * @template Response - The type of response generated by the model
 * @param options - Configuration options for the model
 * @param options.tokenizer - The tokenizer function to use. Defaults to whitespace splitting.
 * @param options.responseFormatter - The function used to format each yielded token into a response. Defaults to a formatter that wraps each token in an object with `response` and `done` properties.
 * @param options.responseQueue - For testing only. The queue to use for responses. Defaults to an empty array.
 * @returns A queue-based language model instance.
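 *
 * @example
 * // A usage sketch (added; not from the original docs). It assumes the
 * // default objectResponseFormatter yields `{ response, done }` objects and
 * // that the returned StreamWithAbort<Response> is itself async-iterable.
 * const model = makeQueueModel();
 * model.push('hello world');
 * // Sample options are ignored by the queue model.
 * const stream = await model.sample(undefined);
 * for await (const { response } of stream) {
 *   console.log(response); // 'hello', then 'world' with whitespaceTokenizer
 * }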
 */
export const makeQueueModel = <
  Response extends object = { response: string; done: boolean },
>({
  tokenizer = whitespaceTokenizer,
  responseFormatter = objectResponseFormatter as ResponseFormatter<Response>,
  // Available for testing
  responseQueue = [],
}: {
  tokenizer?: Tokenizer;
  responseFormatter?: ResponseFormatter<Response>;
  responseQueue?: StreamWithAbort<Response>[];
} = {}): QueueLanguageModel<Response> => {
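  // Builds the streamed response for one push() call: tokenize the text,
  // normalize the tokens to an async iterable, map each token through the
  // response formatter, and wrap the result so it can be aborted mid-stream.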
  const makeStreamWithAbort = (text: string): StreamWithAbort<Response> =>
    makeAbortableAsyncIterable(
      mapAsyncIterable(
        normalizeToAsyncIterable(tokenizer(text)),
        responseFormatter,
      ),
    );
 
  return harden({
    getInfo: async (): Promise<ModelInfo<Record<string, never>>> => ({
      model: 'test',
    }),
    load: async (): Promise<void> => {
      // No-op: queue model doesn't require loading
    },
    unload: async (): Promise<void> => {
      // No-op: queue model doesn't require unloading
    },
    sample: async (): Promise<StreamWithAbort<Response>> => {
      return responseQueue.shift() ?? makeEmptyStreamWithAbort();
    },
    push: (text: string): void => {
      const streamWithAbort = makeStreamWithAbort(text);
      responseQueue.push(streamWithAbort);
    },
  });
};