All files / kernel-language-model-service/src/ollama base.ts

100% Statements 18/18
100% Branches 4/4
100% Functions 9/9
100% Lines 18/18

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97                                                                      26x                 4x 3x                     12x 12x 12x 12x     12x           12x 3x   2x     2x     4x           3x   3x 2x     2x       12x      
import type { GenerateResponse, ListResponse } from 'ollama';
 
import type { LanguageModelService } from '../types.ts';
import { parseModelConfig } from './parse.ts';
import type {
  OllamaInstanceConfig,
  OllamaModel,
  OllamaClient,
  OllamaModelOptions,
} from './types.ts';
 
/**
 * Base service for interacting with Ollama language models.
 *
 * Implements the generic {@link LanguageModelService} contract on top of an
 * Ollama client. Model instances produced by {@link makeInstance} are hardened
 * so they can be safely shared across vat boundaries (object-capability
 * discipline: no mutable surface leaks).
 *
 * @template Ollama - The concrete Ollama client type produced by the factory
 */
export class OllamaBaseService<Ollama extends OllamaClient>
  implements
    LanguageModelService<
      OllamaModelOptions,
      OllamaModelOptions,
      GenerateResponse
    >
{
  /** Factory producing a fresh (or cached) Ollama client on demand. */
  readonly #makeClient: () => Promise<Ollama>;

  /**
   * Creates a new Ollama base service.
   *
   * @param makeClient - Factory function that creates an Ollama client instance
   */
  constructor(makeClient: () => Promise<Ollama>) {
    this.#makeClient = makeClient;
  }

  /**
   * Retrieves a list of available models from the Ollama server.
   *
   * @returns A promise that resolves to the list of available models
   */
  async getModels(): Promise<ListResponse> {
    return (await this.#makeClient()).list();
  }

  /**
   * Creates a new language model instance with the specified configuration.
   * The returned instance is hardened for object capability security.
   *
   * @param config - The configuration for the model instance
   * @returns A promise that resolves to a hardened language model instance
   */
  async makeInstance(config: OllamaInstanceConfig): Promise<OllamaModel> {
    const info = parseModelConfig(config);
    const { model } = info;
    const client = await this.#makeClient();

    // Caller-supplied defaults for sampling; a copy so later callers cannot
    // mutate the captured config through the instance.
    const baseOptions = {
      ...(config.options ?? {}),
    };
    // Spread last in every generate() call, so these always win over both
    // the instance defaults and per-call overrides.
    const forcedOptions = {
      model,
      stream: true,
      raw: true,
    };

    // keep_alive: -1 keeps the model resident; 0 evicts it immediately
    // (per the Ollama generate API). The empty prompt makes this a no-op
    // generation used purely for residency control.
    const setResidency = async (keepAlive: number) => {
      await client.generate({ model, keep_alive: keepAlive, prompt: '' });
    };

    const instance = {
      getInfo: async () => info,
      load: async () => setResidency(-1),
      unload: async () => setResidency(0),
      sample: async (prompt: string, options?: Partial<OllamaModelOptions>) => {
        const response = await client.generate({
          ...baseOptions,
          ...(options ?? {}),
          ...forcedOptions,
          prompt,
        });
        // Re-expose the response stream through a fresh async generator so
        // consumers never hold the raw client response object.
        const stream = (async function* () {
          yield* response;
        })();
        return {
          stream,
          abort: async () => response.abort(),
        };
      },
    };
    return harden(instance);
  }
}