All files / kernel-language-model-service/src/ollama nodejs.ts

70% Statements 7/10
100% Branches 3/3
50% Functions 2/4
70% Lines 7/10

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70                                                          13x 13x 2x   11x 11x   3x                             1x                                    
import { Ollama } from 'ollama';
 
import type {
  ChatParams,
  ChatResult,
  SampleParams,
  SampleResult,
} from '../types.ts';
import { OllamaBaseService } from './base.ts';
import type { SampleStreamResult } from './base.ts';
import { defaultClientConfig } from './constants.ts';
import type { OllamaClient, OllamaNodejsConfig } from './types.ts';
 
/**
 * Ollama service implementation for Node.js environments.
 *
 * Concrete subclass of {@link OllamaBaseService} that builds its Ollama
 * client lazily from an explicitly endowed `fetch` implementation — no
 * ambient global fetch is ever used.
 */
export class OllamaNodejsService extends OllamaBaseService<OllamaClient> {
  /**
   * Creates a new Ollama Node.js service.
   *
   * @param config - The configuration for the service
   * @param config.endowments - Required endowments for the service
   * @param config.endowments.fetch - The fetch implementation to use for HTTP requests
   * @param config.clientConfig - Optional configuration for the Ollama client
   * @throws {Error} When fetch is not provided in endowments
   */
  constructor(config: OllamaNodejsConfig) {
    const { endowments, clientConfig = {} } = config;
    // Fail fast: the client is constructed lazily, so a missing fetch
    // would otherwise only surface on first use.
    const fetchImpl = endowments?.fetch;
    if (!fetchImpl) {
      throw new Error('Must endow a fetch implementation.');
    }
    // Caller-supplied options win over the defaults (spread order matters).
    const mergedClientConfig = { ...defaultClientConfig, ...clientConfig };
    const makeClient = async (): Promise<OllamaClient> =>
      new Ollama({
        ...mergedClientConfig,
        fetch: fetchImpl,
      }) as OllamaClient;
    super(makeClient);
  }
}
 
/**
 * Creates a hardened kernel service backend backed by a local Ollama instance.
 *
 * @param config - Configuration for the Ollama Node.js service.
 * @returns An object with `chat` and `sample` methods for use with
 *   `makeKernelLanguageModelService`.
 */
export const makeOllamaNodejsKernelService = (
  config: OllamaNodejsConfig,
): {
  chat: (params: ChatParams) => Promise<ChatResult>;
  sample: {
    (params: SampleParams & { stream: true }): Promise<SampleStreamResult>;
    (params: SampleParams & { stream?: false }): Promise<SampleResult>;
  };
} => {
  // Overload pair mirroring OllamaBaseService.sample: streaming vs. buffered.
  type SampleFn = {
    (params: SampleParams & { stream: true }): Promise<SampleStreamResult>;
    (params: SampleParams & { stream?: false }): Promise<SampleResult>;
  };
  const backend = new OllamaNodejsService(config);
  // Bind so `sample` keeps its `this` when detached from the object; the
  // cast restores the overload signatures that `bind` erases.
  const sample = backend.sample.bind(backend) as SampleFn;
  const chat = async (params: ChatParams): Promise<ChatResult> =>
    backend.chat(params);
  return harden({ chat, sample });
};