From eeb97cf726c89eb352da7049a3d87e9712348f7d Mon Sep 17 00:00:00 2001
From: Logan Ramos
Date: Wed, 27 Aug 2025 15:48:51 -0400
Subject: [PATCH] Update chat model provider sample

---
 chat-model-provider-sample/package-lock.json |   8 +-
 chat-model-provider-sample/package.json      |  14 +-
 chat-model-provider-sample/src/extension.ts  |   2 +-
 chat-model-provider-sample/src/provider.ts   |  18 +-
 .../{ => src}/vscode.d.ts                    | 230 +++++++++++++++++-
 chat-model-provider-sample/tsconfig.json     |   9 +-
 .../vscode.proposed.chatProvider.d.ts        | 144 -----------
 7 files changed, 243 insertions(+), 182 deletions(-)
 rename chat-model-provider-sample/{ => src}/vscode.d.ts (98%)
 delete mode 100644 chat-model-provider-sample/vscode.proposed.chatProvider.d.ts

diff --git a/chat-model-provider-sample/package-lock.json b/chat-model-provider-sample/package-lock.json
index f4c4cddd1c..e059f05118 100644
--- a/chat-model-provider-sample/package-lock.json
+++ b/chat-model-provider-sample/package-lock.json
@@ -12,7 +12,7 @@
         "@eslint/js": "^9.13.0",
         "@stylistic/eslint-plugin": "^2.9.0",
         "@types/node": "^22",
-        "@types/vscode": "^1.102.0",
+        "@types/vscode": "^1.103.0",
         "@vscode/dts": "^0.4.1",
         "eslint": "^9.13.0",
         "typescript": "^5.8.2",
@@ -360,9 +360,9 @@
       }
     },
     "node_modules/@types/vscode": {
-      "version": "1.102.0",
-      "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.102.0.tgz",
-      "integrity": "sha512-V9sFXmcXz03FtYTSUsYsu5K0Q9wH9w9V25slddcxrh5JgORD14LpnOA7ov0L9ALi+6HrTjskLJ/tY5zeRF3TFA==",
+      "version": "1.103.0",
+      "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.103.0.tgz",
+      "integrity": "sha512-o4hanZAQdNfsKecexq9L3eHICd0AAvdbLk6hA60UzGXbGH/q8b/9xv2RgR7vV3ZcHuyKVq7b37IGd/+gM4Tu+Q==",
       "dev": true,
       "license": "MIT"
     },
diff --git a/chat-model-provider-sample/package.json b/chat-model-provider-sample/package.json
index 0bd8990b02..c69c652cc3 100644
--- a/chat-model-provider-sample/package.json
+++ b/chat-model-provider-sample/package.json
@@ -1,6 +1,5 @@
 {
   "name": "chat-model-provider-sample",
-  "enabledApiProposals": ["chatProvider"],
   "publisher": "vscode-samples",
   "displayName": "Copilot Model Provider Sample",
   "description": "Sample extension which provides chat models via the LM API.",
@@ -10,17 +9,14 @@
   },
   "version": "0.1.0",
   "engines": {
-    "vscode": "^1.103.0-20250721"
+    "vscode": "^1.104.0-20250828"
   },
   "categories": [
     "AI",
     "Chat"
   ],
-  "activationEvents": [
-    "onStartupFinished"
-  ],
   "contributes": {
-    "languageModels": [
+    "languageModelChatProviders": [
       {
         "vendor": "sample",
         "displayName": "Sample Model Vendor"
@@ -37,15 +33,13 @@
     "lint": "eslint",
     "watch": "tsc -watch -p ./"
   },
-  "dependencies": {
-
-  },
+  "dependencies": { },
   "devDependencies": {
     "@eslint/js": "^9.13.0",
     "@stylistic/eslint-plugin": "^2.9.0",
     "@types/node": "^22",
     "@vscode/dts": "^0.4.1",
-    "@types/vscode": "^1.102.0",
+    "@types/vscode": "^1.103.0",
     "eslint": "^9.13.0",
     "typescript": "^5.8.2",
     "typescript-eslint": "^8.26.0"
diff --git a/chat-model-provider-sample/src/extension.ts b/chat-model-provider-sample/src/extension.ts
index 5975037d1e..114e915061 100644
--- a/chat-model-provider-sample/src/extension.ts
+++ b/chat-model-provider-sample/src/extension.ts
@@ -2,7 +2,7 @@ import * as vscode from 'vscode';
 import { SampleChatModelProvider } from './provider';
 
 export function activate(_: vscode.ExtensionContext) {
-	vscode.lm.registerChatModelProvider('sample', new SampleChatModelProvider());
+	vscode.lm.registerLanguageModelChatProvider('sample', new SampleChatModelProvider());
 }
 
 export function deactivate() { }
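For reference, the activation change above amounts to the following minimal entry point under the finalized API. This is a sketch for illustration, not part of the patch; the only addition over the sample is the common practice of pushing the returned Disposable onto the extension's subscriptions:

import * as vscode from 'vscode';
import { SampleChatModelProvider } from './provider';

export function activate(context: vscode.ExtensionContext) {
	// registerLanguageModelChatProvider returns a Disposable; disposing it
	// (here: automatically on extension deactivation) unregisters the provider.
	context.subscriptions.push(
		vscode.lm.registerLanguageModelChatProvider('sample', new SampleChatModelProvider())
	);
}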
diff --git a/chat-model-provider-sample/src/provider.ts b/chat-model-provider-sample/src/provider.ts
index 1a174e6ded..7064087d9d 100644
--- a/chat-model-provider-sample/src/provider.ts
+++ b/chat-model-provider-sample/src/provider.ts
@@ -1,35 +1,35 @@
-import { CancellationToken, ChatResponseFragment2, LanguageModelChatInformation, LanguageModelChatMessage, LanguageModelChatProvider2, LanguageModelChatRequestHandleOptions, LanguageModelTextPart, Progress, ProviderResult } from "vscode";
+import { CancellationToken, LanguageModelChatInformation, LanguageModelChatMessage, LanguageModelChatProvider, LanguageModelChatRequestHandleOptions, LanguageModelResponsePart, LanguageModelTextPart, Progress, ProviderResult } from "vscode";
 
 function getChatModelInfo(id: string, name: string): LanguageModelChatInformation {
 	return {
 		id,
 		name,
-		description: "A sample chat model for demonstration purposes.",
+		tooltip: "A sample chat model for demonstration purposes.",
 		family: "sample-family",
 		maxInputTokens: 120000,
 		maxOutputTokens: 8192,
 		version: "1.0.0",
 		capabilities: {
 			toolCalling: true,
-			vision: true,
+			imageInput: true,
 		}
 	};
 }
 
-export class SampleChatModelProvider implements LanguageModelChatProvider2 {
-	prepareLanguageModelChat(_options: { silent: boolean; }, _token: CancellationToken): ProviderResult<LanguageModelChatInformation[]> {
+export class SampleChatModelProvider implements LanguageModelChatProvider {
+	prepareLanguageModelChatInformation(_options: { silent: boolean; }, _token: CancellationToken): ProviderResult<LanguageModelChatInformation[]> {
 		return [
 			getChatModelInfo("sample-dog-model", "Dog Model"),
 			getChatModelInfo("sample-cat-model", "Cat Model"),
 		];
 	}
 
-	async provideLanguageModelChatResponse(model: LanguageModelChatInformation, _messages: Array<LanguageModelChatMessage>, _options: LanguageModelChatRequestHandleOptions, progress: Progress<ChatResponseFragment2>, _token: CancellationToken): Promise<void> {
+	async provideLanguageModelChatResponse(model: LanguageModelChatInformation, _messages: Array<LanguageModelChatMessage>, _options: LanguageModelChatRequestHandleOptions, progress: Progress<LanguageModelResponsePart>, _token: CancellationToken): Promise<void> {
 		if (model.id === "sample-dog-model") {
-			progress.report({ index: 0, part: new LanguageModelTextPart("Woof! This is a dog model response.") });
+			progress.report(new LanguageModelTextPart("Woof! This is a dog model response."));
 		} else if (model.id === "sample-cat-model") {
-			progress.report({ index: 0, part: new LanguageModelTextPart("Meow! This is a cat model response.") });
+			progress.report(new LanguageModelTextPart("Meow! This is a cat model response."));
 		} else {
-			progress.report({ index: 0, part: new LanguageModelTextPart("Unknown model.") });
+			progress.report(new LanguageModelTextPart("Unknown model."));
 		}
 	}
 
 	async provideTokenCount(_model: LanguageModelChatInformation, _text: string | LanguageModelChatMessage, _token: CancellationToken): Promise<number> {
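The sample reports a single text part per request and returns. A real provider would stream chunks as they arrive, honor cancellation, and may emit tool calls. A rough sketch of such a method body inside SampleChatModelProvider, under the same finalized API; it is not part of the patch, and fetchModelStream together with its chunk shape is a hypothetical stand-in for a real model client:

// Hedged sketch; fetchModelStream is hypothetical.
async provideLanguageModelChatResponse(model: LanguageModelChatInformation, messages: readonly LanguageModelChatRequestMessage[], options: LanguageModelChatRequestHandleOptions, progress: Progress<LanguageModelResponsePart>, token: CancellationToken): Promise<void> {
	for await (const chunk of fetchModelStream(model.id, messages, options)) {
		if (token.isCancellationRequested) {
			return; // stop emitting once the caller cancels
		}
		if (chunk.type === 'text') {
			progress.report(new LanguageModelTextPart(chunk.text));
		} else if (chunk.type === 'toolCall') {
			// Tool calls are reported as response parts too; the caller is
			// responsible for actually invoking the tool.
			progress.report(new LanguageModelToolCallPart(chunk.callId, chunk.name, chunk.input));
		}
	}
}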
diff --git a/chat-model-provider-sample/vscode.d.ts b/chat-model-provider-sample/src/vscode.d.ts
similarity index 98%
rename from chat-model-provider-sample/vscode.d.ts
rename to chat-model-provider-sample/src/vscode.d.ts
index 0011a542fe..984c56de43 100644
--- a/chat-model-provider-sample/vscode.d.ts
+++ b/chat-model-provider-sample/src/vscode.d.ts
@@ -3133,12 +3133,12 @@ declare module 'vscode' {
 	 */
 	export class EvaluatableExpression {
 
-		/*
+		/**
 		 * The range is used to extract the evaluatable expression from the underlying document and to highlight it.
 		 */
 		readonly range: Range;
 
-		/*
+		/**
 		 * If specified the expression overrides the extracted expression.
 		 */
 		readonly expression?: string | undefined;
@@ -12464,6 +12464,20 @@
 		 * This will only take effect when `terminal.integrated.enablePersistentSessions` is enabled.
 		 */
 		isTransient?: boolean;
+
+		/**
+		 * The nonce used to verify that shell integration sequences come from a trusted source.
+		 * An example of the UX impact: if the command line is reported with a nonce, the user
+		 * does not need to confirm that the command line is correct before rerunning it
+		 * via the [shell integration command decoration](https://code.visualstudio.com/docs/terminal/shell-integration#_command-decorations-and-the-overview-ruler).
+		 *
+		 * This should be used if the terminal includes [custom shell integration support](https://code.visualstudio.com/docs/terminal/shell-integration#_supported-escape-sequences).
+		 * It should be set to a random GUID which will then set the `VSCODE_NONCE` environment
+		 * variable. Inside the shell, this should then be removed from the environment so as to
+		 * protect it from general access. Once that is done it can be passed through in the
+		 * relevant sequences to make them trusted.
+		 */
+		shellIntegrationNonce?: string;
 	}
 
 	/**
@@ -12503,6 +12517,18 @@
 		 * This will only take effect when `terminal.integrated.enablePersistentSessions` is enabled.
 		 */
 		isTransient?: boolean;
+
+		/**
+		 * The nonce used to verify that shell integration sequences come from a trusted source.
+		 * An example of the UX impact: if the command line is reported with a nonce, the user
+		 * does not need to confirm that the command line is correct before rerunning it
+		 * via the [shell integration command decoration](https://code.visualstudio.com/docs/terminal/shell-integration#_command-decorations-and-the-overview-ruler).
+		 *
+		 * This should be used if the terminal includes [custom shell integration support](https://code.visualstudio.com/docs/terminal/shell-integration#_supported-escape-sequences).
+		 * It should be set to a random GUID. Inside the {@link Pseudoterminal} implementation, this value
+		 * can be passed through in the relevant sequences to make them trusted.
+		 */
+		shellIntegrationNonce?: string;
 	}
 
 	/**
@@ -13857,6 +13883,9 @@
 	 * In the same way, symbolic links are preserved, i.e. the file event will report the path of the
 	 * symbolic link as it was provided for watching and not the target.
 	 *
+	 * *Note* that file events from deleting a folder may not include events for contained files. If possible,
+	 * events will be aggregated to reduce the overall number of emitted events.
+	 *
 	 * ### Examples
 	 *
 	 * The basic anatomy of a file watcher is as follows:
@@ -19966,7 +19995,7 @@
 	 * A string or heterogeneous array of things that a message can contain as content. Some parts may be message-type
 	 * specific for some models.
 	 */
-	content: Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>;
+	content: Array<LanguageModelInputPart>;
 
 	/**
 	 * The optional name of a user for this message.
@@ -19980,7 +20009,7 @@
 	 * @param content The content of the message.
 	 * @param name The optional name of a user for the message.
 	 */
-	constructor(role: LanguageModelChatMessageRole, content: string | Array<LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart>, name?: string);
+	constructor(role: LanguageModelChatMessageRole, content: string | Array<LanguageModelInputPart>, name?: string);
 }
 
 /**
@@ -20346,6 +20375,186 @@
 		resolveMcpServerDefinition?(server: T, token: CancellationToken): ProviderResult<T>;
 	}
 
+	/**
+	 * The provider version of {@linkcode LanguageModelChatRequestOptions}.
+	 */
+	export interface LanguageModelChatRequestHandleOptions {
+		/**
+		 * A set of options that control the behavior of the language model. These options are specific to the language model
+		 * and need to be looked up in the respective documentation.
+		 */
+		readonly modelOptions?: { readonly [name: string]: any };
+
+		/**
+		 * An optional list of tools that are available to the language model. These could be registered tools available via
+		 * {@link lm.tools}, or private tools that are just implemented within the calling extension.
+		 *
+		 * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in
+		 * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool
+		 * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}.
+		 *
+		 * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a
+		 * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}.
+		 */
+		readonly tools?: readonly LanguageModelChatTool[];
+
+		/**
+		 * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default.
+		 */
+		readonly toolMode?: LanguageModelChatToolMode;
+	}
+
+	/**
+	 * All the information representing a single language model contributed by a {@linkcode LanguageModelChatProvider}.
+	 */
+	export interface LanguageModelChatInformation {
+
+		/**
+		 * Unique identifier for the language model. Must be unique per provider, but not required to be globally unique.
+		 */
+		readonly id: string;
+
+		/**
+		 * Human-readable name of the language model.
+		 */
+		readonly name: string;
+
+		/**
+		 * Opaque family-name of the language model. Values might be `gpt-3.5-turbo`, `gpt4`, `phi2`, or `llama`,
+		 * but they are defined by extensions contributing languages and subject to change.
+		 */
+		readonly family: string;
+
+		/**
+		 * The tooltip to render when hovering the model.
+		 */
+		readonly tooltip?: string;
+
+		/**
+		 * An optional, human-readable string which will be rendered alongside the model.
+		 */
+		readonly detail?: string;
+
+		/**
+		 * Opaque version string of the model. This is defined by the extension contributing the language model
+		 * and subject to change while the identifier is stable.
+		 * This is used as a lookup value in {@linkcode LanguageModelChatSelector.version}.
+		 * An example is how GPT 4o has multiple versions like 2024-11-20 and 2024-08-06.
+		 */
+		readonly version: string;
+
+		/**
+		 * The maximum number of tokens the model can accept as input.
+		 */
+		readonly maxInputTokens: number;
+
+		/**
+		 * The maximum number of tokens the model is capable of producing.
+		 */
+		readonly maxOutputTokens: number;
+
+		/**
+		 * Various features that the model supports, such as tool calling or image input.
+		 */
+		readonly capabilities: {
+
+			/**
+			 * Whether image input is supported by the model.
+			 * Common supported images are jpg and png, but each model will vary in supported mimetypes.
+			 */
+			readonly imageInput?: boolean;
+
+			/**
+			 * Whether tool calling is supported by the model.
+			 * If a number is provided, that is the maximum number of tools a model can call.
+			 */
+			readonly toolCalling?: boolean | number;
+		};
+	}
+
+	/**
+	 * The provider version of {@linkcode LanguageModelChatMessage}.
+	 */
+	export interface LanguageModelChatRequestMessage {
+		/**
+		 * The role of this message.
+		 */
+		readonly role: LanguageModelChatMessageRole;
+
+		/**
+		 * A string or heterogeneous array of things that a message can contain as content. Some parts may be message-type
+		 * specific for some models.
+		 */
+		readonly content: ReadonlyArray<LanguageModelInputPart>;
+
+		/**
+		 * The optional name of a user for this message.
+		 */
+		readonly name: string | undefined;
+	}
+
+	/**
+	 * The various message types which a {@linkcode LanguageModelChatProvider} can emit in the chat response stream.
+	 */
+	export type LanguageModelResponsePart = LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart;
+
+	/**
+	 * The various message types which can be sent via {@linkcode LanguageModelChat.sendRequest} and processed by a {@linkcode LanguageModelChatProvider}.
+	 */
+	export type LanguageModelInputPart = LanguageModelTextPart | LanguageModelToolResultPart | LanguageModelToolCallPart;
+
+	/**
+	 * Represents a language model chat provider. A single provider can serve multiple models in a one-provider-to-many-models
+	 * relationship. An example of this would be how an OpenAI provider would provide models like gpt-5, o3, etc.
+	 */
+	export interface LanguageModelChatProvider<T extends LanguageModelChatInformation = LanguageModelChatInformation> {
+
+		/**
+		 * Signals a change from the provider to the editor so that {@linkcode prepareLanguageModelChatInformation} is called again.
+		 */
+		readonly onDidChangeLanguageModelInformation?: Event<void>;
+
+		/**
+		 * Get the list of available language models contributed by this provider.
+		 * @param options Options which specify the calling context of this function
+		 * @param token A cancellation token which signals if the user cancelled the request or not
+		 * @returns A promise that resolves to the list of available language models
+		 */
+		prepareLanguageModelChatInformation(options: PrepareLanguageModelChatModelOptions, token: CancellationToken): ProviderResult<T[]>;
+
+		/**
+		 * Returns the response for a chat request, passing the results to the progress callback.
+		 * The {@linkcode LanguageModelChatProvider} must emit the response parts to the progress callback as they are received from the language model.
+		 * @param model The language model to use
+		 * @param messages The messages to include in the request
+		 * @param options Options for the request
+		 * @param progress The progress to emit the streamed response chunks to
+		 * @param token A cancellation token for the request
+		 * @returns A promise that resolves when the response is complete. Results are actually passed to the progress callback.
+		 */
+		provideLanguageModelChatResponse(model: T, messages: readonly LanguageModelChatRequestMessage[], options: LanguageModelChatRequestHandleOptions, progress: Progress<LanguageModelResponsePart>, token: CancellationToken): Thenable<void>;
+
+		/**
+		 * Returns the number of tokens for a given text using the model-specific tokenizer logic.
+		 * @param model The language model to use
+		 * @param text The text to count tokens for
+		 * @param token A cancellation token for the request
+		 * @returns A promise that resolves to the number of tokens
+		 */
+		provideTokenCount(model: T, text: string | LanguageModelChatRequestMessage, token: CancellationToken): Thenable<number>;
+	}
+
+	/**
+	 * The list of options passed into {@linkcode LanguageModelChatProvider.prepareLanguageModelChatInformation}.
+	 */
+	export interface PrepareLanguageModelChatModelOptions {
+		/**
+		 * Whether the user should be prompted via some UI flow, or whether models should be resolved silently.
+		 * If `silent` is `true`, some models may not be resolved due to missing information such as API keys.
+		 */
+		readonly silent: boolean;
+	}
+
 	/**
 	 * Namespace for language model related functionality.
 	 */
@@ -20405,7 +20614,7 @@
 	 * any custom flow.
 	 *
 	 * In the former case, the caller shall pass the
-	 * {@link LanguageModelToolInvocationOptions.toolInvocationToken toolInvocationToken}, which comes with the a
+	 * {@link LanguageModelToolInvocationOptions.toolInvocationToken toolInvocationToken}, which comes from a
 	 * {@link ChatRequest.toolInvocationToken chat request}. This makes sure the chat UI shows the tool invocation for the
 	 * correct conversation.
 	 *
@@ -20452,6 +20661,15 @@
 	 * @returns A disposable that unregisters the provider when disposed.
 	 */
 	export function registerMcpServerDefinitionProvider(id: string, provider: McpServerDefinitionProvider): Disposable;
+
+	/**
+	 * Registers a {@linkcode LanguageModelChatProvider}.
+	 * Note: You must also define the language model chat provider via the `languageModelChatProviders` contribution point in package.json.
+	 * @param vendor The vendor for this provider. Must be globally unique. An example is `copilot` or `openai`.
+	 * @param provider The provider to register
+	 * @returns A disposable that unregisters the provider when disposed
+	 */
+	export function registerLanguageModelChatProvider(vendor: string, provider: LanguageModelChatProvider): Disposable;
 }
 
 /**
@@ -20595,7 +20813,7 @@
 
 	/**
 	 * Construct a prompt-tsx part with the given content.
-	 * @param value The value of the part, the result of `renderPromptElementJSON` from `@vscode/prompt-tsx`.
+	 * @param value The value of the part, the result of `renderElementJSON` from `@vscode/prompt-tsx`.
 	 */
 	constructor(value: unknown);
 }
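Two of the additions above benefit from a usage illustration: the optional onDidChangeLanguageModelInformation event and the silent flag in PrepareLanguageModelChatModelOptions. Below is a hedged sketch, not part of the patch, of a provider whose model list changes at runtime; everything beyond the API names declared in the d.ts above (the class name, setModels, promptForSignIn) is illustrative:

import * as vscode from 'vscode';

export class DynamicModelProvider implements vscode.LanguageModelChatProvider {
	private readonly didChange = new vscode.EventEmitter<void>();
	readonly onDidChangeLanguageModelInformation = this.didChange.event;
	private models: vscode.LanguageModelChatInformation[] = [];

	// Hypothetical hook: call once credentials or model lists become available.
	setModels(models: vscode.LanguageModelChatInformation[]): void {
		this.models = models;
		this.didChange.fire(); // the editor re-queries prepareLanguageModelChatInformation
	}

	async prepareLanguageModelChatInformation(options: vscode.PrepareLanguageModelChatModelOptions, _token: vscode.CancellationToken): Promise<vscode.LanguageModelChatInformation[]> {
		if (this.models.length === 0 && !options.silent) {
			await this.promptForSignIn(); // hypothetical UI flow; must be skipped when silent
		}
		return this.models;
	}

	private async promptForSignIn(): Promise<void> {
		// Illustrative only: acquire credentials, then populate models via setModels().
	}

	async provideLanguageModelChatResponse(model: vscode.LanguageModelChatInformation, _messages: readonly vscode.LanguageModelChatRequestMessage[], _options: vscode.LanguageModelChatRequestHandleOptions, progress: vscode.Progress<vscode.LanguageModelResponsePart>, _token: vscode.CancellationToken): Promise<void> {
		progress.report(new vscode.LanguageModelTextPart(`Echo from ${model.name}`));
	}

	async provideTokenCount(_model: vscode.LanguageModelChatInformation, text: string | vscode.LanguageModelChatRequestMessage, _token: vscode.CancellationToken): Promise<number> {
		// Crude estimate under an assumed ~4 chars per token; a real provider
		// would use the model's own tokenizer.
		return typeof text === 'string' ? Math.ceil(text.length / 4) : 1;
	}
}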
diff --git a/chat-model-provider-sample/tsconfig.json b/chat-model-provider-sample/tsconfig.json
index 910f3ba646..fcf51c9cef 100644
--- a/chat-model-provider-sample/tsconfig.json
+++ b/chat-model-provider-sample/tsconfig.json
@@ -14,12 +14,5 @@
 		// "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */
 		// "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */
 		// "noUnusedParameters": true, /* Report errors on unused parameters. */
-	},
-	"include": [
-		"src/**/*",
-		"vscode.proposed.chatProvider.d.ts"
-	],
-	"exclude": [
-		"vscode.d.ts"
-	]
+	}
 }
diff --git a/chat-model-provider-sample/vscode.proposed.chatProvider.d.ts b/chat-model-provider-sample/vscode.proposed.chatProvider.d.ts
deleted file mode 100644
index 18e53e0a1c..0000000000
--- a/chat-model-provider-sample/vscode.proposed.chatProvider.d.ts
+++ /dev/null
@@ -1,144 +0,0 @@
-/*---------------------------------------------------------------------------------------------
- *  Copyright (c) Microsoft Corporation. All rights reserved.
- *  Licensed under the MIT License. See License.txt in the project root for license information.
- *--------------------------------------------------------------------------------------------*/
-
-declare module 'vscode' {
-
-
-	// @API extensions ship a d.ts file for their options
-
-	// @API the LanguageModelChatProvider2 is an alternative that combines a source, like ollama etc, with
-	// concrete models. The `provideLanguageModelChatData` would do the discovery and auth dances and later
-	// the model data is passed to the concrete function for making a request or counting tokens
-
-
-	// TODO@API name scheme
-	export interface LanguageModelChatRequestHandleOptions {
-
-		// initiator
-		readonly extensionId: string;
-
-		/**
-		 * A set of options that control the behavior of the language model. These options are specific to the language model
-		 * and need to be looked up in the respective documentation.
-		 */
-		readonly modelOptions: { [name: string]: any };
-
-		/**
-		 * An optional list of tools that are available to the language model. These could be registered tools available via
-		 * {@link lm.tools}, or private tools that are just implemented within the calling extension.
-		 *
-		 * If the LLM requests to call one of these tools, it will return a {@link LanguageModelToolCallPart} in
-		 * {@link LanguageModelChatResponse.stream}. It's the caller's responsibility to invoke the tool. If it's a tool
-		 * registered in {@link lm.tools}, that means calling {@link lm.invokeTool}.
-		 *
-		 * Then, the tool result can be provided to the LLM by creating an Assistant-type {@link LanguageModelChatMessage} with a
-		 * {@link LanguageModelToolCallPart}, followed by a User-type message with a {@link LanguageModelToolResultPart}.
-		 */
-		tools?: LanguageModelChatTool[];
-
-		/**
-		 * The tool-selecting mode to use. {@link LanguageModelChatToolMode.Auto} by default.
-		 */
-		toolMode?: LanguageModelChatToolMode;
-	}
-
-	// TODO@API names: LanguageModelChatMetadata, LanguageModelChatItem
-	export interface LanguageModelChatInformation {
-
-		readonly id: string;
-
-		/**
-		 * Human-readable name of the language model.
-		 */
-		readonly name: string;
-
-		/**
-		 * Opaque family-name of the language model. Values might be `gpt-3.5-turbo`, `gpt4`, `phi2`, or `llama`
-		 * but they are defined by extensions contributing languages and subject to change.
-		 */
-		readonly family: string;
-
-		/**
-		 * An optional, human-readable description of the language model.
-		 */
-		readonly description?: string;
-
-		/**
-		 * An optional, human-readable string representing the cost of using the language model.
-		 */
-		readonly cost?: string;
-
-		/**
-		 * Opaque version string of the model. This is defined by the extension contributing the language model
-		 * and subject to change while the identifier is stable.
-		 */
-		readonly version: string;
-
-		readonly maxInputTokens: number;
-
-		readonly maxOutputTokens: number;
-
-		/**
-		 * When present, this gates the use of `requestLanguageModelAccess` behind an authorization flow where
-		 * the user must approve of another extension accessing the models contributed by this extension.
-		 * Additionally, the extension can provide a label that will be shown in the UI.
-		 */
-		auth?: true | { label: string };
-
-		// TODO@API maybe an enum, LanguageModelChatProviderPickerAvailability?
-		// TODO@API isPreselected proposed
-		readonly isDefault?: boolean;
-
-		// TODO@API nuke
-		readonly isUserSelectable?: boolean;
-
-		readonly capabilities?: {
-
-			// TODO@API have mimeTypes that you support
-			readonly vision?: boolean;
-
-			// TODO@API should be `boolean | number` so extensions can express how many tools they support
-			readonly toolCalling?: boolean | number;
-
-			// TODO@API DO NOT SUPPORT THIS
-			// readonly agentMode?: boolean;
-
-			// TODO@API support prompt TSX style messages, MAYBE leave it out for now
-			readonly promptTsx?: boolean;
-		};
-
-		/**
-		 * Optional category to group models by in the model picker.
-		 * The lower the order, the higher the category appears in the list.
-		 * Has no effect if `isUserSelectable` is `false`.
-		 * If not specified, the model will appear in the "Other Models" category.
-		 */
-		readonly category?: { label: string; order: number };
-	}
-
-	export interface LanguageModelChatProvider2<T extends LanguageModelChatInformation = LanguageModelChatInformation> {
-
-		// signals a change from the provider to the editor so that prepareLanguageModelChat is called again
-		onDidChange?: Event<void>;
-
-		// NOT cacheable (between reloads)
-		prepareLanguageModelChat(options: { silent: boolean }, token: CancellationToken): ProviderResult<T[]>;
-
-		provideLanguageModelChatResponse(model: T, messages: Array<LanguageModelChatMessage | LanguageModelChatMessage2>, options: LanguageModelChatRequestHandleOptions, progress: Progress<ChatResponseFragment2>, token: CancellationToken): Thenable<any>;
-
-		provideTokenCount(model: T, text: string | LanguageModelChatMessage | LanguageModelChatMessage2, token: CancellationToken): Thenable<number>;
-	}
-
-	export namespace lm {
-
-		export function registerChatModelProvider(vendor: string, provider: LanguageModelChatProvider2): Disposable;
-	}
-
-
-
-	export interface ChatResponseFragment2 {
-		index: number;
-		part: LanguageModelTextPart | LanguageModelToolCallPart;
-	}
-}
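Once the provider from this patch is registered, its models surface through the stable consumer API. A hedged sketch of the consumer side, not part of the patch; the function name and prompt handling are illustrative, while the selector fields match the vendor and family declared by the sample:

import * as vscode from 'vscode';

export async function askSampleModel(prompt: string): Promise<string> {
	// Pick one of the models contributed by the 'sample' vendor above.
	const [model] = await vscode.lm.selectChatModels({ vendor: 'sample', family: 'sample-family' });
	if (!model) {
		throw new Error('No sample model available');
	}
	const response = await model.sendRequest([vscode.LanguageModelChatMessage.User(prompt)], {});
	let result = '';
	for await (const chunk of response.text) {
		result += chunk; // stream the text fragments as the provider reports them
	}
	return result;
}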