
Commit 7c68cc0

Add metadata to create message
1 parent 568fb87 commit 7c68cc0

22 files changed: +161 −40 lines

.changeset/hot-lies-flash.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Add metadata to create message

src/api/index.ts

Lines changed: 10 additions & 1 deletion

@@ -31,8 +31,17 @@ export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
 }

+export interface ApiHandlerCreateMessageMetadata {
+	mode?: string
+	taskId: string
+}
+
 export interface ApiHandler {
-	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream

 	getModel(): { id: string; info: ModelInfo }

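For orientation, a minimal sketch of how a caller might pass the new optional metadata. Only the createMessage signature and the ApiHandlerCreateMessageMetadata shape come from the diff above; the import path, mode slug, task id, and chunk handling are illustrative assumptions.

import { Anthropic } from "@anthropic-ai/sdk"
import { ApiHandler, ApiHandlerCreateMessageMetadata } from "./api" // path assumed

// Thread task metadata through a completion so providers that care
// about it (e.g. for routing or telemetry) can pick it up.
async function streamWithMetadata(handler: ApiHandler, messages: Anthropic.Messages.MessageParam[]) {
	const metadata: ApiHandlerCreateMessageMetadata = {
		mode: "code", // optional mode slug (illustrative value)
		taskId: "task-123", // required task identifier (illustrative value)
	}
	for await (const chunk of handler.createMessage("You are a helpful assistant.", messages, metadata)) {
		if (chunk.type === "text") {
			process.stdout.write(chunk.text) // text-chunk shape assumed from ApiStream
		}
	}
}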
src/api/providers/anthropic-vertex.ts

Lines changed: 6 additions & 2 deletions

@@ -8,7 +8,7 @@ import { safeJsonParse } from "../../shared/safeJsonParse"
 import { ApiStream } from "../transform/stream"
 import { addCacheBreakpoints } from "../transform/caching/vertex"

-import { getModelParams, SingleCompletionHandler } from "../index"
+import { getModelParams, SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "./constants"
 import { BaseProvider } from "./base-provider"

@@ -49,7 +49,11 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		}
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let {
 			id,
 			info: { supportsPromptCache },

src/api/providers/anthropic.ts

Lines changed: 6 additions & 2 deletions

@@ -11,7 +11,7 @@ import {
 import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "./constants"
-import { SingleCompletionHandler, getModelParams } from "../index"
+import { SingleCompletionHandler, getModelParams, ApiHandlerCreateMessageMetadata } from "../index"

 export class AnthropicHandler extends BaseProvider implements SingleCompletionHandler {
 	private options: ApiHandlerOptions

@@ -30,7 +30,11 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 		})
 	}

-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let stream: AnthropicStream<Anthropic.Messages.RawMessageStreamEvent>
 		const cacheControl: CacheControlEphemeral = { type: "ephemeral" }
 		let { id: modelId, maxTokens, thinking, temperature, virtualId } = this.getModel()

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 6 additions & 2 deletions

@@ -5,7 +5,7 @@ import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"

-import { SingleCompletionHandler } from "../index"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"

@@ -60,7 +60,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 		})
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const {
 			id: model,
 			info: { maxTokens: max_tokens },

src/api/providers/base-provider.ts

Lines changed: 6 additions & 2 deletions

@@ -2,15 +2,19 @@ import { Anthropic } from "@anthropic-ai/sdk"

 import { ModelInfo } from "../../shared/api"

-import { ApiHandler } from "../index"
+import { ApiHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { ApiStream } from "../transform/stream"
 import { countTokens } from "../../utils/countTokens"

 /**
  * Base class for API providers that implements common functionality.
  */
 export abstract class BaseProvider implements ApiHandler {
-	abstract createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	abstract createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream
 	abstract getModel(): { id: string; info: ModelInfo }

 	/**

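To illustrate the abstract signature, a hypothetical subclass might look like the sketch below. The echo behavior, the stub model info, and the "text" chunk shape are assumptions for demonstration, not code from this commit.

import { Anthropic } from "@anthropic-ai/sdk"
import { ModelInfo } from "../../shared/api"
import { ApiHandlerCreateMessageMetadata } from "../index"
import { ApiStream } from "../transform/stream"
import { BaseProvider } from "./base-provider"

// Hypothetical provider: shows where the new metadata parameter
// becomes available to subclasses of BaseProvider.
class EchoHandler extends BaseProvider {
	override async *createMessage(
		systemPrompt: string,
		messages: Anthropic.Messages.MessageParam[],
		metadata?: ApiHandlerCreateMessageMetadata,
	): ApiStream {
		// A real provider would forward metadata?.taskId / metadata?.mode
		// to its SDK call, e.g. as request headers or tags.
		yield { type: "text", text: `task=${metadata?.taskId ?? "unknown"}` }
	}

	override getModel(): { id: string; info: ModelInfo } {
		return { id: "echo-model", info: {} as ModelInfo } // stub info for the sketch
	}
}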
src/api/providers/bedrock.ts

Lines changed: 6 additions & 1 deletion

@@ -26,6 +26,7 @@ import { MultiPointStrategy } from "../transform/cache-strategy/multi-point-stra
 import { ModelInfo as CacheModelInfo } from "../transform/cache-strategy/types"
 import { AMAZON_BEDROCK_REGION_INFO } from "../../shared/aws_regions"
 import { convertToBedrockConverseMessages as sharedConverter } from "../transform/bedrock-converse-format"
+import { ApiHandlerCreateMessageMetadata } from "../index"

 const BEDROCK_DEFAULT_TEMPERATURE = 0.3
 const BEDROCK_MAX_TOKENS = 4096

@@ -189,7 +190,11 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		this.client = new BedrockRuntimeClient(clientConfig)
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let modelConfig = this.getModel()
 		// Handle cross-region inference
 		const usePromptCache = Boolean(this.options.awsUsePromptCache && this.supportsAwsPromptCache(modelConfig))

src/api/providers/fake-ai.ts

Lines changed: 12 additions & 4 deletions

@@ -1,5 +1,5 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { ApiHandler, SingleCompletionHandler } from ".."
+import { ApiHandler, SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from ".."
 import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
 import { ApiStream } from "../transform/stream"

@@ -18,7 +18,11 @@ interface FakeAI {
 	 */
 	removeFromCache?: () => void

-	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream
 	getModel(): { id: string; info: ModelInfo }
 	countTokens(content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number>
 	completePrompt(prompt: string): Promise<string>

@@ -52,8 +56,12 @@ export class FakeAIHandler implements ApiHandler, SingleCompletionHandler {
 		this.ai = cachedFakeAi
 	}

-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		yield* this.ai.createMessage(systemPrompt, messages)
+	async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		yield* this.ai.createMessage(systemPrompt, messages, metadata)
 	}

 	getModel(): { id: string; info: ModelInfo } {

src/api/providers/gemini.ts

Lines changed: 6 additions & 2 deletions

@@ -10,7 +10,7 @@ import type { JWTInput } from "google-auth-library"
 import { ApiHandlerOptions, ModelInfo, GeminiModelId, geminiDefaultModelId, geminiModels } from "../../shared/api"
 import { safeJsonParse } from "../../shared/safeJsonParse"

-import { SingleCompletionHandler } from "../index"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { convertAnthropicContentToGemini, convertAnthropicMessageToGemini } from "../transform/gemini-format"
 import type { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"

@@ -54,7 +54,11 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 			: new GoogleGenAI({ apiKey })
 	}

-	async *createMessage(systemInstruction: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	async *createMessage(
+		systemInstruction: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const { id: model, thinkingConfig, maxOutputTokens, info } = this.getModel()

 		const contents = messages.map(convertAnthropicMessageToGemini)

src/api/providers/glama.ts

Lines changed: 6 additions & 2 deletions

@@ -9,7 +9,7 @@ import { ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { addCacheBreakpoints } from "../transform/caching/anthropic"

-import { SingleCompletionHandler } from "../index"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { RouterProvider } from "./router-provider"

 const GLAMA_DEFAULT_TEMPERATURE = 0

@@ -33,7 +33,11 @@ export class GlamaHandler extends RouterProvider implements SingleCompletionHand
 		})
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const { id: modelId, info } = await this.fetchModel()

 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [

src/api/providers/human-relay.ts

Lines changed: 7 additions & 2 deletions

@@ -4,7 +4,7 @@ import * as vscode from "vscode"
 import { ModelInfo } from "../../shared/api"
 import { getCommand } from "../../utils/commands"
 import { ApiStream } from "../transform/stream"
-import { ApiHandler, SingleCompletionHandler } from "../index"
+import { ApiHandler, SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 /**
  * Human Relay API processor
  * This processor does not directly call the API, but interacts with the model through human operations copy and paste.

@@ -18,8 +18,13 @@ export class HumanRelayHandler implements ApiHandler, SingleCompletionHandler {
 	 * Create a message processing flow, display a dialog box to request human assistance
 	 * @param systemPrompt System prompt words
 	 * @param messages Message list
+	 * @param metadata Optional metadata
 	 */
-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		// Get the most recent user message
 		const latestMessage = messages[messages.length - 1]

src/api/providers/litellm.ts

Lines changed: 6 additions & 2 deletions

@@ -4,7 +4,7 @@ import { Anthropic } from "@anthropic-ai/sdk" // Keep for type usage only
 import { ApiHandlerOptions, litellmDefaultModelId, litellmDefaultModelInfo } from "../../shared/api"
 import { ApiStream, ApiStreamUsageChunk } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
-import { SingleCompletionHandler } from "../index"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { RouterProvider } from "./router-provider"

 /**

@@ -26,7 +26,11 @@ export class LiteLLMHandler extends RouterProvider implements SingleCompletionHa
 		})
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const { id: modelId, info } = await this.fetchModel()

 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [

src/api/providers/lmstudio.ts

Lines changed: 6 additions & 1 deletion

@@ -8,6 +8,7 @@ import { convertToOpenAiMessages } from "../transform/openai-format"
 import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
 import { XmlMatcher } from "../../utils/xml-matcher"
+import { ApiHandlerCreateMessageMetadata } from "../index"

 const LMSTUDIO_DEFAULT_TEMPERATURE = 0

@@ -24,7 +25,11 @@ export class LmStudioHandler extends BaseProvider implements SingleCompletionHan
 		})
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
 			{ role: "system", content: systemPrompt },
 			...convertToOpenAiMessages(messages),

src/api/providers/mistral.ts

Lines changed: 8 additions & 1 deletion

@@ -5,6 +5,7 @@ import { ApiHandlerOptions, mistralDefaultModelId, MistralModelId, mistralModels
 import { convertToMistralMessages } from "../transform/mistral-format"
 import { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
+import { ApiHandlerCreateMessageMetadata } from "../index"

 const MISTRAL_DEFAULT_TEMPERATURE = 0

@@ -41,7 +42,13 @@ export class MistralHandler extends BaseProvider implements SingleCompletionHand
 		return "https://api.mistral.ai"
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		const { id: model } = this.getModel()
+
 		const response = await this.client.chat.stream({
 			model: this.options.apiModelId || mistralDefaultModelId,
 			messages: [{ role: "system", content: systemPrompt }, ...convertToMistralMessages(messages)],

src/api/providers/ollama.ts

Lines changed: 6 additions & 2 deletions

@@ -2,7 +2,7 @@ import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
 import axios from "axios"

-import { SingleCompletionHandler } from "../"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../"
 import { ApiHandlerOptions, ModelInfo, openAiModelInfoSaneDefaults } from "../../shared/api"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { convertToR1Format } from "../transform/r1-format"

@@ -27,7 +27,11 @@ export class OllamaHandler extends BaseProvider implements SingleCompletionHandl
 		})
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const modelId = this.getModel().id
 		const useR1Format = modelId.toLowerCase().includes("deepseek-r1")
 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [

src/api/providers/openai-native.ts

Lines changed: 6 additions & 2 deletions

@@ -1,6 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI from "openai"
-import { SingleCompletionHandler } from "../"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../"
 import {
 	ApiHandlerOptions,
 	ModelInfo,

@@ -32,7 +32,11 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio
 		this.client = new OpenAI({ baseURL: this.options.openAiNativeBaseUrl, apiKey })
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const model = this.getModel()

 		if (model.id.startsWith("o1")) {

src/api/providers/openai.ts

Lines changed: 6 additions & 2 deletions

@@ -8,7 +8,7 @@ import {
 	ModelInfo,
 	openAiModelInfoSaneDefaults,
 } from "../../shared/api"
-import { SingleCompletionHandler } from "../index"
+import { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { convertToR1Format } from "../transform/r1-format"
 import { convertToSimpleMessages } from "../transform/simple-format"

@@ -67,7 +67,11 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		}
 	}

-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const modelInfo = this.getModel().info
 		const modelUrl = this.options.openAiBaseUrl ?? ""
 		const modelId = this.options.openAiModelId ?? ""

0 commit comments