Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/clever-toes-swim.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
'@ai-sdk/azure': patch
---

Update default Azure Provider to use Responses API
5 changes: 5 additions & 0 deletions content/providers/01-ai-sdk-providers/04-azure.mdx
Original file line number Diff line number Diff line change
Expand Up @@ -123,6 +123,11 @@ const enhancedModel = wrapLanguageModel({

You can then use that enhanced model in functions like `generateText` and `streamText`.

<Note>
  The Azure provider calls the Responses API by default. To call a different
  API, use an explicit method such as `azure.chat` or `azure.completion`.
</Note>

### Example

You can use OpenAI language models to generate text with the `generateText` function:
Expand Down
21 changes: 21 additions & 0 deletions examples/ai-core/src/generate-text/azure-responses-default.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
import 'dotenv/config';
import { createAzure } from '@ai-sdk/azure';
import { generateText } from 'ai';

// Azure provider with a fetch wrapper that logs every outgoing request URL,
// so you can confirm the default model routes to the Responses API endpoint.
const loggingAzure = createAzure({
  fetch: async (input, init) => {
    console.log('Azure request URL:', input);
    return fetch(input, init);
  },
});

// Generate a short completion with the provider's default language model
// and print the resulting text.
async function main() {
  const { text } = await generateText({
    model: loggingAzure('gpt-5-nano'),
    prompt: 'Write a short poem about the sea.',
  });

  console.log(text);
}

main().catch(console.error);
120 changes: 116 additions & 4 deletions packages/azure/src/azure-openai-provider.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,118 @@ const server = createTestServer({
{},
});

describe('responses (default language model)', () => {
  describe('doGenerate', () => {
    // Token usage reported by the stubbed Responses API unless a test
    // overrides it.
    const DEFAULT_USAGE = {
      input_tokens: 4,
      output_tokens: 30,
      total_tokens: 34,
    };

    // Stub a successful Responses API payload on the v1 responses endpoint.
    function prepareJsonResponse({ content = '', usage = DEFAULT_USAGE } = {}) {
      server.urls[
        'https://test-resource.openai.azure.com/openai/v1/responses'
      ].response = {
        type: 'json-value',
        body: {
          id: 'resp_67c97c0203188190a025beb4a75242bc',
          object: 'response',
          created_at: 1741257730,
          status: 'completed',
          model: 'test-deployment',
          output: [
            {
              id: 'msg_67c97c02656c81908e080dfdf4a03cd1',
              type: 'message',
              status: 'completed',
              role: 'assistant',
              content: [
                {
                  type: 'output_text',
                  text: content,
                  annotations: [],
                },
              ],
            },
          ],
          usage,
          incomplete_details: null,
        },
      };
    }

    it('should set the correct default api version', async () => {
      prepareJsonResponse();

      await provider('test-deployment').doGenerate({ prompt: TEST_PROMPT });

      const apiVersion =
        server.calls[0].requestUrlSearchParams.get('api-version');
      expect(apiVersion).toStrictEqual('v1');
    });

    it('should set the correct modified api version', async () => {
      prepareJsonResponse();

      await providerApiVersionChanged('test-deployment').doGenerate({
        prompt: TEST_PROMPT,
      });

      const apiVersion =
        server.calls[0].requestUrlSearchParams.get('api-version');
      expect(apiVersion).toStrictEqual('2025-04-01-preview');
    });

    it('should pass headers', async () => {
      prepareJsonResponse();

      // Use a dedicated provider instance so provider-level headers can be
      // verified together with per-request headers.
      const providerWithHeaders = createAzure({
        resourceName: 'test-resource',
        apiKey: 'test-api-key',
        headers: {
          'Custom-Provider-Header': 'provider-header-value',
        },
      });

      await providerWithHeaders('test-deployment').doGenerate({
        prompt: TEST_PROMPT,
        headers: {
          'Custom-Request-Header': 'request-header-value',
        },
      });

      expect(server.calls[0].requestHeaders).toStrictEqual({
        'api-key': 'test-api-key',
        'content-type': 'application/json',
        'custom-provider-header': 'provider-header-value',
        'custom-request-header': 'request-header-value',
      });
      expect(server.calls[0].requestUserAgent).toContain(
        'ai-sdk/azure/0.0.0-test',
      );
    });

    it('should use the baseURL correctly', async () => {
      prepareJsonResponse();

      const providerWithBaseURL = createAzure({
        baseURL: 'https://test-resource.openai.azure.com/openai',
        apiKey: 'test-api-key',
      });

      await providerWithBaseURL('test-deployment').doGenerate({
        prompt: TEST_PROMPT,
      });

      expect(server.calls[0].requestUrl).toStrictEqual(
        'https://test-resource.openai.azure.com/openai/v1/responses?api-version=v1',
      );
    });
  });
});

describe('chat', () => {
describe('doGenerate', () => {
function prepareJsonResponse({ content = '' }: { content?: string } = {}) {
Expand Down Expand Up @@ -72,7 +184,7 @@ describe('chat', () => {
it('should set the correct default api version', async () => {
prepareJsonResponse();

await provider('test-deployment').doGenerate({
await provider.chat('test-deployment').doGenerate({
prompt: TEST_PROMPT,
});

Expand All @@ -84,7 +196,7 @@ describe('chat', () => {
it('should set the correct modified api version', async () => {
prepareJsonResponse();

await providerApiVersionChanged('test-deployment').doGenerate({
await providerApiVersionChanged.chat('test-deployment').doGenerate({
prompt: TEST_PROMPT,
});

Expand All @@ -104,7 +216,7 @@ describe('chat', () => {
},
});

await provider('test-deployment').doGenerate({
await provider.chat('test-deployment').doGenerate({
prompt: TEST_PROMPT,
headers: {
'Custom-Request-Header': 'request-header-value',
Expand All @@ -130,7 +242,7 @@ describe('chat', () => {
apiKey: 'test-api-key',
});

await provider('test-deployment').doGenerate({
await provider.chat('test-deployment').doGenerate({
prompt: TEST_PROMPT,
});
expect(server.calls[0].requestUrl).toStrictEqual(
Expand Down
6 changes: 3 additions & 3 deletions packages/azure/src/azure-openai-provider.ts
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ export interface AzureOpenAIProvider extends ProviderV3 {
(deploymentId: string): LanguageModelV3;

/**
Creates an Azure OpenAI chat model for text generation.
Creates an Azure OpenAI Responses API model for text generation.
*/
languageModel(deploymentId: string): LanguageModelV3;

Expand Down Expand Up @@ -239,11 +239,11 @@ export function createAzure(
);
}

return createChatModel(deploymentId);
return createResponsesModel(deploymentId);
};

provider.specificationVersion = 'v3' as const;
provider.languageModel = createChatModel;
provider.languageModel = createResponsesModel;
provider.chat = createChatModel;
provider.completion = createCompletionModel;
provider.embedding = createEmbeddingModel;
Expand Down
Loading