|
| 1 | +# Copyright (c) Meta Platforms, Inc. and affiliates. |
| 2 | +# All rights reserved. |
| 3 | +# |
| 4 | +# This source code is licensed under the terms described in the LICENSE file in |
| 5 | +# the root directory of this source tree. |
| 6 | + |
| 7 | +import os |
| 8 | +from unittest.mock import AsyncMock, MagicMock, patch |
| 9 | + |
| 10 | +from llama_stack.distribution.stack import replace_env_vars |
| 11 | +from llama_stack.providers.remote.inference.openai.config import OpenAIConfig |
| 12 | +from llama_stack.providers.remote.inference.openai.openai import OpenAIInferenceAdapter |
| 13 | + |
| 14 | + |
class TestOpenAIBaseURLConfig:
    """Exercise base-URL resolution for the OpenAI inference adapter.

    The adapter's endpoint may come from three places: the built-in default,
    an explicit ``base_url`` on ``OpenAIConfig``, or the ``OPENAI_BASE_URL``
    environment variable (resolved through the stack's env-var substitution).
    """

    def test_default_base_url_without_env_var(self):
        """With no overrides at all, the public OpenAI endpoint is used."""
        adapter = OpenAIInferenceAdapter(OpenAIConfig(api_key="test-key"))

        assert adapter.get_base_url() == "https://api.openai.com/v1"

    def test_custom_base_url_from_config(self):
        """An explicit base_url in the config is honored verbatim."""
        expected = "https://custom.openai.com/v1"
        adapter = OpenAIInferenceAdapter(OpenAIConfig(api_key="test-key", base_url=expected))

        assert adapter.get_base_url() == expected

    @patch.dict(os.environ, {"OPENAI_BASE_URL": "https://env.openai.com/v1"})
    def test_base_url_from_environment_variable(self):
        """OPENAI_BASE_URL is picked up via the sample config's env-var syntax."""
        # sample_run_config embeds env-var references which replace_env_vars
        # resolves against the patched environment before validation.
        raw = replace_env_vars(OpenAIConfig.sample_run_config(api_key="test-key"))
        adapter = OpenAIInferenceAdapter(OpenAIConfig.model_validate(raw))

        assert adapter.get_base_url() == "https://env.openai.com/v1"

    @patch.dict(os.environ, {"OPENAI_BASE_URL": "https://env.openai.com/v1"})
    def test_config_overrides_environment_variable(self):
        """A base_url set directly on the config wins over the environment."""
        expected = "https://config.openai.com/v1"
        adapter = OpenAIInferenceAdapter(OpenAIConfig(api_key="test-key", base_url=expected))

        assert adapter.get_base_url() == expected

    @patch("llama_stack.providers.utils.inference.openai_mixin.AsyncOpenAI")
    def test_client_uses_configured_base_url(self, mock_openai_class):
        """The lazily-built AsyncOpenAI client receives the configured base_url."""
        expected = "https://test.openai.com/v1"
        adapter = OpenAIInferenceAdapter(OpenAIConfig(api_key="test-key", base_url=expected))
        # get_api_key is delegated to LiteLLMOpenAIMixin, so stub it out here.
        adapter.get_api_key = MagicMock(return_value="test-key")

        # Touching .client triggers construction of the AsyncOpenAI instance.
        _ = adapter.client

        mock_openai_class.assert_called_once_with(
            api_key="test-key",
            base_url=expected,
        )

    @patch("llama_stack.providers.utils.inference.openai_mixin.AsyncOpenAI")
    async def test_check_model_availability_uses_configured_url(self, mock_openai_class):
        """check_model_availability goes through a client built with the configured URL."""
        expected = "https://test.openai.com/v1"
        adapter = OpenAIInferenceAdapter(OpenAIConfig(api_key="test-key", base_url=expected))
        adapter.get_api_key = MagicMock(return_value="test-key")

        # Stand in for the AsyncOpenAI client; models.retrieve must be awaitable.
        fake_client = MagicMock()
        fake_client.models.retrieve = AsyncMock(return_value=MagicMock())
        mock_openai_class.return_value = fake_client

        # The availability check should succeed against the mocked client.
        assert await adapter.check_model_availability("gpt-4")

        # The client must have been constructed with the custom endpoint ...
        mock_openai_class.assert_called_with(
            api_key="test-key",
            base_url=expected,
        )
        # ... and the lookup performed for the requested model exactly once.
        fake_client.models.retrieve.assert_called_once_with("gpt-4")

    @patch.dict(os.environ, {"OPENAI_BASE_URL": "https://proxy.openai.com/v1"})
    @patch("llama_stack.providers.utils.inference.openai_mixin.AsyncOpenAI")
    async def test_environment_variable_affects_model_availability_check(self, mock_openai_class):
        """With OPENAI_BASE_URL set, availability checks target that endpoint."""
        # Build the config through the env-var substitution path so the
        # patched OPENAI_BASE_URL flows into the adapter.
        raw = replace_env_vars(OpenAIConfig.sample_run_config(api_key="test-key"))
        adapter = OpenAIInferenceAdapter(OpenAIConfig.model_validate(raw))
        adapter.get_api_key = MagicMock(return_value="test-key")

        fake_client = MagicMock()
        fake_client.models.retrieve = AsyncMock(return_value=MagicMock())
        mock_openai_class.return_value = fake_client

        assert await adapter.check_model_availability("gpt-4")

        # The client must have been created against the env-provided proxy URL.
        mock_openai_class.assert_called_with(
            api_key="test-key",
            base_url="https://proxy.openai.com/v1",
        )