Commit 65d45c7
chore: various watsonx fixes (#3428)
# What does this PR do?

* use a logger
* update the distro to add the Files API, otherwise it won't start since it is a dependency of vector_io
* clarify project_id and api_key requirements
* disable openai compatible calls since the endpoint returns 404
* disable text_inference structured format tests
* fix openai client initialization

## Test Plan

Execute text_inference:

```
WATSONX_API_KEY=... WATSONX_PROJECT_ID=... python -m llama_stack.core.server.server llama_stack/distributions/watsonx/run.yaml

LLAMA_STACK_CONFIG=http://localhost:8321 uv run --group test pytest -vvvv -ra --text-model watsonx/meta-llama/llama-3-3-70b-instruct tests/integration/inference/test_text_inference.py

============================================= test session starts ==============================================
platform darwin -- Python 3.12.8, pytest-8.4.2, pluggy-1.6.0 -- /Users/leseb/Documents/AI/llama-stack/.venv/bin/python3
cachedir: .pytest_cache
metadata: {'Python': '3.12.8', 'Platform': 'macOS-15.6.1-arm64-arm-64bit', 'Packages': {'pytest': '8.4.2', 'pluggy': '1.6.0'}, 'Plugins': {'anyio': '4.9.0', 'html': '4.1.1', 'socket': '0.7.0', 'asyncio': '1.1.0', 'json-report': '1.5.0', 'timeout': '2.4.0', 'metadata': '3.1.1', 'cov': '6.2.1', 'nbval': '0.11.0', 'hydra-core': '1.3.2'}}
rootdir: /Users/leseb/Documents/AI/llama-stack
configfile: pyproject.toml
plugins: anyio-4.9.0, html-4.1.1, socket-0.7.0, asyncio-1.1.0, json-report-1.5.0, timeout-2.4.0, metadata-3.1.1, cov-6.2.1, nbval-0.11.0, hydra-core-1.3.2
asyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
collected 20 items

tests/integration/inference/test_text_inference.py::test_text_completion_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:sanity] PASSED [  5%]
tests/integration/inference/test_text_inference.py::test_text_completion_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:sanity] PASSED [ 10%]
tests/integration/inference/test_text_inference.py::test_text_completion_stop_sequence[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:stop_sequence] XFAIL [ 15%]
tests/integration/inference/test_text_inference.py::test_text_completion_log_probs_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:log_probs] XFAIL [ 20%]
tests/integration/inference/test_text_inference.py::test_text_completion_log_probs_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:log_probs] XFAIL [ 25%]
tests/integration/inference/test_text_inference.py::test_text_completion_structured_output[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:structured_output] SKIPPED (doesn't support json_schema structured output) [ 30%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:non_streaming_01] PASSED [ 35%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:streaming_01] PASSED [ 40%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_tool_calling_and_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling] PASSED [ 45%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_tool_calling_and_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling] PASSED [ 50%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_tool_choice_required[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling] PASSED [ 55%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_tool_choice_none[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling] PASSED [ 60%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_structured_output[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:structured_output] SKIPPED (doesn't support json_schema structured output) [ 65%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_tool_calling_tools_not_in_request[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling_tools_absent-True] PASSED [ 70%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:text_then_tool] XFAIL [ 75%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:non_streaming_02] PASSED [ 80%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:streaming_02] PASSED [ 85%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_tool_calling_tools_not_in_request[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_calling_tools_absent-False] PASSED [ 90%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_then_answer] XFAIL [ 95%]
tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:array_parameter] XFAIL [100%]

=========================================== short test summary info ============================================
SKIPPED [2] tests/integration/inference/test_text_inference.py:49: Model watsonx/meta-llama/llama-3-3-70b-instruct hosted by remote::watsonx doesn't support json_schema structured output
XFAIL tests/integration/inference/test_text_inference.py::test_text_completion_stop_sequence[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:stop_sequence] - remote::watsonx doesn't support 'stop' parameter yet
XFAIL tests/integration/inference/test_text_inference.py::test_text_completion_log_probs_non_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:log_probs] - remote::watsonx doesn't support log probs yet
XFAIL tests/integration/inference/test_text_inference.py::test_text_completion_log_probs_streaming[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:completion:log_probs] - remote::watsonx doesn't support log probs yet
XFAIL tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:text_then_tool] - Not tested for non-llama4 models yet
XFAIL tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:tool_then_answer] - Not tested for non-llama4 models yet
XFAIL tests/integration/inference/test_text_inference.py::test_text_chat_completion_with_multi_turn_tool_calling[txt=watsonx/meta-llama/llama-3-3-70b-instruct-inference:chat_completion:array_parameter] - Not tested for non-llama4 models yet
============================ 12 passed, 2 skipped, 6 xfailed, 14 warnings in 36.88s ============================
```

---------

Signed-off-by: Sébastien Han <[email protected]>
1 parent f4ab154 commit 65d45c7

File tree

7 files changed: +45 -9 lines changed

docs/source/providers/inference/remote_watsonx.md

Lines changed: 2 additions & 2 deletions
```diff
@@ -9,8 +9,8 @@ IBM WatsonX inference provider for accessing AI models on IBM's WatsonX platform
 | Field | Type | Required | Default | Description |
 |-------|------|----------|---------|-------------|
 | `url` | `<class 'str'>` | No | https://us-south.ml.cloud.ibm.com | A base url for accessing the watsonx.ai |
-| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key, only needed of using the hosted service |
-| `project_id` | `str \| None` | No | | The Project ID key, only needed of using the hosted service |
+| `api_key` | `pydantic.types.SecretStr \| None` | No | | The watsonx API key |
+| `project_id` | `str \| None` | No | | The Project ID key |
 | `timeout` | `<class 'int'>` | No | 60 | Timeout for the HTTP requests |
 
 ## Sample Configuration
```

llama_stack/distributions/watsonx/run.yaml

Lines changed: 9 additions & 0 deletions
```diff
@@ -10,6 +10,7 @@ apis:
 - telemetry
 - tool_runtime
 - vector_io
+- files
 providers:
   inference:
   - provider_id: watsonx
@@ -94,6 +95,14 @@ providers:
     provider_type: inline::rag-runtime
   - provider_id: model-context-protocol
     provider_type: remote::model-context-protocol
+  files:
+  - provider_id: meta-reference-files
+    provider_type: inline::localfs
+    config:
+      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/watsonx/files}
+      metadata_store:
+        type: sqlite
+        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/files_metadata.db
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/registry.db
```
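The new `files` provider reuses the `${env.VAR:=default}` substitution syntax found throughout run.yaml: the environment variable's value is used when set, the literal default otherwise. A rough sketch of these assumed semantics (an illustration only, not Llama Stack's actual resolver; `resolve_env_refs` is a hypothetical helper):

```python
import os
import re

# Matches ${env.NAME:=default} and captures the variable name and its default.
_ENV_PATTERN = re.compile(r"\$\{env\.([A-Za-z0-9_]+):=([^}]*)\}")


def resolve_env_refs(value: str) -> str:
    """Substitute ${env.NAME:=default} with os.environ[NAME], or the default."""
    return _ENV_PATTERN.sub(lambda m: os.environ.get(m.group(1), m.group(2)), value)


# With SQLITE_STORE_DIR unset, the default path is used:
print(resolve_env_refs("${env.SQLITE_STORE_DIR:=~/.llama/distributions/watsonx}/registry.db"))
# -> ~/.llama/distributions/watsonx/registry.db
```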

llama_stack/distributions/watsonx/watsonx.py

Lines changed: 10 additions & 2 deletions
```diff
@@ -9,14 +9,15 @@
 from llama_stack.apis.models import ModelType
 from llama_stack.core.datatypes import BuildProvider, ModelInput, Provider, ToolGroupInput
 from llama_stack.distributions.template import DistributionTemplate, RunConfigSettings, get_model_registry
+from llama_stack.providers.inline.files.localfs.config import LocalfsFilesImplConfig
 from llama_stack.providers.inline.inference.sentence_transformers import (
     SentenceTransformersInferenceConfig,
 )
 from llama_stack.providers.remote.inference.watsonx import WatsonXConfig
 from llama_stack.providers.remote.inference.watsonx.models import MODEL_ENTRIES
 
 
-def get_distribution_template() -> DistributionTemplate:
+def get_distribution_template(name: str = "watsonx") -> DistributionTemplate:
     providers = {
         "inference": [
             BuildProvider(provider_type="remote::watsonx"),
@@ -42,6 +43,7 @@ def get_distribution_template() -> DistributionTemplate:
             BuildProvider(provider_type="inline::rag-runtime"),
             BuildProvider(provider_type="remote::model-context-protocol"),
         ],
+        "files": [BuildProvider(provider_type="inline::localfs")],
     }
 
     inference_provider = Provider(
@@ -79,9 +81,14 @@ def get_distribution_template() -> DistributionTemplate:
         },
     )
 
+    files_provider = Provider(
+        provider_id="meta-reference-files",
+        provider_type="inline::localfs",
+        config=LocalfsFilesImplConfig.sample_run_config(f"~/.llama/distributions/{name}"),
+    )
     default_models, _ = get_model_registry(available_models)
     return DistributionTemplate(
-        name="watsonx",
+        name=name,
         distro_type="remote_hosted",
         description="Use watsonx for running LLM inference",
         container_image=None,
@@ -92,6 +99,7 @@ def get_distribution_template() -> DistributionTemplate:
         "run.yaml": RunConfigSettings(
             provider_overrides={
                 "inference": [inference_provider, embedding_provider],
+                "files": [files_provider],
             },
             default_models=default_models + [embedding_model],
             default_tool_groups=default_tool_groups,
```
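The new `name` parameter threads the distribution name into the files provider's storage path. A small usage sketch (assuming `llama_stack` is installed; the module path follows the file shown in this diff):

```python
from llama_stack.distributions.watsonx.watsonx import get_distribution_template

# The default invocation keeps the historical "watsonx" name and storage paths.
template = get_distribution_template()
print(template.name)  # watsonx

# A custom name also relocates the files provider's storage directory,
# e.g. ~/.llama/distributions/watsonx-dev when name="watsonx-dev".
dev_template = get_distribution_template(name="watsonx-dev")
print(dev_template.name)  # watsonx-dev
```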

llama_stack/providers/remote/inference/watsonx/config.py

Lines changed: 2 additions & 2 deletions
```diff
@@ -26,11 +26,11 @@ class WatsonXConfig(BaseModel):
     )
     api_key: SecretStr | None = Field(
         default_factory=lambda: os.getenv("WATSONX_API_KEY"),
-        description="The watsonx API key, only needed of using the hosted service",
+        description="The watsonx API key",
     )
     project_id: str | None = Field(
         default_factory=lambda: os.getenv("WATSONX_PROJECT_ID"),
-        description="The Project ID key, only needed of using the hosted service",
+        description="The Project ID key",
     )
     timeout: int = Field(
         default=60,
```
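Since both fields default from environment variables via `default_factory`, exporting `WATSONX_API_KEY` and `WATSONX_PROJECT_ID` is enough to configure the provider. A minimal usage sketch (placeholder values; assumes `llama_stack` is installed):

```python
import os

from llama_stack.providers.remote.inference.watsonx import WatsonXConfig

# Placeholder credentials -- real values come from your watsonx.ai account.
os.environ["WATSONX_API_KEY"] = "example-api-key"
os.environ["WATSONX_PROJECT_ID"] = "example-project-id"

config = WatsonXConfig()
print(config.url)         # https://us-south.ml.cloud.ibm.com (the default)
print(config.project_id)  # example-project-id
print(config.timeout)     # 60
```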

llama_stack/providers/remote/inference/watsonx/watsonx.py

Lines changed: 18 additions & 2 deletions
```diff
@@ -38,6 +38,7 @@
     TopKSamplingStrategy,
     TopPSamplingStrategy,
 )
+from llama_stack.log import get_logger
 from llama_stack.providers.utils.inference.model_registry import ModelRegistryHelper
 from llama_stack.providers.utils.inference.openai_compat import (
     OpenAICompatCompletionChoice,
@@ -57,14 +58,29 @@
 from . import WatsonXConfig
 from .models import MODEL_ENTRIES
 
+logger = get_logger(name=__name__, category="inference::watsonx")
+
+
+# Note on structured output
+# WatsonX returns responses with a json embedded into a string.
+# Examples:
+
+# ChatCompletionResponse(completion_message=CompletionMessage(content='```json\n{\n
+# "first_name": "Michael",\n "last_name": "Jordan",\n'...)
+# Not even a valid JSON, but we can still extract the JSON from the content
+
+# CompletionResponse(content=' \nThe best answer is $\\boxed{\\{"name": "Michael Jordan",
+# "year_born": "1963", "year_retired": "2003"\\}}$')
+# Find the start of the boxed content
+
 
 class WatsonXInferenceAdapter(Inference, ModelRegistryHelper):
     def __init__(self, config: WatsonXConfig) -> None:
         ModelRegistryHelper.__init__(self, MODEL_ENTRIES)
 
-        print(f"Initializing watsonx InferenceAdapter({config.url})...")
-
+        logger.info(f"Initializing watsonx InferenceAdapter({config.url})...")
         self._config = config
+        self._openai_client: AsyncOpenAI | None = None
 
         self._project_id = self._config.project_id
```
tests/integration/inference/test_openai_completion.py

Lines changed: 2 additions & 0 deletions
```diff
@@ -58,6 +58,7 @@ def skip_if_model_doesnt_support_openai_completion(client_with_models, model_id)
         # does not work with the specified model, gpt-5-mini. Please choose different model and try
         # again. You can learn more about which models can be used with each operation here:
         # https://go.microsoft.com/fwlink/?linkid=2197993.'}}"}
+        "remote::watsonx",  # return 404 when hitting the /openai/v1 endpoint
     ):
         pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support OpenAI completions.")
 
@@ -110,6 +111,7 @@ def skip_if_model_doesnt_support_openai_chat_completion(client_with_models, mode
         "remote::cerebras",
         "remote::databricks",
         "remote::runpod",
+        "remote::watsonx",  # watsonx returns 404 when hitting the /openai/v1 endpoint
     ):
         pytest.skip(f"Model {model_id} hosted by {provider.provider_type} doesn't support OpenAI chat completions.")
```

tests/integration/inference/test_text_inference.py

Lines changed: 2 additions & 1 deletion
```diff
@@ -45,7 +45,7 @@ def skip_if_model_doesnt_support_json_schema_structured_output(client_with_model
     provider_id = models[model_id].provider_id
     providers = {p.provider_id: p for p in client_with_models.providers.list()}
     provider = providers[provider_id]
-    if provider.provider_type in ("remote::sambanova", "remote::azure"):
+    if provider.provider_type in ("remote::sambanova", "remote::azure", "remote::watsonx"):
         pytest.skip(
             f"Model {model_id} hosted by {provider.provider_type} doesn't support json_schema structured output"
         )
@@ -211,6 +211,7 @@ def test_text_completion_log_probs_streaming(client_with_models, text_model_id,
 )
 def test_text_completion_structured_output(client_with_models, text_model_id, test_case):
     skip_if_model_doesnt_support_completion(client_with_models, text_model_id)
+    skip_if_model_doesnt_support_json_schema_structured_output(client_with_models, text_model_id)
 
     class AnswerFormat(BaseModel):
         name: str
```
