Commit 3ef4bd5

update docstring for validate response (#115)
1 parent bd26e90 commit 3ef4bd5

File tree

5 files changed: +17 −6 lines changed

- CHANGELOG.md
- pyproject.toml
- src/cleanlab_codex/__about__.py
- src/cleanlab_codex/project.py
- tests/test_project.py

CHANGELOG.md

Lines changed: 6 additions & 1 deletion
@@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [1.0.31] 2025-10-14
+
+- Add `expert_guardrail_override_explanation` and `log_id` to `ProjectValidateResponse` docstring
+
 ## [1.0.30] 2025-10-01
 
 - Update API reference language from Codex -> Cleanlab AI Platform
@@ -141,7 +145,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 - Initial release of the `cleanlab-codex` client library.
 
-[Unreleased]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.30...HEAD
+[Unreleased]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.31...HEAD
+[1.0.31]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.30...v1.0.31
 [1.0.30]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.29...v1.0.30
 [1.0.29]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.28...v1.0.29
 [1.0.28]: https://github.com/cleanlab/cleanlab-codex/compare/v1.0.27...v1.0.28

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ classifiers = [
 ]
 dependencies = [
   "cleanlab-tlm~=1.1,>=1.1.14",
-  "codex-sdk==0.1.0a28",
+  "codex-sdk==0.1.0a30",
   "pydantic>=2.0.0, <3",
 ]
 
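As context for the two pinning styles in this diff: `~=1.1` is a PEP 440 compatible-release specifier (effectively >=1.1, <2.0), while `codex-sdk` is pinned to an exact pre-release build, which is why each SDK upgrade requires an explicit bump like this one. A quick way to sanity-check these specifiers, using the third-party `packaging` library (not part of this commit), is shown below.

```python
# Not part of this commit: a quick check of the PEP 440 specifiers used above,
# via the `packaging` library (pip install packaging).
from packaging.specifiers import SpecifierSet

tlm_spec = SpecifierSet("~=1.1,>=1.1.14")  # compatible-release range
print("1.1.14" in tlm_spec)  # True  -- minimum allowed version
print("1.2.0" in tlm_spec)   # True  -- still within ~=1.1 (i.e. <2.0)
print("2.0.0" in tlm_spec)   # False -- blocked by the compatible-release cap

sdk_spec = SpecifierSet("==0.1.0a28")  # exact pre-release pin
print("0.1.0a30" in sdk_spec)  # False -- hence the explicit bump in this diff
```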

src/cleanlab_codex/__about__.py

Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: MIT
-__version__ = "1.0.30"
+__version__ = "1.0.31"

src/cleanlab_codex/project.py

Lines changed: 7 additions & 1 deletion
@@ -18,7 +18,11 @@
 
 from codex import Codex as _Codex
 from codex.types.project_validate_response import ProjectValidateResponse
-from openai.types.chat import ChatCompletion, ChatCompletionMessageParam, ChatCompletionToolParam
+from openai.types.chat import (
+    ChatCompletion,
+    ChatCompletionMessageParam,
+    ChatCompletionToolParam,
+)
 
 
 _ERROR_CREATE_ACCESS_KEY = (
@@ -187,6 +191,8 @@ def validate(
         - escalated_to_sme (bool): True if the query should be escalated to SME for review. When True, the query is logged and may be answered by an expert.
         - eval_scores (dict[str, ThresholdedEvalScore]): Evaluation scores for different response attributes (e.g., trustworthiness, helpfulness, ...). Each includes a numeric score and a `failed` flag indicating whether the score falls below threshold.
         - expert_answer (str | None): If it was auto-determined that this query should be escalated to SME, and a prior SME answer for a similar query was found, then this will return that expert answer. Otherwise, it is None.
+        - expert_guardrail_override_explanation (str | None): If the final guardrail decision was overridden by expert review, this will contain an explanation for why the guardrail result was overridden. Otherwise, it is None.
+        - log_id (str): The ID of the log created for this query.
 
         When available, consider swapping your AI response with the expert answer before serving the response to your user.
         """

tests/test_project.py

Lines changed: 2 additions & 2 deletions
@@ -32,8 +32,8 @@ def test_project_validate_with_dict_response(
     openai_messages_conversational: list["ChatCompletionMessageParam"],
 ) -> None:
     expected_result = ProjectValidateResponse(
-        is_bad_response=True,
         expert_answer=None,
+        expert_guardrail_override_explanation=None,
         eval_scores={
             "response_helpfulness": EvalScores(
                 score=0.8,
@@ -125,7 +125,7 @@ def test_project_validate_with_tools(
     openai_tools: list["ChatCompletionToolParam"],
 ) -> None:
     expected_result = ProjectValidateResponse(
-        is_bad_response=True,
+        expert_guardrail_override_explanation=None,
         expert_answer=None,
         eval_scores={
             "response_helpfulness": EvalScores(
