@@ -4,7 +4,7 @@
 
 from datetime import datetime
 from typing import TYPE_CHECKING as _TYPE_CHECKING
-from typing import Dict, List, Literal, Optional
+from typing import Dict, Literal, Optional
 
 from codex import AuthenticationError
 
@@ -16,7 +16,6 @@
     from datetime import datetime
 
     from codex import Codex as _Codex
-    from codex.types.project_validate_params import Options as ProjectValidateOptions
     from codex.types.project_validate_response import ProjectValidateResponse
 
 
@@ -151,23 +150,49 @@ def validate(
         query: str,
         response: str,
         *,
-        constrain_outputs: Optional[List[str]] = None,
         custom_metadata: Optional[object] = None,
         eval_scores: Optional[Dict[str, float]] = None,
         eval_thresholds: Optional[Dict[str, float]] = None,
-        options: Optional[ProjectValidateOptions] = None,
         quality_preset: Literal["best", "high", "medium", "low", "base"] = "medium",
     ) -> ProjectValidateResponse:
+        """Run validation on a query to an AI system.
+
+        Args:
+            context (str): The context used by the AI system to generate a response for the query.
+            prompt (str): The full prompt (including system instructions, context, and the original query) used by the AI system to generate a response for the query.
+            query (str): The original user input to the AI system.
+            response (str): The response generated by the AI system for the query.
+            custom_metadata (object, optional): Custom metadata to log in Codex for the query.
+            eval_scores (Dict[str, float], optional): Optional scores to use for the query. When provided, Codex will skip running TrustworthyRAG evaluations on the query and use the provided scores instead.
+            eval_thresholds (Dict[str, float], optional): Optional thresholds to use for evaluating the query. We recommend configuring thresholds on the Project instead and using the same thresholds for all queries.
+            quality_preset (Literal["best", "high", "medium", "low", "base"], optional): The quality preset to use for the query.
+
+        Returns:
+            ProjectValidateResponse: The response from the validation.
+        """
         return self._sdk_client.projects.validate(
             self._id,
             context=context,
             prompt=prompt,
             query=query,
             response=response,
-            constrain_outputs=constrain_outputs,
             custom_eval_thresholds=eval_thresholds,
             custom_metadata=custom_metadata,
             eval_scores=eval_scores,
-            options=options,
             quality_preset=quality_preset,
         )
+
+    def add_remediation(self, question: str, answer: str | None = None) -> None:
+        """Add a remediation to the project. A remediation represents a question and answer pair that is expert verified
+        and should be used to answer future queries to the AI system that are similar to the question.
+
+        Args:
+            question (str): The question to add to the project.
+            answer (str, optional): The expert answer for the question. If not provided, the question will be added to the project without an expert answer.
+        """
+        self._sdk_client.projects.remediations.create(
+            project_id=self.id,
+            question=question,
+            answer=answer,
+            extra_headers=_AnalyticsMetadata().to_headers(),
+        )
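For context, here is a minimal usage sketch of the two methods touched by this diff. It assumes a Project instance named `project` has already been obtained from the SDK (client and project construction are not part of this change), and the example strings are invented.

# Sketch only: `project` is assumed to be an existing Project instance from this SDK.

# Validate one query/response pair produced by your AI system.
result = project.validate(
    context="Acme's return window is 30 days.",
    prompt="System: Answer using the provided context.\nUser: What is the return policy?",
    query="What is the return policy?",
    response="You can return items within 30 days.",
    quality_preset="medium",
)

# Record an expert-verified question/answer pair so similar future queries can reuse it.
project.add_remediation(
    question="What is the return policy?",
    answer="Items can be returned within 30 days of purchase.",
)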