Skip to content

Commit 5de0ae5

Browse files
committed
refactor(llmrails)!: remove deprecated return_context argument
The `return_context` argument has been removed from the LLMRails class methods as it was deprecated. Users are encouraged to use `GenerationOptions.output_vars = True` instead. This change simplifies the interface and eliminates deprecated functionality. BREAKING CHANGE: The `return_context` argument is no longer supported.
1 parent 67b8b7a commit 5de0ae5

File tree

1 file changed

+0
-23
lines changed

1 file changed

+0
-23
lines changed

nemoguardrails/rails/llm/llmrails.py

Lines changed: 0 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -572,7 +572,6 @@ async def generate_async(
572572
options: Optional[Union[dict, GenerationOptions]] = None,
573573
state: Optional[Union[dict, State]] = None,
574574
streaming_handler: Optional[StreamingHandler] = None,
575-
return_context: bool = False,
576575
) -> Union[str, dict, GenerationResponse, Tuple[dict, dict]]:
577576
"""Generate a completion or a next message.
578577
@@ -595,7 +594,6 @@ async def generate_async(
595594
state: The state object that should be used as the starting point.
596595
streaming_handler: If specified, and the config supports streaming, the
597596
provided handler will be used for streaming.
598-
return_context: Whether to return the context at the end of the run.
599597
600598
Returns:
601599
The completion (when a prompt is provided) or the next message.
@@ -619,19 +617,6 @@ async def generate_async(
619617
# Save the generation options in the current async context.
620618
generation_options_var.set(options)
621619

622-
if return_context:
623-
warnings.warn(
624-
"The `return_context` argument is deprecated and will be removed in 0.9.0. "
625-
"Use `GenerationOptions.output_vars = True` instead.",
626-
DeprecationWarning,
627-
stacklevel=2,
628-
)
629-
630-
# And we use the generation options mechanism instead.
631-
if options is None:
632-
options = GenerationOptions()
633-
options.output_vars = True
634-
635620
if streaming_handler:
636621
streaming_handler_var.set(streaming_handler)
637622

@@ -859,12 +844,6 @@ async def generate_async(
859844
# Otherwise, we return the full context
860845
res.output_data = context
861846

862-
# If the `return_context` is used, then we return a tuple to keep
863-
# the interface compatible.
864-
# TODO: remove this in 0.10.0.
865-
if return_context:
866-
return new_message, context
867-
868847
_log = compute_generation_log(processing_log)
869848

870849
# Include information about activated rails and LLM calls if requested
@@ -989,7 +968,6 @@ def generate(
989968
self,
990969
prompt: Optional[str] = None,
991970
messages: Optional[List[dict]] = None,
992-
return_context: bool = False,
993971
options: Optional[Union[dict, GenerationOptions]] = None,
994972
state: Optional[dict] = None,
995973
):
@@ -1009,7 +987,6 @@ def generate(
1009987
messages=messages,
1010988
options=options,
1011989
state=state,
1012-
return_context=return_context,
1013990
)
1014991
)
1015992

0 commit comments

Comments (0)