diff --git a/integrations/google_genai/src/haystack_integrations/components/generators/google_genai/chat/chat_generator.py b/integrations/google_genai/src/haystack_integrations/components/generators/google_genai/chat/chat_generator.py index 693d9fcc73..6cede2f3d4 100644 --- a/integrations/google_genai/src/haystack_integrations/components/generators/google_genai/chat/chat_generator.py +++ b/integrations/google_genai/src/haystack_integrations/components/generators/google_genai/chat/chat_generator.py @@ -6,6 +6,7 @@ import json from collections.abc import AsyncIterator, Iterator from datetime import datetime, timezone +from types import TracebackType from typing import Any, Literal, Optional from google.genai import types @@ -507,6 +508,14 @@ def __init__( self._streaming_callback = streaming_callback self._tools = tools + def __del__(self): + self._client.close() + + def __exit__( + self, exc_type: Optional[type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType] + ) -> None: + self._client.close() + def to_dict(self) -> dict[str, Any]: """ Serializes the component to a dictionary. 
@@ -880,15 +889,14 @@ def run( config=config, ) return self._handle_streaming_response(response_stream, streaming_callback) - else: - # Use non-streaming - response = self._client.models.generate_content( - model=self._model, - contents=contents, - config=config, - ) - reply = _convert_google_genai_response_to_chatmessage(response, self._model) - return {"replies": [reply]} + # Use non-streaming + response = self._client.models.generate_content( + model=self._model, + contents=contents, + config=config, + ) + reply = _convert_google_genai_response_to_chatmessage(response, self._model) + return {"replies": [reply]} except Exception as e: # Check if the error is related to thinking configuration @@ -989,15 +997,14 @@ async def run_async( config=config, ) return await self._handle_streaming_response_async(response_stream, streaming_callback) - else: - # Use non-streaming - response = await self._client.aio.models.generate_content( - model=self._model, - contents=contents, - config=config, - ) - reply = _convert_google_genai_response_to_chatmessage(response, self._model) - return {"replies": [reply]} + # Use non-streaming + response = await self._client.aio.models.generate_content( + model=self._model, + contents=contents, + config=config, + ) + reply = _convert_google_genai_response_to_chatmessage(response, self._model) + return {"replies": [reply]} except Exception as e: # Check if the error is related to thinking configuration