src/google/adk/apps/llm_event_summarizer.py (4 additions, 0 deletions)

@@ -104,9 +104,12 @@ async def maybe_summarize_events(
         contents=[Content(role='user', parts=[Part(text=prompt)])],
     )
     summary_content = None
+    usage_metadata = None
     async for llm_response in self._llm.generate_content_async(
         llm_request, stream=False
     ):
+      if llm_response.usage_metadata is not None:
+        usage_metadata = llm_response.usage_metadata
       if llm_response.content:
         summary_content = llm_response.content
         break
@@ -132,4 +135,5 @@ async def maybe_summarize_events(
         author='user',
         actions=actions,
         invocation_id=Event.new_id(),
+        usage_metadata=usage_metadata,
     )
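
Note on the change above: the loop keeps the latest non-None `usage_metadata` seen while streaming and attaches it to the compaction event, so the metadata survives even when it arrives on a response that carries no content. A minimal self-contained sketch of that pattern follows; `StubLlm` and `StubResponse` are hypothetical stand-ins, not ADK types.

```python
import asyncio
from dataclasses import dataclass
from typing import Optional


@dataclass
class StubResponse:
  """Hypothetical stand-in for an LLM response; not the ADK LlmResponse."""

  content: Optional[str]
  usage_metadata: Optional[dict]


class StubLlm:
  """Hypothetical stand-in for the summarizer's LLM dependency."""

  async def generate_content_async(self, request: str, stream: bool = False):
    # Some backends report usage on a chunk that carries no content,
    # which is why the consumer tracks the two fields independently.
    yield StubResponse(content=None, usage_metadata={'prompt_token_count': 10})
    yield StubResponse(content='Summary', usage_metadata=None)


async def summarize(llm: StubLlm, request: str):
  summary_content = None
  usage_metadata = None
  async for response in llm.generate_content_async(request, stream=False):
    # Keep the most recent non-None usage metadata, even when it
    # arrives on a response without content.
    if response.usage_metadata is not None:
      usage_metadata = response.usage_metadata
    if response.content:
      summary_content = response.content
      break
  return summary_content, usage_metadata


print(asyncio.run(summarize(StubLlm(), 'compact this history')))
# ('Summary', {'prompt_token_count': 10})
```

Tracking the two fields independently matters because `break` fires on the first response with content; without the separate check, metadata delivered on an earlier content-free chunk would be lost.
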
tests/unittests/apps/test_llm_event_summarizer.py (33 additions, 2 deletions)

@@ -25,6 +25,7 @@
 from google.genai.types import Content
 from google.genai.types import FunctionCall
 from google.genai.types import FunctionResponse
+from google.genai.types import GenerateContentResponseUsageMetadata
 from google.genai.types import Part
 import pytest

@@ -57,7 +58,9 @@ async def test_maybe_compact_events_success(self):
     expected_prompt = self.compactor._DEFAULT_PROMPT_TEMPLATE.format(
         conversation_history=expected_conversation_history
     )
-    mock_llm_response = Mock(content=Content(parts=[Part(text='Summary')]))
+    mock_llm_response = Mock(
+        content=Content(parts=[Part(text='Summary')]), usage_metadata=None
+    )

     async def async_gen():
       yield mock_llm_response
@@ -90,11 +93,39 @@ async def async_gen():
     self.assertEqual(llm_request.contents[0].parts[0].text, expected_prompt)
     self.assertFalse(kwargs['stream'])

+  async def test_maybe_compact_events_includes_usage_metadata(self):
+    events = [
+        self._create_event(1.0, 'Hello', 'user'),
+        self._create_event(2.0, 'Hi there!', 'model'),
+    ]
+    usage_metadata = GenerateContentResponseUsageMetadata(
+        prompt_token_count=10,
+        candidates_token_count=5,
+        total_token_count=15,
+    )
+    mock_llm_response = Mock(
+        content=Content(parts=[Part(text='Summary')]),
+        usage_metadata=usage_metadata,
+    )
+
+    async def async_gen():
+      yield mock_llm_response
+
+    self.mock_llm.generate_content_async.return_value = async_gen()
+
+    compacted_event = await self.compactor.maybe_summarize_events(events=events)
+
+    self.assertIsNotNone(compacted_event)
+    self.assertIsNotNone(compacted_event.usage_metadata)
+    self.assertEqual(compacted_event.usage_metadata.prompt_token_count, 10)
+    self.assertEqual(compacted_event.usage_metadata.candidates_token_count, 5)
+    self.assertEqual(compacted_event.usage_metadata.total_token_count, 15)
+
   async def test_maybe_compact_events_empty_llm_response(self):
     events = [
         self._create_event(1.0, 'Hello', 'user'),
     ]
-    mock_llm_response = Mock(content=None)
+    mock_llm_response = Mock(content=None, usage_metadata=None)

     async def async_gen():
       yield mock_llm_response
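
The tests stub the streaming call by assigning an async generator object to the mock's `return_value`, so the code under test can drive it with `async for`. A minimal sketch of that pattern outside the ADK fixtures, with illustrative names:

```python
import asyncio
from unittest.mock import Mock

mock_llm = Mock()
mock_response = Mock(content='Summary', usage_metadata=None)


async def async_gen():
  yield mock_response


# Calling mock_llm.generate_content_async(...) now returns the async
# generator object, which `async for` can iterate directly.
mock_llm.generate_content_async.return_value = async_gen()


async def consume():
  async for response in mock_llm.generate_content_async('request', stream=False):
    return response.content


print(asyncio.run(consume()))  # Summary
```

An async generator object is exhausted after a single pass, which is why each test builds a fresh `async_gen()` rather than sharing one across calls.
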