Skip to content

Commit 9765da7

Browse files
authored
Merge branch 'main' into async_openapi_tool
2 parents bf83f73 + d5bd8d9 commit 9765da7

File tree

58 files changed

+2977
-577
lines changed

Some content is hidden

Large commits have some of their content hidden by default. Use the search box below to find content that may be hidden.

58 files changed

+2977
-577
lines changed

contributing/samples/adk_stale_agent/agent.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@
4949
BOT_ALERT_SIGNATURE = (
5050
"**Notification:** The author has updated the issue description"
5151
)
52+
BOT_NAME = "adk-bot"
5253

5354
# --- Global Cache ---
5455
_MAINTAINERS_CACHE: Optional[List[str]] = None
@@ -246,8 +247,9 @@ def _build_history_timeline(
246247
if BOT_ALERT_SIGNATURE in c_body:
247248
if last_bot_alert_time is None or c_time > last_bot_alert_time:
248249
last_bot_alert_time = c_time
250+
continue
249251

250-
if actor and not actor.endswith("[bot]"):
252+
if actor and not actor.endswith("[bot]") and actor != BOT_NAME:
251253
# Use edit time if available, otherwise creation time
252254
e_time = c.get("lastEditedAt")
253255
actual_time = dateutil.parser.isoparse(e_time) if e_time else c_time
@@ -263,7 +265,7 @@ def _build_history_timeline(
263265
if not e:
264266
continue
265267
actor = e.get("editor", {}).get("login")
266-
if actor and not actor.endswith("[bot]"):
268+
if actor and not actor.endswith("[bot]") and actor != BOT_NAME:
267269
history.append({
268270
"type": "edited_description",
269271
"actor": actor,
@@ -285,7 +287,7 @@ def _build_history_timeline(
285287
label_events.append(time_val)
286288
continue
287289

288-
if actor and not actor.endswith("[bot]"):
290+
if actor and not actor.endswith("[bot]") and actor != BOT_NAME:
289291
pretty_type = (
290292
"renamed_title" if etype == "RenamedTitleEvent" else "reopened"
291293
)

pyproject.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,13 @@ classifiers = [ # List of https://pypi.org/classifiers/
2626
dependencies = [
2727
# go/keep-sorted start
2828
"PyYAML>=6.0.2, <7.0.0", # For APIHubToolset.
29-
# TODO: Update aiosqlite version once https://github.com/omnilib/aiosqlite/issues/369 is fixed.
30-
"aiosqlite==0.21.0", # For SQLite database
29+
"aiosqlite>=0.21.0", # For SQLite database
3130
"anyio>=4.9.0, <5.0.0", # For MCP Session Manager
3231
"authlib>=1.5.1, <2.0.0", # For RestAPI Tool
3332
"click>=8.1.8, <9.0.0", # For CLI tools
3433
"fastapi>=0.115.0, <0.124.0", # FastAPI framework
3534
"google-api-python-client>=2.157.0, <3.0.0", # Google API client discovery
35+
"google-auth>=2.47.0", # Google Auth library
3636
"google-cloud-aiplatform[agent_engines]>=1.132.0, <2.0.0", # For VertexAI integrations, e.g. example store.
3737
"google-cloud-bigquery-storage>=2.0.0",
3838
"google-cloud-bigquery>=2.2.0",
@@ -111,6 +111,7 @@ eval = [
111111
"google-cloud-aiplatform[evaluation]>=1.100.0",
112112
"pandas>=2.2.3",
113113
"rouge-score>=0.1.2",
114+
"scipy<1.16; python_version<'3.11'",
114115
"tabulate>=0.9.0",
115116
# go/keep-sorted end
116117
]

src/google/adk/agents/config_schemas/AgentConfig.json

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2461,7 +2461,7 @@
24612461
}
24622462
],
24632463
"default": null,
2464-
"description": "Optional. LlmAgent.model. If not set, the model will be inherited from the ancestor.",
2464+
"description": "Optional. LlmAgent.model. Provide a model name string (e.g. \"gemini-2.0-flash\"). If not set, the model will be inherited from the ancestor or fall back to the system default (gemini-2.5-flash unless overridden via LlmAgent.set_default_model). To construct a model instance from code, use model_code.",
24652465
"title": "Model"
24662466
},
24672467
"instruction": {
@@ -4601,4 +4601,4 @@
46014601
}
46024602
],
46034603
"title": "AgentConfig"
4604-
}
4604+
}

src/google/adk/agents/live_request_queue.py

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -45,15 +45,6 @@ class LiveRequestQueue:
4545
"""Queue used to send LiveRequest in a live(bidirectional streaming) way."""
4646

4747
def __init__(self):
48-
# Ensure there's an event loop available in this thread
49-
try:
50-
asyncio.get_running_loop()
51-
except RuntimeError:
52-
# No running loop, create one
53-
loop = asyncio.new_event_loop()
54-
asyncio.set_event_loop(loop)
55-
56-
# Now create the queue (it will use the event loop we just ensured exists)
5748
self._queue = asyncio.Queue()
5849

5950
def close(self):

src/google/adk/agents/llm_agent.py

Lines changed: 29 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -183,10 +183,18 @@ async def _convert_tool_union_to_tools(
183183
class LlmAgent(BaseAgent):
184184
"""LLM-based Agent."""
185185

186+
DEFAULT_MODEL: ClassVar[str] = 'gemini-2.5-flash'
187+
"""System default model used when no model is set on an agent."""
188+
189+
_default_model: ClassVar[Union[str, BaseLlm]] = DEFAULT_MODEL
190+
"""Current default model used when an agent has no model set."""
191+
186192
model: Union[str, BaseLlm] = ''
187193
"""The model to use for the agent.
188194
189-
When not set, the agent will inherit the model from its ancestor.
195+
When not set, the agent will inherit the model from its ancestor. If no
196+
ancestor provides a model, the agent uses the default model configured via
197+
LlmAgent.set_default_model. The built-in default is gemini-2.5-flash.
190198
"""
191199

192200
config_type: ClassVar[Type[BaseAgentConfig]] = LlmAgentConfig
@@ -503,7 +511,24 @@ def canonical_model(self) -> BaseLlm:
503511
if isinstance(ancestor_agent, LlmAgent):
504512
return ancestor_agent.canonical_model
505513
ancestor_agent = ancestor_agent.parent_agent
506-
raise ValueError(f'No model found for {self.name}.')
514+
return self._resolve_default_model()
515+
516+
@classmethod
517+
def set_default_model(cls, model: Union[str, BaseLlm]) -> None:
518+
"""Overrides the default model used when an agent has no model set."""
519+
if not isinstance(model, (str, BaseLlm)):
520+
raise TypeError('Default model must be a model name or BaseLlm.')
521+
if isinstance(model, str) and not model:
522+
raise ValueError('Default model must be a non-empty string.')
523+
cls._default_model = model
524+
525+
@classmethod
526+
def _resolve_default_model(cls) -> BaseLlm:
527+
"""Resolves the current default model to a BaseLlm instance."""
528+
default_model = cls._default_model
529+
if isinstance(default_model, BaseLlm):
530+
return default_model
531+
return LLMRegistry.new_llm(default_model)
507532

508533
async def canonical_instruction(
509534
self, ctx: ReadonlyContext
@@ -575,10 +600,11 @@ async def canonical_tools(
575600
# because the built-in tools cannot be used together with other tools.
576601
# TODO(b/448114567): Remove once the workaround is no longer needed.
577602
multiple_tools = len(self.tools) > 1
603+
model = self.canonical_model
578604
for tool_union in self.tools:
579605
resolved_tools.extend(
580606
await _convert_tool_union_to_tools(
581-
tool_union, ctx, self.model, multiple_tools
607+
tool_union, ctx, model, multiple_tools
582608
)
583609
)
584610
return resolved_tools

src/google/adk/agents/llm_agent_config.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,9 @@ class LlmAgentConfig(BaseAgentConfig):
5656
description=(
5757
'Optional. LlmAgent.model. Provide a model name string (e.g.'
5858
' "gemini-2.0-flash"). If not set, the model will be inherited from'
59-
' the ancestor. To construct a model instance from code, use'
60-
' model_code.'
59+
' the ancestor or fall back to the system default (gemini-2.5-flash'
60+
' unless overridden via LlmAgent.set_default_model). To construct a'
61+
' model instance from code, use model_code.'
6162
),
6263
)
6364

src/google/adk/agents/run_config.py

Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,133 @@
3232

3333

3434
class StreamingMode(Enum):
35+
"""Streaming modes for agent execution.
36+
37+
This enum defines different streaming behaviors for how the agent returns
38+
events as model response.
39+
"""
40+
3541
NONE = None
42+
"""Non-streaming mode (default).
43+
44+
In this mode:
45+
- The runner returns one single content in a turn (one user / model
46+
interaction).
47+
- No partial/intermediate events are produced
48+
- Suitable for: CLI tools, batch processing, synchronous workflows
49+
50+
Example:
51+
```python
52+
config = RunConfig(streaming_mode=StreamingMode.NONE)
53+
async for event in runner.run_async(..., run_config=config):
54+
# event.partial is always False
55+
# Only final responses are yielded
56+
if event.content:
57+
print(event.content.parts[0].text)
58+
```
59+
"""
60+
3661
SSE = 'sse'
62+
"""Server-Sent Events (SSE) streaming mode.
63+
64+
In this mode:
65+
- The runner yields events progressively as the LLM generates responses
66+
- Both partial events (streaming chunks) and aggregated events are yielded
67+
- Suitable for: real-time display with typewriter effects in Web UIs, chat
68+
applications, interactive displays
69+
70+
Event Types in SSE Mode:
71+
- **Partial text events** (event.partial=True, contains text):
72+
Streaming text chunks for typewriter effect. These should typically be
73+
displayed to users in real-time.
74+
75+
- **Partial function call events** (event.partial=True, contains function_call):
76+
Internal streaming chunks used to progressively build function call
77+
arguments. These are typically NOT displayed to end users.
78+
79+
- **Aggregated events** (event.partial=False):
80+
The complete, aggregated response after all streaming chunks. Contains
81+
the full text or complete function call with all arguments.
82+
83+
Important Considerations:
84+
1. **Duplicate text issue**: With Progressive SSE Streaming enabled
85+
(default), you will receive both partial text chunks AND a final
86+
aggregated text event. To avoid displaying text twice:
87+
- Option A: Only display partial text events, skip final text events
88+
- Option B: Only display final events, skip all partial events
89+
- Option C: Track what's been displayed and skip duplicates
90+
91+
2. **Event filtering**: Applications should filter events based on their
92+
needs. Common patterns:
93+
94+
# Pattern 1: Display only partial text + final function calls
95+
async for event in runner.run_async(...):
96+
if event.partial and event.content and event.content.parts:
97+
# Check if it's text (not function call)
98+
if any(part.text for part in event.content.parts):
99+
if not any(part.function_call for part in event.content.parts):
100+
# Display partial text for typewriter effect
101+
text = ''.join(p.text or '' for p in event.content.parts)
102+
print(text, end='', flush=True)
103+
elif not event.partial and event.get_function_calls():
104+
# Display final function calls
105+
for fc in event.get_function_calls():
106+
print(f"Calling {fc.name}({fc.args})")
107+
108+
# Pattern 2: Display only final events (no streaming effect)
109+
async for event in runner.run_async(...):
110+
if not event.partial:
111+
# Only process final responses
112+
if event.content:
113+
text = ''.join(p.text or '' for p in event.content.parts)
114+
print(text)
115+
116+
3. **Progressive SSE Streaming feature**: Controlled by the
117+
ADK_ENABLE_PROGRESSIVE_SSE_STREAMING environment variable (default: ON).
118+
- When ON: Preserves original part ordering, supports function call
119+
argument streaming, produces partial events + final aggregated event
120+
- When OFF: Simple text accumulation, may lose some information
121+
122+
Example:
123+
```python
124+
config = RunConfig(streaming_mode=StreamingMode.SSE)
125+
displayed_text = ""
126+
127+
async for event in runner.run_async(..., run_config=config):
128+
if event.partial:
129+
# Partial streaming event
130+
if event.content and event.content.parts:
131+
# Check if this is text (not a function call)
132+
has_text = any(part.text for part in event.content.parts)
133+
has_fc = any(part.function_call for part in event.content.parts)
134+
135+
if has_text and not has_fc:
136+
# Display partial text chunks for typewriter effect
137+
text = ''.join(p.text or '' for p in event.content.parts)
138+
print(text, end='', flush=True)
139+
displayed_text += text
140+
else:
141+
# Final event - check if we already displayed this content
142+
if event.content:
143+
final_text = ''.join(p.text or '' for p in event.content.parts)
144+
if final_text != displayed_text:
145+
# New content not yet displayed
146+
print(final_text)
147+
```
148+
149+
See Also:
150+
- Event.is_final_response() for identifying final responses
151+
"""
152+
37153
BIDI = 'bidi'
154+
"""Bidirectional streaming mode.
155+
156+
So far this mode is not used in the standard execution path. The actual
157+
bidirectional streaming behavior via runner.run_live() uses a completely
158+
different code path that doesn't rely on streaming_mode.
159+
160+
For bidirectional streaming, use runner.run_live() instead of run_async().
161+
"""
38162

39163

40164
class RunConfig(BaseModel):

src/google/adk/auth/credential_manager.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -176,17 +176,13 @@ async def get_auth_credential(
176176
async def _load_existing_credential(
177177
self, callback_context: CallbackContext
178178
) -> Optional[AuthCredential]:
179-
"""Load existing credential from credential service or cached exchanged credential."""
179+
"""Load existing credential from credential service."""
180180

181181
# Try loading from credential service first
182182
credential = await self._load_from_credential_service(callback_context)
183183
if credential:
184184
return credential
185185

186-
# Check if we have a cached exchanged credential
187-
if self._auth_config.exchanged_auth_credential:
188-
return self._auth_config.exchanged_auth_credential
189-
190186
return None
191187

192188
async def _load_from_credential_service(

src/google/adk/cli/adk_web_server.py

Lines changed: 32 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1531,14 +1531,31 @@ async def event_generator():
15311531
)
15321532
) as agen:
15331533
async for event in agen:
1534-
# Format as SSE data
1535-
sse_event = event.model_dump_json(
1536-
exclude_none=True, by_alias=True
1537-
)
1538-
logger.debug(
1539-
"Generated event in agent run streaming: %s", sse_event
1540-
)
1541-
yield f"data: {sse_event}\n\n"
1534+
# ADK Web renders artifacts from `actions.artifactDelta`
1535+
# during part processing *and* during action processing
1536+
# 1) the original event with `artifactDelta` cleared (content)
1537+
# 2) a content-less "action-only" event carrying `artifactDelta`
1538+
events_to_stream = [event]
1539+
if (
1540+
event.actions.artifact_delta
1541+
and event.content
1542+
and event.content.parts
1543+
):
1544+
content_event = event.model_copy(deep=True)
1545+
content_event.actions.artifact_delta = {}
1546+
artifact_event = event.model_copy(deep=True)
1547+
artifact_event.content = None
1548+
events_to_stream = [content_event, artifact_event]
1549+
1550+
for event_to_stream in events_to_stream:
1551+
sse_event = event_to_stream.model_dump_json(
1552+
exclude_none=True,
1553+
by_alias=True,
1554+
)
1555+
logger.debug(
1556+
"Generated event in agent run streaming: %s", sse_event
1557+
)
1558+
yield f"data: {sse_event}\n\n"
15421559
except Exception as e:
15431560
logger.exception("Error in event_generator: %s", e)
15441561
# You might want to yield an error event here
@@ -1607,7 +1624,7 @@ async def run_agent_live(
16071624
user_id: str,
16081625
session_id: str,
16091626
modalities: List[Literal["TEXT", "AUDIO"]] = Query(
1610-
default=["TEXT", "AUDIO"]
1627+
default=["AUDIO"]
16111628
), # Only allows "TEXT" or "AUDIO"
16121629
) -> None:
16131630
await websocket.accept()
@@ -1625,9 +1642,12 @@ async def run_agent_live(
16251642

16261643
async def forward_events():
16271644
runner = await self.get_runner_async(app_name)
1645+
run_config = RunConfig(response_modalities=modalities)
16281646
async with Aclosing(
16291647
runner.run_live(
1630-
session=session, live_request_queue=live_request_queue
1648+
session=session,
1649+
live_request_queue=live_request_queue,
1650+
run_config=run_config,
16311651
)
16321652
) as agen:
16331653
async for event in agen:
@@ -1657,7 +1677,8 @@ async def process_messages():
16571677
for task in done:
16581678
task.result()
16591679
except WebSocketDisconnect:
1660-
logger.info("Client disconnected during process_messages.")
1680+
# Disconnection could happen when receive or send text via websocket
1681+
logger.info("Client disconnected during live session.")
16611682
except Exception as e:
16621683
logger.exception("Error during live websocket communication: %s", e)
16631684
traceback.print_exc()

0 commit comments

Comments (0)