Skip to content

Commit 74763a0

Browse files
authored
feat: responses get/retrieve streaming + ergonomics (#489)
* ergonomics impls * update examples * improve tool-call-stream * ergo * updated example * add strict example * allow other voices * support for GET call to return a stream; use it in Responses GET with stream * add example to retrieve a response with streaming
1 parent 8e35956 commit 74763a0

File tree

12 files changed

+331
-269
lines changed

12 files changed

+331
-269
lines changed

async-openai/src/client.rs

Lines changed: 6 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -544,23 +544,18 @@ impl<C: Config> Client<C> {
544544
}
545545

546546
/// Make HTTP GET request to receive SSE
547-
pub(crate) async fn _get_stream<Q, O>(
547+
pub(crate) async fn get_stream<O>(
548548
&self,
549549
path: &str,
550-
query: &Q,
550+
request_options: &RequestOptions,
551551
) -> Pin<Box<dyn Stream<Item = Result<O, OpenAIError>> + Send>>
552552
where
553-
Q: Serialize + ?Sized,
554553
O: DeserializeOwned + std::marker::Send + 'static,
555554
{
556-
let event_source = self
557-
.http_client
558-
.get(self.config.url(path))
559-
.query(query)
560-
.query(&self.config.query())
561-
.headers(self.config.headers())
562-
.eventsource()
563-
.unwrap();
555+
let request_builder =
556+
self.build_request_builder(reqwest::Method::GET, path, request_options);
557+
558+
let event_source = request_builder.eventsource().unwrap();
564559

565560
stream(event_source).await
566561
}

async-openai/src/responses.rs

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,25 @@ impl<'c, C: Config> Responses<'c, C> {
8181
.await
8282
}
8383

84+
/// Retrieves a model response with the given ID with streaming.
85+
///
86+
/// Response events will be sent as server-sent events as they become available.
87+
#[crate::byot(
88+
T0 = std::fmt::Display,
89+
R = serde::de::DeserializeOwned,
90+
stream = "true",
91+
where_clause = "R: std::marker::Send + 'static"
92+
)]
93+
pub async fn retrieve_stream(&self, response_id: &str) -> Result<ResponseStream, OpenAIError> {
94+
let mut request_options = self.request_options.clone();
95+
request_options.with_query(&[("stream", "true")])?;
96+
97+
Ok(self
98+
.client
99+
.get_stream(&format!("/responses/{}", response_id), &request_options)
100+
.await)
101+
}
102+
84103
/// Deletes a model response with the given ID.
85104
#[crate::byot(T0 = std::fmt::Display, R = serde::de::DeserializeOwned)]
86105
pub async fn delete(&self, response_id: &str) -> Result<DeleteResponse, OpenAIError> {

async-openai/src/types/audio/audio_types.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,8 @@ pub enum Voice {
2222
Sage,
2323
Shimmer,
2424
Verse,
25+
#[serde(untagged)]
26+
Other(String),
2527
}
2628

2729
#[derive(Debug, Default, Clone, PartialEq)]

async-openai/src/types/chat/impls.rs

Lines changed: 70 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
use std::fmt::Display;
22

33
use crate::types::chat::{
4-
ChatCompletionFunctionCall, ChatCompletionNamedToolChoice,
4+
ChatCompletionFunctionCall, ChatCompletionMessageCustomToolCall, ChatCompletionMessageToolCall,
5+
ChatCompletionMessageToolCalls, ChatCompletionNamedToolChoice,
56
ChatCompletionRequestAssistantMessage, ChatCompletionRequestAssistantMessageContent,
67
ChatCompletionRequestDeveloperMessage, ChatCompletionRequestDeveloperMessageContent,
78
ChatCompletionRequestFunctionMessage, ChatCompletionRequestMessage,
@@ -10,7 +11,8 @@ use crate::types::chat::{
1011
ChatCompletionRequestSystemMessageContent, ChatCompletionRequestToolMessage,
1112
ChatCompletionRequestToolMessageContent, ChatCompletionRequestUserMessage,
1213
ChatCompletionRequestUserMessageContent, ChatCompletionRequestUserMessageContentPart,
13-
FunctionName, ImageUrl, Role,
14+
ChatCompletionTool, ChatCompletionTools, CustomToolChatCompletions, FunctionName, ImageUrl,
15+
Role,
1416
};
1517

1618
impl From<ChatCompletionRequestUserMessage> for ChatCompletionRequestMessage {
@@ -332,3 +334,69 @@ impl Display for Role {
332334
)
333335
}
334336
}
337+
338+
impl From<ChatCompletionTool> for Vec<ChatCompletionTools> {
339+
fn from(value: ChatCompletionTool) -> Self {
340+
vec![ChatCompletionTools::Function(value)]
341+
}
342+
}
343+
344+
impl From<CustomToolChatCompletions> for Vec<ChatCompletionTools> {
345+
fn from(value: CustomToolChatCompletions) -> Self {
346+
vec![ChatCompletionTools::Custom(value)]
347+
}
348+
}
349+
350+
impl From<ChatCompletionRequestUserMessage> for Vec<ChatCompletionRequestMessage> {
351+
fn from(value: ChatCompletionRequestUserMessage) -> Self {
352+
vec![value.into()]
353+
}
354+
}
355+
356+
impl From<ChatCompletionRequestSystemMessage> for Vec<ChatCompletionRequestMessage> {
357+
fn from(value: ChatCompletionRequestSystemMessage) -> Self {
358+
vec![value.into()]
359+
}
360+
}
361+
362+
impl From<ChatCompletionRequestDeveloperMessage> for Vec<ChatCompletionRequestMessage> {
363+
fn from(value: ChatCompletionRequestDeveloperMessage) -> Self {
364+
vec![value.into()]
365+
}
366+
}
367+
368+
impl From<ChatCompletionRequestAssistantMessage> for Vec<ChatCompletionRequestMessage> {
369+
fn from(value: ChatCompletionRequestAssistantMessage) -> Self {
370+
vec![value.into()]
371+
}
372+
}
373+
374+
impl From<ChatCompletionRequestFunctionMessage> for Vec<ChatCompletionRequestMessage> {
375+
fn from(value: ChatCompletionRequestFunctionMessage) -> Self {
376+
vec![value.into()]
377+
}
378+
}
379+
380+
impl From<ChatCompletionRequestToolMessage> for Vec<ChatCompletionRequestMessage> {
381+
fn from(value: ChatCompletionRequestToolMessage) -> Self {
382+
vec![value.into()]
383+
}
384+
}
385+
386+
impl From<ChatCompletionMessageToolCall> for ChatCompletionMessageToolCalls {
387+
fn from(value: ChatCompletionMessageToolCall) -> Self {
388+
ChatCompletionMessageToolCalls::Function(value)
389+
}
390+
}
391+
392+
impl From<ChatCompletionMessageCustomToolCall> for ChatCompletionMessageToolCalls {
393+
fn from(value: ChatCompletionMessageCustomToolCall) -> Self {
394+
ChatCompletionMessageToolCalls::Custom(value)
395+
}
396+
}
397+
398+
impl From<ImageUrl> for ChatCompletionRequestMessageContentPartImage {
399+
fn from(value: ImageUrl) -> Self {
400+
ChatCompletionRequestMessageContentPartImage { image_url: value }
401+
}
402+
}

async-openai/src/types/realtime/session.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,8 @@ pub enum RealtimeVoice {
150150
Verse,
151151
Marin,
152152
Cedar,
153+
#[serde(untagged)]
154+
Other(String),
153155
}
154156

155157
#[derive(Debug, Serialize, Deserialize, Clone)]

examples/chat/src/main.rs

Lines changed: 10 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@ use std::error::Error;
33
use async_openai::{
44
traits::RequestOptionsBuilder,
55
types::chat::{
6-
ChatCompletionRequestAssistantMessageArgs, ChatCompletionRequestSystemMessageArgs,
7-
ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
6+
ChatCompletionRequestAssistantMessage, ChatCompletionRequestSystemMessage,
7+
ChatCompletionRequestUserMessage, CreateChatCompletionRequestArgs,
88
},
99
Client,
1010
};
@@ -17,22 +17,14 @@ async fn main() -> Result<(), Box<dyn Error>> {
1717
.max_tokens(512u32)
1818
.model("gpt-3.5-turbo")
1919
.messages([
20-
ChatCompletionRequestSystemMessageArgs::default()
21-
.content("You are a helpful assistant.")
22-
.build()?
23-
.into(),
24-
ChatCompletionRequestUserMessageArgs::default()
25-
.content("Who won the world series in 2020?")
26-
.build()?
27-
.into(),
28-
ChatCompletionRequestAssistantMessageArgs::default()
29-
.content("The Los Angeles Dodgers won the World Series in 2020.")
30-
.build()?
31-
.into(),
32-
ChatCompletionRequestUserMessageArgs::default()
33-
.content("Where was it played?")
34-
.build()?
35-
.into(),
20+
// Can also use ChatCompletionRequest<Role>MessageArgs for builder pattern
21+
ChatCompletionRequestSystemMessage::from("You are a helpful assistant.").into(),
22+
ChatCompletionRequestUserMessage::from("Who won the world series in 2020?").into(),
23+
ChatCompletionRequestAssistantMessage::from(
24+
"The Los Angeles Dodgers won the World Series in 2020.",
25+
)
26+
.into(),
27+
ChatCompletionRequestUserMessage::from("Where was it played?").into(),
3628
])
3729
.build()?;
3830

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
[package]
2+
name = "responses-retrive-stream"
3+
version = "0.1.0"
4+
edition = "2021"
5+
publish = false
6+
7+
[dependencies]
8+
async-openai = { path = "../../async-openai" }
9+
tokio = { version = "1.0", features = ["full"] }
10+
futures = "0.3"
11+
serde_json = "1.0"
12+
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
use async_openai::{
2+
types::responses::{CreateResponseArgs, ResponseStreamEvent},
3+
Client,
4+
};
5+
use futures::StreamExt;
6+
use std::error::Error;
7+
use std::io::{stdout, Write};
8+
9+
#[tokio::main]
10+
async fn main() -> Result<(), Box<dyn Error>> {
11+
let client = Client::new();
12+
13+
// First, create a response with background=true and stream=true
14+
println!("Creating a respose with background=true and stream=true ...");
15+
let create_request = CreateResponseArgs::default()
16+
.model("gpt-4.1")
17+
.background(true)
18+
.stream(true)
19+
.input("Write a function in Rust that adds two u32 and returns u64'")
20+
.build()?;
21+
22+
let mut response = client.responses().create_stream(create_request).await?;
23+
let mut response_id = None;
24+
let mut lock = stdout().lock();
25+
26+
while let Some(result) = response.next().await {
27+
if let Ok(ResponseStreamEvent::ResponseCreated(event)) = result {
28+
writeln!(lock, "Response created with ID: {}", event.response.id).unwrap();
29+
response_id = Some(event.response.id.clone());
30+
break;
31+
}
32+
}
33+
34+
if let Some(response_id) = response_id {
35+
writeln!(lock, "\nRetrieving {} with streaming...\n", &response_id).unwrap();
36+
let mut retrieve_stream = client.responses().retrieve_stream(&response_id).await?;
37+
while let Some(result) = retrieve_stream.next().await {
38+
if let Ok(ResponseStreamEvent::ResponseOutputTextDelta(delta)) = result {
39+
write!(lock, "{}", delta.delta).unwrap();
40+
}
41+
}
42+
}
43+
44+
Ok(())
45+
}

0 commit comments

Comments
 (0)