Make swelancer agent use an async completion function (#51)
kliu128 authored Mar 3, 2025
1 parent 12621cb commit e2c75f7
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions swelancer_agent.py
@@ -36,12 +36,12 @@
 from nanoeval_alcatraz.task_to_alcatraz_config import task_to_alcatraz_config
 from nanoeval_alcatraz.alcatraz_computer_interface import AlcatrazComputerInterface
 
-from openai import OpenAI
+from openai import AsyncOpenAI
 import os
 import tiktoken
 
 
-client = OpenAI(
+client = AsyncOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),  # This is the default and can be omitted
 )

@@ -65,10 +65,10 @@ def trim_messages(messages: list[dict[str, Any]], max_tokens: int, model: str =
         messages.pop(1)
     return messages
 
-def get_model_response(messages: list[dict[str, Any]]) -> str:
+async def get_model_response(messages: list[dict[str, Any]]) -> str:
     messages = trim_messages(messages, 110000)
 
-    chat_completion = client.chat.completions.create(
+    chat_completion = await client.chat.completions.create(
         messages=messages,  # type: ignore
         model="gpt-4o",
     )

@@ -126,7 +126,7 @@ async def run(self, task: ComputerTask) -> AsyncGenerator[Step | FinalResult, No
         print(messages)
 
         for remaining_turns in range(max_turns, 0, -1):
-            model_response = get_model_response(messages)
+            model_response = await get_model_response(messages)
             print(model_response)
 
             messages.append({"role": "assistant", "content": model_response})

@@ -182,4 +182,4 @@ async def run(self, task: ComputerTask) -> AsyncGenerator[Step | FinalResult, No
                 raise
             yield FinalResultSuccessful(
                 grade=Grade(score=0, grader_log=f"Grading failed with error: {str(e)}")
-            )
+            )
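
The diff swaps the synchronous OpenAI client for AsyncOpenAI so the completion call can be awaited from the agent's async run loop instead of blocking the event loop. Below is a minimal, self-contained sketch of the same pattern: the model name and the awaited client.chat.completions.create call come from the diff, while the main() driver is hypothetical and the tiktoken-based trim_messages step is omitted for brevity.

import asyncio
import os
from typing import Any

from openai import AsyncOpenAI

# Async client, as in the diff; the API key is read from the environment.
client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


async def get_model_response(messages: list[dict[str, Any]]) -> str:
    # The real agent first trims the message history to ~110k tokens with
    # tiktoken; that step is left out of this sketch.
    chat_completion = await client.chat.completions.create(
        messages=messages,  # type: ignore
        model="gpt-4o",
    )
    return chat_completion.choices[0].message.content or ""


async def main() -> None:
    # Hypothetical driver, not part of the agent: await the async helper once.
    reply = await get_model_response([{"role": "user", "content": "Say hello."}])
    print(reply)


if __name__ == "__main__":
    asyncio.run(main())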
