From aad2384fd29ac445e90270d7dd61e3306d5beae8 Mon Sep 17 00:00:00 2001 From: phact Date: Fri, 15 Mar 2024 16:10:15 -0400 Subject: [PATCH] change client name --- tests/streaming-assistants/conftest.py | 2 +- .../test_chat_completion.py | 24 +++++++------- tests/streaming-assistants/test_embedding.py | 20 ++++++------ .../test_file_embedding.py | 4 +-- .../test_function_calling.py | 32 +++++++++---------- tests/streaming-assistants/test_run.py | 32 +++++++++---------- .../test_run_retreival.py | 32 +++++++++---------- .../test_streaming_run.py | 32 +++++++++---------- .../test_streaming_run_retrieval.py | 30 ++++++++--------- 9 files changed, 104 insertions(+), 104 deletions(-) diff --git a/tests/streaming-assistants/conftest.py b/tests/streaming-assistants/conftest.py index cdafb3d..37d91c1 100644 --- a/tests/streaming-assistants/conftest.py +++ b/tests/streaming-assistants/conftest.py @@ -88,6 +88,6 @@ async def check_for_stop(): @pytest.fixture(scope="function") -def openai_client(start_application) -> OpenAI: +def patched_openai_client(start_application) -> OpenAI: oai = patch(OpenAI()) return oai diff --git a/tests/streaming-assistants/test_chat_completion.py b/tests/streaming-assistants/test_chat_completion.py index 767be22..5707138 100644 --- a/tests/streaming-assistants/test_chat_completion.py +++ b/tests/streaming-assistants/test_chat_completion.py @@ -35,26 +35,26 @@ def print_chat_completion(model, client): assert i > 0 -def test_chat_completion_gpt4(openai_client): +def test_chat_completion_gpt4(patched_openai_client): model="gpt-4-1106-preview" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) -def test_chat_completion_gpt3_5(openai_client): +def test_chat_completion_gpt3_5(patched_openai_client): model="gpt-3.5-turbo" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) -def test_chat_completion_cohere(openai_client): +def 
test_chat_completion_cohere(patched_openai_client): model="cohere/command" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) -def test_chat_completion_perp_mixtral(openai_client): +def test_chat_completion_perp_mixtral(patched_openai_client): model="perplexity/mixtral-8x7b-instruct" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) -def test_chat_completion_claude(openai_client): +def test_chat_completion_claude(patched_openai_client): model="anthropic.claude-v2" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) -def test_chat_completion_gemini_pro(openai_client): +def test_chat_completion_gemini_pro(patched_openai_client): model="gemini/gemini-pro" - print_chat_completion(model, openai_client) + print_chat_completion(model, patched_openai_client) diff --git a/tests/streaming-assistants/test_embedding.py b/tests/streaming-assistants/test_embedding.py index 6927a64..bedb681 100644 --- a/tests/streaming-assistants/test_embedding.py +++ b/tests/streaming-assistants/test_embedding.py @@ -25,24 +25,24 @@ def print_embedding(model, client): -def test_embedding_cohere(openai_client): +def test_embedding_cohere(patched_openai_client): model = "cohere/embed-english-v3.0" - print_embedding(model, openai_client) + print_embedding(model, patched_openai_client) -def test_embedding_titan(openai_client): +def test_embedding_titan(patched_openai_client): model = "amazon.titan-embed-text-v1" - print_embedding(model, openai_client) + print_embedding(model, patched_openai_client) -def test_embedding_ada_002(openai_client): +def test_embedding_ada_002(patched_openai_client): model = "text-embedding-ada-002" - print_embedding(model, openai_client) + print_embedding(model, patched_openai_client) -def test_embedding_3_small(openai_client): +def test_embedding_3_small(patched_openai_client): model="text-embedding-3-small" - 
print_embedding(model, openai_client) + print_embedding(model, patched_openai_client) -def test_embedding_3_small(openai_client): +def test_embedding_3_large(patched_openai_client): model="text-embedding-3-large" - print_embedding(model, openai_client) + print_embedding(model, patched_openai_client) diff --git a/tests/streaming-assistants/test_file_embedding.py b/tests/streaming-assistants/test_file_embedding.py index a91d3d3..3acc972 100644 --- a/tests/streaming-assistants/test_file_embedding.py +++ b/tests/streaming-assistants/test_file_embedding.py @@ -1,8 +1,8 @@ import logging logger = logging.getLogger(__name__) -def test_file_embedding(openai_client): - file = openai_client.files.create( +def test_file_embedding(patched_openai_client): + file = patched_openai_client.files.create( file=open( "./tests/fixtures/language_models_are_unsupervised_multitask_learners.pdf", "rb", diff --git a/tests/streaming-assistants/test_function_calling.py b/tests/streaming-assistants/test_function_calling.py index 1b85fe3..630052f 100644 --- a/tests/streaming-assistants/test_function_calling.py +++ b/tests/streaming-assistants/test_function_calling.py @@ -4,41 +4,41 @@ logger = logging.getLogger(__name__) -def test_function_calling_gpt_4(openai_client): +def test_function_calling_gpt_4(patched_openai_client): model="gpt-4-1106-preview" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) -def test_function_calling_gpt_3_5(openai_client): +def test_function_calling_gpt_3_5(patched_openai_client): model="gpt-3.5-turbo" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) @pytest.mark.skip(reason="claude does not consistently work with function calling, skip") -def test_function_calling_cohere(openai_client): +def test_function_calling_cohere(patched_openai_client): model="cohere/command" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) -def 
test_function_calling_pplx_mix(openai_client): +def test_function_calling_pplx_mix(patched_openai_client): model="perplexity/mixtral-8x7b-instruct" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) @pytest.mark.skip(reason="pplx_online just looks up the weather and doesn't do the function call") -def test_function_calling_pplx_online(openai_client): +def test_function_calling_pplx_online(patched_openai_client): model="perplexity/pplx-70b-online" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) @pytest.mark.skip(reason="claude does not consistently work with function calling, skip") -def test_function_calling_claude(openai_client): +def test_function_calling_claude(patched_openai_client): model="anthropic.claude-v2" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) -def test_function_calling_gemini(openai_client): +def test_function_calling_gemini(patched_openai_client): model="gemini/gemini-pro" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) @pytest.mark.skip(reason="llama does not consistently work with function calling, skip") -def test_function_calling_llama(openai_client): +def test_function_calling_llama(patched_openai_client): model = "meta.llama2-13b-chat-v1" - function_calling(model, openai_client) + function_calling(model, patched_openai_client) def function_calling(model, client): diff --git a/tests/streaming-assistants/test_run.py b/tests/streaming-assistants/test_run.py index b9cd9a3..3d0b643 100644 --- a/tests/streaming-assistants/test_run.py +++ b/tests/streaming-assistants/test_run.py @@ -52,46 +52,46 @@ def run_with_assistant(assistant, client): instructions="You're an animal expert who gives very long winded answers with flowery prose. Keep answers below 3 sentences." 
-def test_run_gpt3_5(openai_client): - gpt3_assistant = openai_client.beta.assistants.create( +def test_run_gpt3_5(patched_openai_client): + gpt3_assistant = patched_openai_client.beta.assistants.create( name="GPT3 Animal Tutor", instructions=instructions, model="gpt-3.5-turbo", ) - assistant = openai_client.beta.assistants.retrieve(gpt3_assistant.id) + assistant = patched_openai_client.beta.assistants.retrieve(gpt3_assistant.id) logger.info(assistant) - run_with_assistant(gpt3_assistant, openai_client) + run_with_assistant(gpt3_assistant, patched_openai_client) -def test_run_cohere(openai_client): - cohere_assistant = openai_client.beta.assistants.create( +def test_run_cohere(patched_openai_client): + cohere_assistant = patched_openai_client.beta.assistants.create( name="Cohere Animal Tutor", instructions=instructions, model="cohere/command", ) - run_with_assistant(cohere_assistant, openai_client) + run_with_assistant(cohere_assistant, patched_openai_client) -def test_run_perp(openai_client): - perplexity_assistant = openai_client.beta.assistants.create( +def test_run_perp(patched_openai_client): + perplexity_assistant = patched_openai_client.beta.assistants.create( name="Perplexity/Mixtral Animal Tutor", instructions=instructions, model="perplexity/mixtral-8x7b-instruct", ) - run_with_assistant(perplexity_assistant, openai_client) + run_with_assistant(perplexity_assistant, patched_openai_client) -def test_run_claude(openai_client): - claude_assistant = openai_client.beta.assistants.create( +def test_run_claude(patched_openai_client): + claude_assistant = patched_openai_client.beta.assistants.create( name="Claude Animal Tutor", instructions=instructions, model="anthropic.claude-v2", ) - run_with_assistant(claude_assistant, openai_client) + run_with_assistant(claude_assistant, patched_openai_client) -def test_run_gemini(openai_client): - gemini_assistant = openai_client.beta.assistants.create( +def test_run_gemini(patched_openai_client): + gemini_assistant = 
patched_openai_client.beta.assistants.create( name="Gemini Animal Tutor", instructions=instructions, model="gemini/gemini-pro", ) - run_with_assistant(gemini_assistant, openai_client) \ No newline at end of file + run_with_assistant(gemini_assistant, patched_openai_client) \ No newline at end of file diff --git a/tests/streaming-assistants/test_run_retreival.py b/tests/streaming-assistants/test_run_retreival.py index 8a08736..2bac06c 100644 --- a/tests/streaming-assistants/test_run_retreival.py +++ b/tests/streaming-assistants/test_run_retreival.py @@ -62,58 +62,58 @@ def run_with_assistant(assistant, client): instructions = "You are a personal math tutor. Answer thoroughly. The system will provide relevant context from files, use the context to respond." -def test_run_gpt3_5(openai_client): +def test_run_gpt3_5(patched_openai_client): model = "gpt-3.5-turbo" name = f"{model} Math Tutor" - gpt3_assistant = openai_client.beta.assistants.create( + gpt3_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(gpt3_assistant, openai_client) + run_with_assistant(gpt3_assistant, patched_openai_client) -def test_run_cohere(openai_client): +def test_run_cohere(patched_openai_client): model = "cohere/command" name = f"{model} Math Tutor" - cohere_assistant = openai_client.beta.assistants.create( + cohere_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(cohere_assistant, openai_client) + run_with_assistant(cohere_assistant, patched_openai_client) -def test_run_perp(openai_client): +def test_run_perp(patched_openai_client): model = "perplexity/mixtral-8x7b-instruct" name = f"{model} Math Tutor" - perplexity_assistant = openai_client.beta.assistants.create( + perplexity_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(perplexity_assistant, 
openai_client) + run_with_assistant(perplexity_assistant, patched_openai_client) -@pytest.mark.skip(reason="fix streaming-assistants aws with openai embedding issue") -def test_run_claude(openai_client): +@pytest.mark.skip(reason="fix streaming-assistants aws with openai embedding issue") +def test_run_claude(patched_openai_client): model = "anthropic.claude-v2" name = f"{model} Math Tutor" - claude_assistant = openai_client.beta.assistants.create( + claude_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(claude_assistant, openai_client) + run_with_assistant(claude_assistant, patched_openai_client) -def test_run_gemini(openai_client): +def test_run_gemini(patched_openai_client): model = "gemini/gemini-pro" name = f"{model} Math Tutor" - gemini_assistant = openai_client.beta.assistants.create( + gemini_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(gemini_assistant, openai_client) \ No newline at end of file + run_with_assistant(gemini_assistant, patched_openai_client) \ No newline at end of file diff --git a/tests/streaming-assistants/test_streaming_run.py b/tests/streaming-assistants/test_streaming_run.py index 1a18484..a74b62d 100644 --- a/tests/streaming-assistants/test_streaming_run.py +++ b/tests/streaming-assistants/test_streaming_run.py @@ -50,46 +50,46 @@ def on_text_delta(self, delta, snapshot): instructions="You're an animal expert who gives very long winded answers with flowery prose. Keep answers below 3 sentences." 
-def test_run_gpt3_5(openai_client): - gpt3_assistant = openai_client.beta.assistants.create( +def test_run_gpt3_5(patched_openai_client): + gpt3_assistant = patched_openai_client.beta.assistants.create( name="GPT3 Animal Tutor", instructions=instructions, model="gpt-3.5-turbo", ) - assistant = openai_client.beta.assistants.retrieve(gpt3_assistant.id) + assistant = patched_openai_client.beta.assistants.retrieve(gpt3_assistant.id) logger.info(assistant) - run_with_assistant(gpt3_assistant, openai_client) + run_with_assistant(gpt3_assistant, patched_openai_client) -def test_run_cohere(openai_client): - cohere_assistant = openai_client.beta.assistants.create( +def test_run_cohere(patched_openai_client): + cohere_assistant = patched_openai_client.beta.assistants.create( name="Cohere Animal Tutor", instructions=instructions, model="cohere/command", ) - run_with_assistant(cohere_assistant, openai_client) + run_with_assistant(cohere_assistant, patched_openai_client) -def test_run_perp(openai_client): - perplexity_assistant = openai_client.beta.assistants.create( +def test_run_perp(patched_openai_client): + perplexity_assistant = patched_openai_client.beta.assistants.create( name="Perplexity/Mixtral Animal Tutor", instructions=instructions, model="perplexity/mixtral-8x7b-instruct", ) - run_with_assistant(perplexity_assistant, openai_client) + run_with_assistant(perplexity_assistant, patched_openai_client) -def test_run_claude(openai_client): - claude_assistant = openai_client.beta.assistants.create( +def test_run_claude(patched_openai_client): + claude_assistant = patched_openai_client.beta.assistants.create( name="Claude Animal Tutor", instructions=instructions, model="anthropic.claude-v2", ) - run_with_assistant(claude_assistant, openai_client) + run_with_assistant(claude_assistant, patched_openai_client) -def test_run_gemini(openai_client): - gemini_assistant = openai_client.beta.assistants.create( +def test_run_gemini(patched_openai_client): + gemini_assistant = 
patched_openai_client.beta.assistants.create( name="Gemini Animal Tutor", instructions=instructions, model="gemini/gemini-pro", ) - run_with_assistant(gemini_assistant, openai_client) \ No newline at end of file + run_with_assistant(gemini_assistant, patched_openai_client) \ No newline at end of file diff --git a/tests/streaming-assistants/test_streaming_run_retrieval.py b/tests/streaming-assistants/test_streaming_run_retrieval.py index 8ef9747..fe5d8ea 100644 --- a/tests/streaming-assistants/test_streaming_run_retrieval.py +++ b/tests/streaming-assistants/test_streaming_run_retrieval.py @@ -49,58 +49,58 @@ def run_with_assistant(assistant, client): instructions = "You are a personal math tutor. Answer thoroughly. The system will provide relevant context from files, use the context to respond." -def test_run_gpt3_5(openai_client): +def test_run_gpt3_5(patched_openai_client): model = "gpt-3.5-turbo" name = f"{model} Math Tutor" - gpt3_assistant = openai_client.beta.assistants.create( + gpt3_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(gpt3_assistant, openai_client) + run_with_assistant(gpt3_assistant, patched_openai_client) -def test_run_cohere(openai_client): +def test_run_cohere(patched_openai_client): model = "cohere/command" name = f"{model} Math Tutor" - cohere_assistant = openai_client.beta.assistants.create( + cohere_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(cohere_assistant, openai_client) + run_with_assistant(cohere_assistant, patched_openai_client) -def test_run_perp(openai_client): +def test_run_perp(patched_openai_client): model = "perplexity/mixtral-8x7b-instruct" name = f"{model} Math Tutor" - perplexity_assistant = openai_client.beta.assistants.create( + perplexity_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - 
run_with_assistant(perplexity_assistant, openai_client) + run_with_assistant(perplexity_assistant, patched_openai_client) @pytest.mark.skip(reason="fix streaming-assistants aws with openai embedding issue") -def test_run_claude(openai_client): +def test_run_claude(patched_openai_client): model = "anthropic.claude-v2" name = f"{model} Math Tutor" - claude_assistant = openai_client.beta.assistants.create( + claude_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(claude_assistant, openai_client) + run_with_assistant(claude_assistant, patched_openai_client) -def test_run_gemini(openai_client): +def test_run_gemini(patched_openai_client): model = "gemini/gemini-pro" name = f"{model} Math Tutor" - gemini_assistant = openai_client.beta.assistants.create( + gemini_assistant = patched_openai_client.beta.assistants.create( name=name, instructions=instructions, model=model, ) - run_with_assistant(gemini_assistant, openai_client) \ No newline at end of file + run_with_assistant(gemini_assistant, patched_openai_client) \ No newline at end of file