
Commit 2fb383f

Merge pull request #209 from FullStackWithLawrence/next
fix: add unit tests for common
2 parents: 6c30496 + ef7123f

6 files changed: +175 -14 lines
Lines changed: 24 additions & 0 deletions
@@ -0,0 +1,24 @@
+{
+  "model": "gpt-3.5-turbo",
+  "object": "chat.completion",
+  "temperature": 0,
+  "max_tokens": 256,
+  "messages": [
+    {
+      "role": "system",
+      "content": "you always return the integer value 42."
+    },
+    {
+      "role": "user",
+      "content": "return the integer value 42."
+    }
+  ],
+  "chat_history": [
+    {
+      "message": "Hello.",
+      "direction": "incoming",
+      "sentTime": "11/16/2023, 5:53:32 PM",
+      "sender": "system"
+    }
+  ]
+}
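
The fixture above is the request body that the new TestUtils module added later in this commit feeds to get_request_body() and parse_request(). The sketch below only illustrates the 6-tuple shape those tests destructure; it is inferred from the test assertions, not taken from openai_api/common/utils.py.

    # Illustrative sketch only: the tuple shape the new tests expect parse_request()
    # to return for this fixture. Inferred from test_parse_request(); not the
    # repository's implementation.
    def parse_request_sketch(request_body: dict) -> tuple:
        """Pull out the fields the tests assert on."""
        object_type = request_body["object"]          # "chat.completion"
        model = request_body["model"]                 # "gpt-3.5-turbo"
        messages = request_body["messages"]           # two messages in this fixture
        input_text = request_body.get("input_text")   # absent here, so None
        temperature = request_body["temperature"]     # 0
        max_tokens = request_body["max_tokens"]       # 256
        return object_type, model, messages, input_text, temperature, max_tokens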
Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+{
+  "retval": {
+    "isBase64Encoded": false,
+    "statusCode": 200,
+    "headers": {
+      "Content-Type": "application/json"
+    },
+    "body": {
+      "id": "chatcmpl-8WtM7gZIqvLQKPlNi9ALm7Im6CdXa",
+      "choices": [
+        {
+          "finish_reason": "stop",
+          "index": 0,
+          "message": {
+            "content": "42",
+            "role": "assistant",
+            "function_call": null,
+            "tool_calls": null
+          },
+          "logprobs": null
+        }
+      ],
+      "created": 1702849975,
+      "model": "gpt-3.5-turbo-0613",
+      "object": "chat.completion",
+      "system_fingerprint": null,
+      "usage": {
+        "completion_tokens": 1,
+        "prompt_tokens": 23,
+        "total_tokens": 24
+      },
+      "request_meta_data": {
+        "lambda": "lambda_openai_v2",
+        "model": "gpt-3.5-turbo",
+        "object_type": "chat.completion",
+        "temperature": 0,
+        "max_tokens": 256
+      }
+    }
+  }
+}
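
Both JSON fixtures above are loaded by the get_test_file() helper that the new test module imports from test_setup; that helper is not part of this diff. A minimal sketch of what such a loader could look like, assuming the fixtures sit in a json/ folder next to the tests:

    # Hypothetical loader, shown only to make the fixtures above self-explanatory.
    # The real get_test_file() lives in openai_api/common/tests/test_setup.py and may differ.
    import json
    import os

    HERE = os.path.abspath(os.path.dirname(__file__))

    def get_test_file_sketch(relative_path: str) -> dict:
        """Read a JSON fixture, e.g. 'json/passthrough_openai_v2_request.json'."""
        with open(os.path.join(HERE, relative_path), encoding="utf-8") as file_handle:
            return json.load(file_handle)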
Lines changed: 110 additions & 0 deletions
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=wrong-import-position
+"""Test configuration Settings class."""
+
+# python stuff
+import json
+import os
+import sys
+import unittest
+from pathlib import Path
+
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+PROJECT_ROOT = str(Path(HERE).parent.parent)
+PYTHON_ROOT = str(Path(PROJECT_ROOT).parent)
+if PYTHON_ROOT not in sys.path:
+    sys.path.append(PYTHON_ROOT)  # noqa: E402
+
+from openai_api.common.const import OpenAIMessageKeys  # noqa: E402
+
+# our stuff
+from openai_api.common.tests.test_setup import get_test_file  # noqa: E402
+from openai_api.common.utils import (  # noqa: E402
+    exception_response_factory,
+    get_content_for_role,
+    get_message_history,
+    get_messages_for_role,
+    get_request_body,
+    http_response_factory,
+    parse_request,
+)
+
+
+class TestUtils(unittest.TestCase):
+    """Test utils."""
+
+    # Get the directory of the current script
+    here = HERE
+    request = get_test_file("json/passthrough_openai_v2_request.json")
+    response = get_test_file("json/passthrough_openai_v2_response.json")
+
+    def setUp(self):
+        """Set up test fixtures."""
+
+    def test_http_response_factory(self):
+        """Test test_http_response_factory."""
+        retval = http_response_factory(200, self.response)
+        self.assertEqual(retval["statusCode"], 200)
+        self.assertEqual(retval["body"], json.dumps(self.response))
+        self.assertEqual(retval["isBase64Encoded"], False)
+        self.assertEqual(retval["headers"]["Content-Type"], "application/json")
+
+    def test_exception_response_factory(self):
+        """Test exception_response_factory."""
+        try:
+            raise AssertionError("test")
+        except AssertionError as exception:
+            retval = exception_response_factory(exception)
+            self.assertIn("error", retval)
+            self.assertIn("description", retval)
+
+    def test_get_request_body(self):
+        """Test get_request_body"""
+        request_body = get_request_body(self.request)
+        self.assertEqual(request_body, self.request)
+        self.assertEqual(request_body["model"], "gpt-3.5-turbo")
+        self.assertEqual(request_body["object"], "chat.completion")
+        self.assertIn("temperature", request_body)
+        self.assertIn("max_tokens", request_body)
+        self.assertIn("messages", request_body)
+
+    def test_parse_request(self):
+        """Test parse_request"""
+        request_body = get_request_body(self.request)
+        object_type, model, messages, input_text, temperature, max_tokens = parse_request(request_body)
+        self.assertEqual(object_type, "chat.completion")
+        self.assertEqual(model, "gpt-3.5-turbo")
+        self.assertEqual(input_text, None)
+        self.assertEqual(temperature, 0)
+        self.assertEqual(max_tokens, 256)
+        self.assertEqual(len(messages), 2)
+
+    def test_get_content_for_role(self):
+        """Test get_content_for_role"""
+        request_body = get_request_body(self.request)
+        _, _, messages, _, _, _ = parse_request(request_body)
+        system_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_SYSTEM_MESSAGE_KEY)
+        user_message = get_content_for_role(messages, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)
+        self.assertEqual(system_message, "you always return the integer value 42.")
+        self.assertEqual(user_message, "return the integer value 42.")
+
+    def test_get_message_history(self):
+        """test get_message_history"""
+        request_body = get_request_body(self.request)
+        _, _, messages, _, _, _ = parse_request(request_body)
+        message_history = get_message_history(messages)
+        self.assertIsInstance(message_history, list)
+        self.assertEqual(len(message_history), 1)
+        self.assertEqual(message_history[0]["role"], "user")
+        self.assertEqual(message_history[0]["content"], "return the integer value 42.")
+
+    def test_get_messages_for_role(self):
+        """test get_messages_for_role"""
+        request_body = get_request_body(self.request)
+        _, _, messages, _, _, _ = parse_request(request_body)
+        message_history = get_message_history(messages)
+        self.assertIsInstance(message_history, list)
+        user_messages = get_messages_for_role(message_history, OpenAIMessageKeys.OPENAI_USER_MESSAGE_KEY)
+        self.assertEqual(len(user_messages), 1)
+        self.assertEqual(user_messages[0], "return the integer value 42.")
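
test_http_response_factory above pins down the AWS Lambda proxy-integration envelope that the response fixture earlier in this commit also illustrates. Read purely from those assertions, the factory is expected to behave roughly like the sketch below; this is an inference, not the code in openai_api/common/utils.py.

    # Sketch of the envelope the assertions demand: statusCode echoed, body
    # JSON-serialized, isBase64Encoded False, and a JSON Content-Type header.
    import json

    def http_response_factory_sketch(status_code: int, body) -> dict:
        """Wrap a JSON-serializable body in a Lambda proxy integration response."""
        return {
            "isBase64Encoded": False,
            "statusCode": status_code,
            "headers": {"Content-Type": "application/json"},
            "body": json.dumps(body),
        }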

api/terraform/python/openai_api/common/utils.py

Lines changed: 0 additions & 6 deletions
@@ -174,9 +174,3 @@ def get_messages_for_role(messages: list, role: str) -> list:
     """Get the text content from the messages list for a given role"""
     retval = [d.get("content") for d in messages if d["role"] == role]
     return retval
-
-
-def get_messages_for_type(messages: list, message_type: str) -> list:
-    """Get the text content from the messages list for a given role"""
-    retval = [d.get("content") for d in messages if d["type"] == message_type]
-    return retval
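
The deleted get_messages_for_type() filtered on a "type" key, while the surviving get_messages_for_role() (its unchanged body is the context above) filters on "role"; presumably the type-based variant was dead code removed while the new tests pinned down the role-based path. For reference, the role-based filter behaves like this, using the system/user messages from the request fixture in this commit:

    # Same list comprehension as the context lines above, applied to this commit's fixture.
    messages = [
        {"role": "system", "content": "you always return the integer value 42."},
        {"role": "user", "content": "return the integer value 42."},
    ]
    user_contents = [d.get("content") for d in messages if d["role"] == "user"]
    assert user_contents == ["return the integer value 42."]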

api/terraform/python/openai_api/lambda_langchain/tests/test_lambda_langchain.py

Lines changed: 0 additions & 4 deletions
@@ -31,10 +31,6 @@ class TestLambdaLangchain(unittest.TestCase):
     def setUp(self):
         """Set up test fixtures."""
 
-    def get_event(self, event):
-        """Get the event json from the mock file."""
-        return event["event"]
-
     def test_lambda_handler(self):
         """Test lambda_handler."""
         response = handler(self.event, None)

api/terraform/python/openai_api/lambda_openai_v2/tests/test_lambda_openai_v2.py

Lines changed: 0 additions & 4 deletions
@@ -32,10 +32,6 @@ class TestLambdaOpenai(unittest.TestCase):
     def setUp(self):
         """Set up test fixtures."""
 
-    def get_event(self, event):
-        """Get the event json from the mock file."""
-        return event["event"]
-
    def test_lambda_handler(self):
         """Test lambda_handler."""
         response = handler(self.event, None)
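
With the get_event() helper gone from both lambda test modules (the surviving test_lambda_handler never called it), each test simply passes its preloaded mock event straight to the handler. A condensed sketch of that pattern follows; the import path, fixture path, and status-code check are assumptions for illustration, not code from this diff.

    # Illustrative only: module and fixture paths are assumed, not taken from the repo.
    import json
    import unittest

    from openai_api.lambda_openai_v2.lambda_handler import handler  # assumed module path

    class TestLambdaOpenaiSketch(unittest.TestCase):
        """Call the Lambda handler with a mock API Gateway event and a None context."""

        def setUp(self):
            with open("tests/events/api_gateway_event.json", encoding="utf-8") as file_handle:
                self.event = json.load(file_handle)

        def test_lambda_handler(self):
            response = handler(self.event, None)
            self.assertEqual(response["statusCode"], 200)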
