test_openai.py
"""Unit tests for the openai LLM class"""
import os
from unittest import mock
import openai
import pytest
from extensions.llms.openai.pandasai_openai import OpenAI
from pandasai.core.prompts.base import BasePrompt
from pandasai.exceptions import APIKeyNotFoundError, UnsupportedModelError
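

# Minimal stand-in for the response objects returned by the OpenAI SDK: it
# exposes the keys of a plain dict as attributes, which is all the mocked
# completion payloads below require.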
class OpenAIObject:
    def __init__(self, dictionary):
        self.__dict__.update(dictionary)


class TestOpenAILLM:
    """Unit tests for the openai LLM class"""

    @pytest.fixture
    def prompt(self):
        class MockBasePrompt(BasePrompt):
            template: str = "instruction"

        return MockBasePrompt()

    def test_type_without_token(self):
        # With the environment cleared, no OPENAI_API_KEY can be picked up,
        # so construction must fail.
        with mock.patch.dict(os.environ, clear=True):
            with pytest.raises(APIKeyNotFoundError):
                OpenAI()

    def test_type_with_token(self):
        assert OpenAI(api_token="test").type == "openai"

    def test_proxy(self):
        # Setting a proxy on the client should also configure it globally on
        # the openai module, for both http and https.
        proxy = "http://proxy.mycompany.com:8080"
        client = OpenAI(api_token="test", openai_proxy=proxy)
        assert client.openai_proxy == proxy
        assert openai.proxy["http"] == proxy
        assert openai.proxy["https"] == proxy

    def test_params_setting(self):
        llm = OpenAI(
            api_token="test",
            model="gpt-3.5-turbo",
            temperature=0.5,
            max_tokens=50,
            top_p=1.0,
            frequency_penalty=2.0,
            presence_penalty=3.0,
            stop=["\n"],
        )

        assert llm.model == "gpt-3.5-turbo"
        assert llm.temperature == 0.5
        assert llm.max_tokens == 50
        assert llm.top_p == 1.0
        assert llm.frequency_penalty == 2.0
        assert llm.presence_penalty == 3.0
        assert llm.stop == ["\n"]

    def test_completion(self, mocker):
        expected_text = "This is the generated text."
        expected_response = OpenAIObject(
            {
                "choices": [{"text": expected_text}],
                "usage": {
                    "prompt_tokens": 2,
                    "completion_tokens": 1,
                    "total_tokens": 3,
                },
                "model": "gpt-35-turbo",
            }
        )

        # Use `llm` for the instance so it does not shadow the imported
        # openai module.
        llm = OpenAI(api_token="test")
        mocker.patch.object(llm, "completion", return_value=expected_response)
        result = llm.completion("Some prompt.")

        llm.completion.assert_called_once_with("Some prompt.")
        assert result == expected_response

    def test_chat_completion(self, mocker):
        llm = OpenAI(api_token="test")
        expected_response = OpenAIObject(
            {
                "choices": [
                    {
                        "text": "Hello, how can I help you today?",
                        "index": 0,
                        "logprobs": None,
                        "finish_reason": "stop",
                        "start_text": "",
                    }
                ]
            }
        )
        mocker.patch.object(llm, "chat_completion", return_value=expected_response)
        result = llm.chat_completion("Hi")

        llm.chat_completion.assert_called_once_with("Hi")
        assert result == expected_response

    def test_call_with_unsupported_model(self, prompt):
        with pytest.raises(
            UnsupportedModelError,
            match=(
                "Unsupported model: The model 'not a model' doesn't exist "
                "or is not supported yet."
            ),
        ):
            llm = OpenAI(api_token="test", model="not a model")
            llm.call(instruction=prompt)

    def test_call_supported_completion_model(self, mocker, prompt):
        llm = OpenAI(api_token="test", model="gpt-3.5-turbo-instruct")
        mocker.patch.object(llm, "completion", return_value="response")
        result = llm.call(instruction=prompt)
        assert result == "response"

    def test_call_supported_chat_model(self, mocker, prompt):
        llm = OpenAI(api_token="test", model="gpt-4")
        mocker.patch.object(llm, "chat_completion", return_value="response")
        result = llm.call(instruction=prompt)
        assert result == "response"

    def test_call_with_system_prompt(self, mocker, prompt):
        # A fine-tuned chat model id is expected to be routed through the
        # chat completion path.
        llm = OpenAI(
            api_token="test", model="ft:gpt-3.5-turbo:my-org:custom_suffix:id"
        )
        mocker.patch.object(llm, "chat_completion", return_value="response")
        result = llm.call(instruction=prompt)
        assert result == "response"
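

# Optional convenience entry point, a minimal sketch assuming pytest is
# installed; the usual way to run this module is `pytest test_openai.py`.
if __name__ == "__main__":
    import sys

    sys.exit(pytest.main([__file__]))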