test_huggingface_text_gen.py
"""Unit tests for the HuggingFaceTextGen LLM class"""
from pandasai_huggingface.huggingface_text_gen import (
HuggingFaceTextGen,
)
from pandasai.core.prompts.base import BasePrompt
class MockBasePrompt(BasePrompt):
template: str = "instruction."
class MockResponse:
generated_text: str = ""
def __init__(self, generated_text):
self.generated_text = generated_text
class TestHuggingFaceTextGen:
    """Unit tests for the HuggingFaceTextGen LLM class"""

    def test_type_with_token(self):
        """`type` identifies the backend as huggingface-text-generation."""
        assert (
            HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080").type
            == "huggingface-text-generation"
        )

    def test_params_setting(self):
        """Constructor keyword arguments are stored on the LLM and its client."""
        llm = HuggingFaceTextGen(
            inference_server_url="http://127.0.0.1:8080",
            max_new_tokens=1024,
            top_p=0.8,
            typical_p=0.8,
            temperature=1e-3,
            stop_sequences=["\n"],
            seed=0,
            do_sample=False,
            streaming=True,
            timeout=120,
        )

        assert llm.client.base_url == "http://127.0.0.1:8080"
        assert llm.max_new_tokens == 1024
        assert llm.top_p == 0.8
        assert llm.temperature == 0.001
        assert llm.stop_sequences == ["\n"]
        assert llm.seed == 0
        assert not llm.do_sample
        assert llm.streaming
        assert llm.timeout == 120

    def test_completion(self, mocker):
        """`call` forwards the prompt and generation parameters to the text_generation client."""
        tgi_mock = mocker.patch("text_generation.Client.generate")
        expected_text = "This is the generated text."
        tgi_mock.return_value = MockResponse(expected_text)

        llm = HuggingFaceTextGen(inference_server_url="http://127.0.0.1:8080")
        instruction = MockBasePrompt()
        result = llm.call(instruction)

        tgi_mock.assert_called_once_with(
            instruction.to_string(),
            max_new_tokens=llm.max_new_tokens,
            top_k=llm.top_k,
            top_p=llm.top_p,
            typical_p=llm.typical_p,
            temperature=llm.temperature,
            repetition_penalty=llm.repetition_penalty,
            truncate=llm.truncate,
            stop_sequences=llm.stop_sequences,
            do_sample=llm.do_sample,
            seed=llm.seed,
        )
        assert result == expected_text
        assert tgi_mock.call_count == 1
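

# Note: the `mocker` fixture used above is provided by the pytest-mock plugin.
# One way to run this module locally (assuming pytest and pytest-mock are
# installed in the same environment as the packages imported above):
#
#   pytest test_huggingface_text_gen.py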