|
1 | 1 | __author__ = "thiagocastroferreira" |
2 | 2 |
|
3 | | -import pytest |
4 | 3 |
|
5 | 4 | from aixplain.enums import Function |
6 | 5 | from aixplain.factories import ModelFactory |
7 | 6 | from aixplain.modules import LLM |
| 7 | +from datetime import datetime, timedelta, timezone |
| 8 | + |
| 9 | + |
def pytest_generate_tests(metafunc):
    """Parametrize the ``llm_model`` fixture for every test that requests it.

    The parameter set is the union of (a) all text-generation models created
    in the last four weeks and (b) a fixed list of well-known LLMs, so new
    releases are picked up automatically while the staples stay covered.
    """
    if "llm_model" not in metafunc.fixturenames:
        return

    # Anything created in the last four weeks counts as "recent".
    # NOTE(review): assumes model.created_at is timezone-aware — comparing a
    # naive datetime against this aware cutoff would raise TypeError; confirm.
    cutoff = datetime.now(timezone.utc) - timedelta(weeks=4)
    listing = ModelFactory.list(function=Function.TEXT_GENERATION)["results"]

    recent = []
    for candidate in listing:
        if candidate.created_at and candidate.created_at >= cutoff:
            recent.append(candidate)

    # Always exercise these well-known models regardless of age.
    # NOTE(review): `[0]` raises IndexError if a query matches nothing,
    # which would abort test collection — verify these names stay valid.
    pinned = []
    for query in ["Groq Llama 3 70B", "Chat GPT 3.5", "GPT-4o", "GPT 4 (32k)"]:
        pinned.append(ModelFactory.list(query=query, function=Function.TEXT_GENERATION)["results"][0])

    metafunc.parametrize("llm_model", recent + pinned)
8 | 21 |
|
9 | 22 |
|
10 | | -@pytest.mark.parametrize("llm_model", ["Groq Llama 3 70B", "Chat GPT 3.5", "GPT-4o", "GPT 4 (32k)"]) |
11 | 23 | def test_llm_run(llm_model): |
12 | 24 | """Testing LLMs with history context""" |
13 | | - model = ModelFactory.list(query=llm_model, function=Function.TEXT_GENERATION)["results"][0] |
14 | 25 |
|
15 | | - assert isinstance(model, LLM) |
| 26 | + assert isinstance(llm_model, LLM) |
16 | 27 |
|
17 | | - response = model.run( |
| 28 | + response = llm_model.run( |
18 | 29 | data="What is my name?", |
19 | 30 | history=[{"role": "user", "content": "Hello! My name is Thiago."}, {"role": "assistant", "content": "Hello!"}], |
20 | 31 | ) |
|
0 commit comments