-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathllm.py
More file actions
77 lines (64 loc) · 2.42 KB
/
llm.py
File metadata and controls
77 lines (64 loc) · 2.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import os
from openai import OpenAI
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import Optional
# Load environment variables (OPENAI_API_KEY, TOGETHER_AI_API_KEY, ...) from a .env file.
load_dotenv()
# Default OpenAI client; the OpenAI() constructor reads OPENAI_API_KEY from the environment.
openai_client = OpenAI()
# Together.ai exposes an OpenAI-compatible endpoint, so the same client class is
# pointed at a different base URL. NOTE(review): os.environ[...] raises KeyError at
# import time if TOGETHER_AI_API_KEY is unset — presumably intentional fail-fast.
together_client = OpenAI(base_url="https://api.together.xyz/v1",
                         api_key=os.environ["TOGETHER_AI_API_KEY"])
class LLMResponse(BaseModel):
    """Schema of the JSON object the LLM helpers below expect the model to return."""
    # Whether the model reports the requested operation as successful.
    success: bool
    # Result items accompanying a response; None when the model returned none.
    # NOTE(review): element type is unconstrained (plain `list`) — tighten if the
    # item schema is known.
    results: Optional[list] = None
def run_mistral_llm(system_prompt, user_prompt) -> LLMResponse:
    """Run Mistral-7B-Instruct-v0.2 on Together.ai and parse its JSON reply.

    Args:
        system_prompt: System message steering the model.
        user_prompt: User message carrying the actual request.

    Returns:
        LLMResponse parsed from the JSON object embedded in the completion text.

    Raises:
        pydantic.ValidationError: if the extracted text is not valid
            LLMResponse JSON (including when no braces are found at all).
    """
    print("running mistral on Together.ai")
    completion = together_client.chat.completions.create(
        model="mistralai/Mistral-7B-Instruct-v0.2",
        temperature=0.2,  # low temperature for stable, parseable output
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    content = completion.choices[0].message.content
    # Together.ai does not enforce JSON output, so slice out the outermost JSON
    # object. BUG FIX: use rfind for the closing brace — find("}") stopped at the
    # FIRST "}", truncating any payload with nested objects (e.g. dicts inside
    # `results`) and breaking validation.
    content = content[content.find("{"): content.rfind("}") + 1]
    print("Together.ai mistral output:", content)
    response = LLMResponse.model_validate_json(content)
    return response
def run_mistral_tuned_llm(system_prompt, user_prompt) -> LLMResponse:
    """Run the fine-tuned Mistral-7B-Instruct variant on Together.ai and parse its JSON reply.

    Args:
        system_prompt: System message steering the model.
        user_prompt: User message carrying the actual request.

    Returns:
        LLMResponse parsed from the JSON object embedded in the completion text.

    Raises:
        pydantic.ValidationError: if the extracted text is not valid
            LLMResponse JSON (including when no braces are found at all).
    """
    print("running mistral_tuned on Together.ai")
    completion = together_client.chat.completions.create(
        model="zetyquickly@googlemail.com/Mistral-7B-Instruct-v0.2-finetune-example-2024-01-14-03-07-26",
        temperature=0.2,  # low temperature for stable, parseable output
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    content = completion.choices[0].message.content
    # Together.ai does not enforce JSON output, so slice out the outermost JSON
    # object. BUG FIX: use rfind for the closing brace — find("}") stopped at the
    # FIRST "}", truncating any payload with nested objects (e.g. dicts inside
    # `results`) and breaking validation.
    content = content[content.find("{"): content.rfind("}") + 1]
    print("Together.ai mistral tuned output:", content)
    response = LLMResponse.model_validate_json(content)
    return response
def run_openai_llm(system_prompt, user_prompt) -> LLMResponse:
    """Run GPT-4 Turbo (gpt-4-1106-preview) with enforced JSON output and parse the reply.

    Unlike the Together.ai helpers, no brace-slicing is needed here because
    response_format={"type": "json_object"} makes the API guarantee a JSON body.

    Args:
        system_prompt: System message steering the model.
        user_prompt: User message carrying the actual request.

    Returns:
        LLMResponse parsed from the model's JSON completion.

    Raises:
        pydantic.ValidationError: if the completion does not match LLMResponse.
    """
    print("running OpenAI model")
    completion = openai_client.chat.completions.create(
        model="gpt-4-1106-preview",
        response_format={"type": "json_object"},  # server-side JSON enforcement
        temperature=0.2,  # low temperature for stable, parseable output
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
    )
    content = completion.choices[0].message.content
    print("OpenAI output:", content)
    # Removed two duplicated commented-out breakpoint() debug leftovers.
    response = LLMResponse.model_validate_json(content)
    return response