-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm.py
More file actions
executable file
·145 lines (109 loc) · 4.21 KB
/
llm.py
File metadata and controls
executable file
·145 lines (109 loc) · 4.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
from dataclasses import dataclass, field
from openai import OpenAI
import os
from dotenv import load_dotenv
from PIL import Image
from pydantic import BaseModel
from typing import Type, cast
import base64
import io
# Pull variables from a local .env file into the process environment,
# then read the Gemini key (None if unset; LLM falls back to this default).
load_dotenv()
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
def encode_image_from_path(image_path: str) -> str:
    """Read an image file from disk and return its contents Base64-encoded.

    Args:
        image_path (str): Path to the image file on disk.

    Returns:
        str: Base64-encoded file contents, decoded to UTF-8 text.
    """
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")
def encode_pil_image(image: Image.Image, format: str = "PNG") -> str:
    """Serialize a PIL image into an in-memory buffer and Base64-encode it.

    Args:
        image (PIL.Image.Image): Image object to encode.
        format (str): Serialization format handed to ``Image.save`` (default "PNG").

    Returns:
        str: Base64-encoded image bytes, decoded to UTF-8 text.
    """
    with io.BytesIO() as buffer:
        image.save(buffer, format=format)
        payload = buffer.getvalue()
    return base64.b64encode(payload).decode("utf-8")
@dataclass
class LLM:
    """Single-shot chat-completion helper over Gemini's OpenAI-compatible endpoint.

    Configure the fields (prompts, optional image, optional pydantic output
    schema), then call :meth:`invoke` to run one request.
    """

    api_key: str | None = GEMINI_API_KEY
    base_url: str | None = "https://generativelanguage.googleapis.com/v1beta/openai/"
    client: OpenAI = field(init=False)
    model: str = "gemini-3-flash-preview"
    sys_prompt: str | None = None
    user_prompt: str = ""
    image: Image.Image | None = None
    output_model: Type[BaseModel] | None = None

    def __post_init__(self) -> None:
        # Build the SDK client once per instance from the configured credentials.
        self.client = OpenAI(api_key=self.api_key, base_url=self.base_url)

    def get_messages(self) -> list:
        """Assemble the chat message list from the configured prompts and image.

        Returns:
            list: Optional system message followed by one user message; the
            user content is a text+image_url part list when an image is set,
            otherwise the plain prompt string.
        """
        messages: list = []

        if self.sys_prompt:
            messages.append({"role": "system", "content": self.sys_prompt})

        if self.image:
            # Multimodal turn: text part plus the image as a Base64 data URL.
            image_b64 = encode_pil_image(self.image)
            content = [
                {"type": "text", "text": self.user_prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{image_b64}"},
                },
            ]
        else:
            content = self.user_prompt

        messages.append({"role": "user", "content": content})
        return messages

    def invoke(self) -> BaseModel | str | None:
        """Run one chat completion.

        Returns:
            BaseModel | str | None: Parsed schema instance when
            ``output_model`` is set, the raw text content otherwise, or
            None if the request failed.
        """
        try:
            messages = self.get_messages()
            if self.output_model is None:
                completion = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                )
                return completion.choices[0].message.content
            # Structured output: let the SDK validate against the pydantic schema.
            completion = self.client.chat.completions.parse(  # type: ignore
                model=self.model,
                messages=messages,
                response_format=cast(Type[BaseModel], self.output_model),
            )
            return completion.choices[0].message.parsed
        except Exception as e:
            # Best-effort by design: report the failure and signal it with None
            # rather than propagating to the caller.
            print(f"LLM.invoke() error: {e}")
            return None
# System prompt for invoice extraction, intended as LLM.sys_prompt alongside a
# pydantic schema in LLM.output_model (the "provided schema" it refers to).
sys_prompt_invoice = """You are an invoice extraction assistant. Extract all fields according to the provided schema.
- If a field is missing in the invoice, return null.
- For list fields like "items", return an empty list if there are no items.
- Do not invent or guess values.
- Output must strictly follow the schema."""
# User-turn text intended to accompany the attached invoice image.
user_prompt_invoice = "Extract all invoice information from the attached image."
if __name__ == "__main__":
    # Manual smoke-test area — intentionally a no-op so importing this module
    # has no side effects. Example usage:
    #   response = LLM(user_prompt="What is the capital of France?").invoke()
    #   print(response)
    # For invoice extraction, pass image=<PIL image>, sys_prompt=sys_prompt_invoice,
    # user_prompt=user_prompt_invoice, output_model=<pydantic Invoice schema>.
    pass