
Commit 529ada7

feat(bot): switch from gemini to groq
1 parent 50a5e4a commit 529ada7

File tree

6 files changed: +29 −22 lines changed


.env.example

Lines changed: 3 additions & 3 deletions

@@ -76,9 +76,9 @@ BOT_API_PORT=6002
 
 ################# Miscellaneous ##################
 
-# Google Gemini
-GEMINI_KEY=
-GEMINI_MODEL=gemini-1.5-flash
+# Groq
+GROQ_KEY=
+GROQ_MODEL=llama-3.3-70b-versatile
 
 # Bot list tokens
 TOPGG_TOKEN=
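
These are ordinary environment variables; the bot reads them through its config object at startup. A minimal sketch of loading them with python-dotenv (already pinned in requirements.txt) — the loader below is illustrative, not the repository's actual config code:

    import os

    from dotenv import load_dotenv  # python-dotenv, already in requirements.txt

    load_dotenv()  # populate os.environ from a local .env file

    # Empty values are treated as "unset" so the bot can skip AI features entirely.
    GROQ_KEY = os.getenv("GROQ_KEY") or None
    GROQ_MODEL = os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile")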

classes/bot.py

Lines changed: 10 additions & 4 deletions

@@ -8,13 +8,13 @@
 import aiohttp
 import aioredis
 import asyncpg
-import google.generativeai as genai
 import orjson
 
 from discord.ext import commands
 from discord.ext.commands.core import _CaseInsensitiveDict
 from discord.gateway import DiscordClientWebSocketResponse, DiscordWebSocket
 from discord.utils import parse_time
+from groq import AsyncGroq
 
 from classes.http import HTTPClient
 from classes.misc import Session, Status
@@ -215,6 +215,13 @@ async def on_http_request_end(self, _session, trace_config_ctx, params):
             }
         )
 
+    async def ai_generate(self, text):
+        completion = await self.ai.chat.completions.create(
+            messages=[{"role": "user", "content": text}],
+            model=self.config.GROQ_MODEL,
+        )
+        return completion.choices[0].message.content
+
     async def start(self, worker=True):
         trace_config = aiohttp.TraceConfig()
         trace_config.on_request_start.append(self.on_http_request_start)
@@ -257,9 +264,8 @@ async def start(self, worker=True):
         self.prom = Prometheus(self)
         await self.prom.start()
 
-        if self.config.GEMINI_KEY is not None:
-            genai.configure(api_key=self.config.GEMINI_KEY)
-            self.ai = genai.GenerativeModel(self.config.GEMINI_MODEL)
+        if self.config.GROQ_KEY is not None:
+            self.ai = AsyncGroq(api_key=self.config.GROQ_KEY)
 
         self._connection = State(
             id=self.id,
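
The new ai_generate helper wraps Groq's OpenAI-style chat completions endpoint, and start() only creates the AsyncGroq client when a key is configured. A standalone sketch of the same call, assuming GROQ_KEY is exported in the environment (this snippet is illustrative, not part of the repository):

    import asyncio
    import os

    from groq import AsyncGroq


    async def main():
        # Mirrors AsyncGroq(api_key=self.config.GROQ_KEY) in classes/bot.py.
        client = AsyncGroq(api_key=os.environ["GROQ_KEY"])
        completion = await client.chat.completions.create(
            messages=[{"role": "user", "content": "Summarise this ticket in one sentence."}],
            model="llama-3.3-70b-versatile",
        )
        # The reply text lives on the first choice, as in ai_generate().
        print(completion.choices[0].message.content)


    asyncio.run(main())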

cogs/core.py

Lines changed: 10 additions & 9 deletions

@@ -58,18 +58,19 @@ async def aireply(self, ctx, *, instructions: str = None):
         data = await tools.get_data(self.bot, ctx.guild.id)
 
         history = await self.generate_history(ctx.channel)
+        truncated_history = "\n".join(history.splitlines()[-100:])
         prompt = (
             "You are a Discord moderator for a server. The following is the entire history of "
             "the conversation between staff and the user. Please fill in the suitable response "
             "given the transcript. Only give 1 response option. Do not output additional text such "
             "as 'My response would be...'. Try to appear as supportive as possible.\nHere are "
             f"additional information you should consider (if any): {data[13]}\nHere are additional "
             f"instructions for your response (if any): {instructions}\n\nFull transcript: "
-            f"{history}.\n\nStaff response: "
+            f"{truncated_history}.\n\nStaff response: "
         )
 
         try:
-            response = await self.bot.ai.generate_content_async(prompt)
+            response = await self.bot.ai_generate(prompt)
         except Exception:
             await ctx.send(ErrorEmbed("Failed to generate a response."))
             return
@@ -79,7 +80,7 @@ async def aireply(self, ctx, *, instructions: str = None):
         except (discord.Forbidden, discord.NotFound):
             pass
 
-        msg = await ctx.send(Embed("AI Reply", response.text[:2048]))
+        msg = await ctx.send(Embed("AI Reply", response[:2048]))
 
         await msg.add_reaction("✅")
         await msg.add_reaction("❌")
@@ -235,14 +236,14 @@ async def close_channel(self, ctx, reason, anon: bool = False):
 
         if self.bot.ai is not None and data[7] == 1:
             try:
-                summary = await self.bot.ai.generate_content_async(
+                truncated_history = "\n".join(history.splitlines()[-100:])
+                summary = await self.bot.ai_generate(
                     "The following is the entire history of the conversation between staff and "
-                    "the user. Please summarise the entire interaction into 1 or 2 sentences, "
-                    "with at most 20 words. Only give 1 response option. Do not output "
-                    "additional text such as 'My response would be...'.\n\nFull transcript:\n"
-                    + history
+                    "the user. Please summarise the entire interaction into 1 or 2 sentences. "
+                    "Only give 1 response option. Do not output additional text such as 'Here "
+                    "is the summary...'.\n\nFull transcript:\n" + truncated_history
                 )
-                embed.add_field("AI Summary", summary.text)
+                embed.add_field("AI Summary", summary[:1024])
             except Exception:
                 pass
 
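Both call sites now cap the transcript at its last 100 lines before building the prompt, which bounds prompt size for long tickets. A small sketch of the same truncation as a standalone helper (the function name is illustrative, not from the repository):

    def truncate_transcript(history: str, max_lines: int = 100) -> str:
        # Keep only the most recent lines, matching
        # "\n".join(history.splitlines()[-100:]) in cogs/core.py.
        return "\n".join(history.splitlines()[-max_lines:])


    # A 250-line transcript is reduced to its final 100 lines.
    sample = "\n".join(f"line {i}" for i in range(250))
    assert len(truncate_transcript(sample).splitlines()) == 100
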
docker/.env.example

Lines changed: 3 additions & 3 deletions

@@ -26,6 +26,6 @@ BASE_URI=http://localhost:8000
 
 ################# Miscellaneous ##################
 
-# Google Gemini
-GEMINI_KEY=
-GEMINI_MODEL=gemini-1.5-flash
+# Groq
+GROQ_KEY=
+GROQ_MODEL=llama-3.3-70b-versatile

docker/docker-compose.yml

Lines changed: 2 additions & 2 deletions

@@ -83,8 +83,8 @@ services:
       - BASE_URI=${BASE_URI}
       - BOT_API_HOST=0.0.0.0
       - BOT_API_PORT=6002
-      - GEMINI_KEY=${GEMINI_KEY}
-      - GEMINI_MODEL=${GEMINI_MODEL}
+      - GROQ_KEY=${GROQ_KEY}
+      - GROQ_MODEL=${GROQ_MODEL}
       - TOPGG_TOKEN=
      - DBOTS_TOKEN=
       - DBL_TOKEN=

requirements.txt

Lines changed: 1 addition & 1 deletion

@@ -4,7 +4,7 @@ aioprometheus==23.3.0
 aioredis==1.3.1
 asyncpg==0.29.0
 dateparser==1.2.0
-google-generativeai==0.8.3
+groq==0.13.0
 orjson==3.9.13
 psutil==5.9.8
 python-dotenv==1.0.1
