Jmuz.py
from __future__ import annotations

from ..typing import AsyncResult, Messages
from .needs_auth.OpenaiAPI import OpenaiAPI


class Jmuz(OpenaiAPI):
    label = "Jmuz"
    url = "https://discord.gg/qXfu24JmsB"
    login_url = None
    api_base = "https://jmuz.me/gpt/api/v2"
    api_key = "prod"

    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = False

    default_model = "gpt-4o"
    model_aliases = {
        "gemini": "gemini-exp",
        "deepseek-chat": "deepseek-2.5",
        "qwq-32b": "qwq-32b-preview",
    }

    @classmethod
    def get_models(cls):
        # Fetch and cache the model list from the upstream API on first use.
        if not cls.models:
            cls.models = super().get_models(api_key=cls.api_key, api_base=cls.api_base)
        return cls.models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        api_key: str = None,
        api_base: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            "Authorization": f"Bearer {cls.api_key}",
            "Content-Type": "application/json",
            "accept": "*/*",
            "cache-control": "no-cache",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
        }

        started = False
        async for chunk in super().create_async_generator(
            model=model,
            messages=messages,
            api_base=cls.api_base,
            api_key=cls.api_key,
            stream=cls.supports_stream,
            headers=headers,
            **kwargs
        ):
            # Drop chunks that merely echo the provider's Discord URL.
            if isinstance(chunk, str) and cls.url in chunk:
                continue
            # Trim leading whitespace until the first non-empty text chunk arrives.
            if isinstance(chunk, str) and not started:
                chunk = chunk.lstrip()
            if chunk:
                started = True
                yield chunk
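
A minimal usage sketch for the class above, assuming g4f's usual Messages shape (a list of role/content dicts) and a plain asyncio loop; the prompt and model name are illustrative, and the relative imports mean the module must be run within its package context.

if __name__ == "__main__":
    import asyncio

    async def _demo():
        # Illustrative only: messages follow the {"role": ..., "content": ...} convention.
        messages = [{"role": "user", "content": "Say hello"}]
        async for chunk in Jmuz.create_async_generator(model=Jmuz.default_model, messages=messages):
            print(chunk, end="", flush=True)

    asyncio.run(_demo())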