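# AutonomousAI provider: posts the base64-encoded message history to the
# chatgpt.autonomous.ai chat endpoints and streams back OpenAI-style
# "data: {...}" SSE chunks, yielding text deltas and a FinishReason.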
from __future__ import annotations

from aiohttp import ClientSession
import base64
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from ..providers.response import FinishReason
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.autonomous.ai/anon/"
    api_endpoints = {
        "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
        "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
        "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
    }

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "llama"
    models = [default_model, "qwen_coder", "hermes", "vision", "summary"]

    model_aliases = {
        "llama-3.3-70b": default_model,
        "qwen-2.5-coder-32b": "qwen_coder",
        "hermes-3": "hermes",
        "llama-3.2-90b": "vision",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        # Resolve aliases such as "llama-3.3-70b" to the internal model keys
        model = cls.get_model(model)
        api_endpoint = cls.api_endpoints[model]
        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'country-code': 'US',
            'origin': 'https://www.autonomous.ai',
            'referer': 'https://www.autonomous.ai/',
            'time-zone': 'America/New_York',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            # The API expects the message history as a base64-encoded JSON string
            message_json = json.dumps(messages)
            encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")

            data = {
                "messages": encoded_message,
                "threadId": model,
                "stream": stream,
                "aiAgent": model
            }

            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)

                async for chunk in response.content:
                    if chunk:
                        chunk_str = chunk.decode()
                        # Skip the SSE terminator (strip the trailing newline first)
                        if chunk_str.strip() == "data: [DONE]":
                            continue
                        try:
                            # Remove "data: " prefix and parse JSON
                            chunk_data = json.loads(chunk_str.replace("data: ", ""))
                            if "choices" in chunk_data and chunk_data["choices"]:
                                delta = chunk_data["choices"][0].get("delta", {})
                                if "content" in delta and delta["content"]:
                                    yield delta["content"]
                            # Yield the finish reason once the API reports one
                            if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
                                yield FinishReason(chunk_data["finish_reason"])
                        except json.JSONDecodeError:
                            continue
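

# A minimal usage sketch, not part of the provider itself. It assumes this file
# lives inside the g4f provider package so the relative imports above resolve
# (e.g. run it with `python -m <package>.AutonomousAI`); the model name and
# prompt are illustrative only.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        # Collect streamed text deltas; a FinishReason marks the end of the reply.
        messages = [{"role": "user", "content": "Hello, who are you?"}]
        async for part in AutonomousAI.create_async_generator("llama", messages, stream=True):
            if isinstance(part, FinishReason):
                break
            print(part, end="", flush=True)
        print()

    asyncio.run(_demo())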