Diffstat (limited to 'g4f/Provider/Aivvm.py')
-rw-r--r--  g4f/Provider/Aivvm.py  51
1 file changed, 29 insertions, 22 deletions
diff --git a/g4f/Provider/Aivvm.py b/g4f/Provider/Aivvm.py
index 1a3b6f0b..1ba6d6f1 100644
--- a/g4f/Provider/Aivvm.py
+++ b/g4f/Provider/Aivvm.py
@@ -1,8 +1,8 @@
from __future__ import annotations
+import requests
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from .base_provider import BaseProvider
+from ..typing import CreateResult
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -16,26 +16,39 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
-class Aivvm(AsyncGeneratorProvider):
+class Aivvm(BaseProvider):
url = 'https://chat.aivvm.com'
+ supports_stream = True
+ working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
- working = True
@classmethod
- async def create_async_generator(
- cls,
+ def create_completion(cls,
model: str,
messages: list[dict[str, str]],
stream: bool,
- timeout: int = 30,
**kwargs
- ) -> AsyncGenerator:
+ ) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model is not supported: {model}")
+ headers = {
+ "accept" : "*/*",
+ "accept-language" : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
+ "content-type" : "application/json",
+ "sec-ch-ua" : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
+ "sec-ch-ua-mobile" : "?0",
+ "sec-ch-ua-platform": "\"Bandóz\"",
+ "sec-fetch-dest" : "empty",
+ "sec-fetch-mode" : "cors",
+ "sec-fetch-site" : "same-origin",
+ "Referer" : "https://chat.aivvm.com/",
+ "Referrer-Policy" : "same-origin",
+ }
+
json_data = {
"model" : models[model],
"messages" : messages,
@@ -43,19 +56,13 @@ class Aivvm(AsyncGeneratorProvider):
"prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
"temperature" : kwargs.get("temperature", 0.7)
}
- headers = {
- "Accept": "*/*",
- "Origin": cls.url,
- "Referer": f"{cls.url}/",
- }
- async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
- async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
- response.raise_for_status()
- async for chunk in response.iter_content():
- if b'Access denied | chat.aivvm.com used Cloudflare' in chunk:
- raise ValueError("Rate Limit | use another provider")
-
- yield chunk.decode()
+
+ response = requests.post(
+ "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
+ response.raise_for_status()
+
+ for chunk in response.iter_content(chunk_size=None):
+ yield chunk.decode('utf-8')
@classmethod
@property
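
The comment at the top of the file notes that the model list can be recreated by sending a POST request to https://chat.aivvm.com/api/models. A minimal sketch of doing that with requests; the empty request body and the response shape (a JSON array of objects with "id" and "name" keys, mirroring the hard-coded dict) are assumptions, not confirmed API details:

import requests

# Assumed: the endpoint accepts an empty JSON body and returns a JSON array
# of {"id": ..., "name": ...} objects matching the hard-coded `models` dict.
resp = requests.post("https://chat.aivvm.com/api/models", json={})
resp.raise_for_status()
models = {m["id"]: {"id": m["id"], "name": m["name"]} for m in resp.json()}
print(models)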
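
One caveat with the new synchronous streaming loop: chunk.decode('utf-8') raises UnicodeDecodeError if a multibyte character happens to be split across chunk boundaries. A minimal sketch of a safer variant using an incremental decoder; the helper name and parameters are illustrative, not part of the diff:

import codecs
import requests

def stream_text(url: str, headers: dict, json_data: dict):
    # Hypothetical helper: same call shape as in create_completion above.
    response = requests.post(url, headers=headers, json=json_data, stream=True)
    response.raise_for_status()
    # The incremental decoder buffers a partial multibyte sequence between
    # chunks instead of raising UnicodeDecodeError mid-stream.
    decoder = codecs.getincrementaldecoder("utf-8")()
    for chunk in response.iter_content(chunk_size=None):
        text = decoder.decode(chunk)
        if text:
            yield text
    tail = decoder.decode(b"", final=True)
    if tail:
        yield tail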