author     kqlio67 <kqlio67@users.noreply.github.com>  2024-10-11 08:39:19 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-10-11 08:39:19 +0200
commit     f63d1566cf39eda38aabefddf5a409a44fa9d0d1 (patch)
tree       1265bf6caa3e3f3e45883122639e3f0a04ad120e
parent     feat(g4f/Provider/ChatifyAI.py): add new AmigoChat text and image models
Diffstat:
 g4f/Provider/DarkAI.py | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py
new file mode 100644
index 00000000..d5bd86a5
--- /dev/null
+++ b/g4f/Provider/DarkAI.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
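+# Async streaming provider for the DarkAI chat API (web front end: aiuncensored.info).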
+class DarkAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.aiuncensored.info"
+ api_endpoint = "https://darkai.foundation/chat"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o'
+ models = [
+ default_model, # Uncensored
+ 'gpt-3.5-turbo', # Uncensored
+ 'llama-3-70b', # Uncensored
+ 'llama-3-405b',
+ ]
+
+ model_aliases = {
+ "llama-3.1-70b": "llama-3-70b",
+ "llama-3.1-405b": "llama-3-405b",
+ }
+
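+    # Resolve the requested model: exact name first, then alias lookup, otherwise the default.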
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "origin": "https://www.aiuncensored.info",
+ "referer": "https://www.aiuncensored.info/",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
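+            # The endpoint expects the whole conversation as a single "query" string,
+            # so the message history is flattened into one prompt.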
+ prompt = format_prompt(messages)
+ data = {
+ "query": prompt,
+ "model": model,
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
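+                # The endpoint answers with a server-sent event stream; iterating
+                # aiohttp's StreamReader yields the body line by line.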
+ full_text = ""
+ async for chunk in response.content:
+ if chunk:
+ try:
+ chunk_str = chunk.decode().strip()
+ if chunk_str.startswith('data: '):
+ chunk_data = json.loads(chunk_str[6:])
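+                                # 'text-chunk' events carry incremental text; 'stream-end' marks completion.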
+ if chunk_data['event'] == 'text-chunk':
+ full_text += chunk_data['data']['text']
+                                elif chunk_data['event'] == 'stream-end':
+                                    if full_text:
+                                        yield full_text.strip()
+                                    return
+                        except json.JSONDecodeError:
+                            print(f"Failed to decode JSON: {chunk_str}")
+                        except Exception as e:
+                            print(f"Error processing chunk: {e}")
+
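+            # Fallback: the stream closed without a 'stream-end' event, so emit any accumulated text.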
+ if full_text:
+ yield full_text.strip()