Diffstat (limited to 'g4f/Provider/unfinished')
-rw-r--r-- | g4f/Provider/unfinished/ChatAiGpt.py | 64
-rw-r--r-- | g4f/Provider/unfinished/TalkAi.py    | 60
-rw-r--r-- | g4f/Provider/unfinished/__init__.py  |  4
3 files changed, 127 insertions(+), 1 deletion(-)
diff --git a/g4f/Provider/unfinished/ChatAiGpt.py b/g4f/Provider/unfinished/ChatAiGpt.py
new file mode 100644
index 00000000..8ade40c2
--- /dev/null
+++ b/g4f/Provider/unfinished/ChatAiGpt.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import re
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class ChatAiGpt(AsyncGeneratorProvider):
+    url = "https://chataigpt.org"
+    supports_gpt_35_turbo = True
+    working = True
+    _nonce = None
+    _post_id = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Origin": cls.url,
+            "Alt-Used": cls.url,
+            "Connection": "keep-alive",
+            "Referer": cls.url,
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache",
+            "TE": "trailers",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+        }
+        async with ClientSession(headers=headers) as session:
+            if not cls._nonce:
+                async with session.get(f"{cls.url}/", proxy=proxy) as response:
+                    response.raise_for_status()
+                    response = await response.text()
+                    result = re.search(r'data-nonce=(.*?) data-post-id=([0-9]+)', response)
+                    if not result:
+                        raise RuntimeError("No nonce found")
+                    cls._nonce, cls._post_id = result.group(1), result.group(2)
+            prompt = format_prompt(messages)
+            data = {
+                "_wpnonce": cls._nonce,
+                "post_id": cls._post_id,
+                "url": cls.url,
+                "action": "wpaicg_chat_shortcode_message",
+                "message": prompt,
+                "bot_id": 0
+            }
+            async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
\ No newline at end of file
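A minimal sketch of how the new provider could be driven once the diff above is applied. It assumes g4f's usual list-of-{"role", "content"} message format; the import path relies on the __init__.py change further below, and the prompt and model name are illustrative only:

import asyncio

from g4f.Provider.unfinished import ChatAiGpt  # import path assumes the __init__.py change below

async def main():
    # create_async_generator is an async generator, so it can be iterated directly;
    # ChatAiGpt ignores the model argument and streams raw response chunks.
    messages = [{"role": "user", "content": "Hello, who are you?"}]
    async for chunk in ChatAiGpt.create_async_generator(model="gpt-3.5-turbo", messages=messages):
        print(chunk, end="")

asyncio.run(main())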
diff --git a/g4f/Provider/unfinished/TalkAi.py b/g4f/Provider/unfinished/TalkAi.py
new file mode 100644
index 00000000..a7f1dd84
--- /dev/null
+++ b/g4f/Provider/unfinished/TalkAi.py
@@ -0,0 +1,60 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class TalkAi(AsyncGeneratorProvider):
+    url = "https://talkai.info"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
+            "Accept": "application/json",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/de/chat/",
+            "content-type": "application/json",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "Pragma": "no-cache",
+            "Cache-Control": "no-cache"
+        }
+        async with ClientSession(headers=headers) as session:
+            history = [{
+                "content": message["content"],
+                "from": "you" if message["role"] == "user" else "chatGPT"
+            } for message in messages]
+            data = {
+                "type": "chat",
+                "message": messages[-1]["content"],
+                "messagesHistory": history,
+                "model": model,
+                "max_tokens": 256,
+                "temperature": 1,
+                "top_p": 1,
+                "presence_penalty": 0,
+                "frequency_penalty": 0,
+                **kwargs
+            }
+            async with session.post(f"{cls.url}/de/chat/send2/", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for chunk in response.content:
+                    if chunk:
+                        yield chunk.decode()
\ No newline at end of file
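For reference, a standalone sketch (no network access) of how TalkAi reshapes g4f messages into the "messagesHistory" payload built in the code above; the sample conversation is made up:

messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How can I help?"},
    {"role": "user", "content": "Tell me a joke."},
]

# Mirrors the list comprehension inside TalkAi.create_async_generator:
history = [
    {"content": m["content"], "from": "you" if m["role"] == "user" else "chatGPT"}
    for m in messages
]

print(history[0])   # {'content': 'Hi', 'from': 'you'}
print(history[1])   # {'content': 'Hello! How can I help?', 'from': 'chatGPT'}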
diff --git a/g4f/Provider/unfinished/__init__.py b/g4f/Provider/unfinished/__init__.py
index 8330b5e4..bf5ff9aa 100644
--- a/g4f/Provider/unfinished/__init__.py
+++ b/g4f/Provider/unfinished/__init__.py
@@ -1,3 +1,5 @@
 from .MikuChat import MikuChat
 from .PerplexityAi import PerplexityAi
-from .Komo import Komo
\ No newline at end of file
+from .Komo import Komo
+from .TalkAi import TalkAi
+from .ChatAiGpt import ChatAiGpt
\ No newline at end of file
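With the __init__.py change, both new providers are importable from the unfinished package; a minimal sketch (whether the rest of g4f routes requests to them is outside this diff):

from g4f.Provider.unfinished import TalkAi, ChatAiGpt

# Both expose the same AsyncGeneratorProvider interface added above.
print(TalkAi.working, ChatAiGpt.working)  # True True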