author     Tekky <98614666+xtekky@users.noreply.github.com>  2023-10-14 15:36:47 +0200
committer  GitHub <noreply@github.com>  2023-10-14 15:36:47 +0200
commit     8bdbb9e9cda7901c3bfc23de2f9f44b3f2e3d1e5 (patch)
tree       5f205c79c060098f056a9e1460b872696de5871c /g4f/Provider/deprecated
parent     Merge branch 'main' of https://github.com/xtekky/gpt4free (diff)
parent     Fix Opchatgpts and ChatForAi Provider (diff)
Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--  g4f/Provider/deprecated/Aivvm.py        84
-rw-r--r--  g4f/Provider/deprecated/ChatForAi.py    55
-rw-r--r--  g4f/Provider/deprecated/Opchatgpts.py   74
-rw-r--r--  g4f/Provider/deprecated/Vitalentum.py   68
-rw-r--r--  g4f/Provider/deprecated/__init__.py      4
5 files changed, 154 insertions, 131 deletions
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
new file mode 100644
index 00000000..12fd387d
--- /dev/null
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -0,0 +1,84 @@
+from __future__ import annotations
+import requests
+
+from ..base_provider import BaseProvider
+from ...typing import CreateResult, Messages
+from json import dumps
+
+# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
+models = {
+    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
+    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
+    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
+    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
+    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
+    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
+    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
+    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
+}
+
+class Aivvm(BaseProvider):
+    url = 'https://chat.aivvm.com'
+    supports_stream = True
+    working = False
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+
+    @classmethod
+    def create_completion(cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        if not model:
+            model = "gpt-3.5-turbo"
+        elif model not in models:
+            raise ValueError(f"Model is not supported: {model}")
+
+        json_data = {
+            "model" : models[model],
+            "messages" : messages,
+            "key" : "",
+            "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
+            "temperature" : kwargs.get("temperature", 0.7)
+        }
+
+        data = dumps(json_data)
+
+        headers = {
+            "accept" : "text/event-stream",
+            "accept-language" : "en-US,en;q=0.9",
+            "content-type" : "application/json",
+            "content-length" : str(len(data)),
+            "sec-ch-ua" : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
+            "sec-ch-ua-mobile" : "?0",
+            "sec-ch-ua-platform": "\"Windows\"",
+            "sec-fetch-dest" : "empty",
+            "sec-fetch-mode" : "cors",
+            "sec-fetch-site" : "same-origin",
+            "sec-gpc" : "1",
+            "referrer" : "https://chat.aivvm.com/",
+            "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
+        }
+
+        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
+        response.raise_for_status()
+
+        for chunk in response.iter_content(chunk_size=4096):
+            try:
+                yield chunk.decode("utf-8")
+            except UnicodeDecodeError:
+                yield chunk.decode("unicode-escape")
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+        ]
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
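
The comment near the top of Aivvm.py notes that the `models` mapping can be recreated by sending a POST request to https://chat.aivvm.com/api/models. Below is a minimal sketch of doing that with the same `requests` library the provider already imports; the helper name and the response shape (a JSON list of objects with `id` and `name` fields) are assumptions, not part of this diff:

    import requests

    def fetch_aivvm_models() -> dict:
        # Hypothetical helper: rebuild the `models` dict from the live endpoint,
        # assuming it returns a JSON list of {"id": ..., "name": ...} objects.
        response = requests.post("https://chat.aivvm.com/api/models", json={})
        response.raise_for_status()
        return {entry["id"]: {"id": entry["id"], "name": entry["name"]} for entry in response.json()}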
diff --git a/g4f/Provider/deprecated/ChatForAi.py b/g4f/Provider/deprecated/ChatForAi.py
deleted file mode 100644
index ab4cd89c..00000000
--- a/g4f/Provider/deprecated/ChatForAi.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import annotations
-
-from ...typing import AsyncResult, Messages
-from ...requests import StreamSession
-from ..base_provider import AsyncGeneratorProvider
-
-
-class ChatForAi(AsyncGeneratorProvider):
-    url = "https://chatforai.com"
-    supports_gpt_35_turbo = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        timeout: int = 120,
-        **kwargs
-    ) -> AsyncResult:
-        async with StreamSession(impersonate="chrome107", proxies={"https": proxy}, timeout=timeout) as session:
-            prompt = messages[-1]["content"]
-            data = {
-                "conversationId": "temp",
-                "conversationType": "chat_continuous",
-                "botId": "chat_continuous",
-                "globalSettings":{
-                    "baseUrl": "https://api.openai.com",
-                    "model": model if model else "gpt-3.5-turbo",
-                    "messageHistorySize": 5,
-                    "temperature": 0.7,
-                    "top_p": 1,
-                    **kwargs
-                },
-                "botSettings": {},
-                "prompt": prompt,
-                "messages": messages,
-            }
-            async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
-                response.raise_for_status()
-                async for chunk in response.iter_content():
-                    if b"https://chatforai.store" in chunk:
-                        raise RuntimeError(f"Response: {chunk.decode()}")
-                    yield chunk.decode()
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Opchatgpts.py b/g4f/Provider/deprecated/Opchatgpts.py
deleted file mode 100644
index cc6a133c..00000000
--- a/g4f/Provider/deprecated/Opchatgpts.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from __future__ import annotations
-
-import os, re
-from aiohttp import ClientSession
-
-from ..base_provider import AsyncProvider, format_prompt
-
-
-class Opchatgpts(AsyncProvider):
-    url = "https://opchatgpts.net"
-    supports_gpt_35_turbo = True
-    working = True
-    _nonce = None
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        **kwargs
-    ) -> str:
-        headers = {
-            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "Accept" : "*/*",
-            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "Origin" : "https://opchatgpts.net",
-            "Alt-Used" : "opchatgpts.net",
-            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
-            "Sec-Fetch-Dest" : "empty",
-            "Sec-Fetch-Mode" : "cors",
-            "Sec-Fetch-Site" : "same-origin",
-        }
-        async with ClientSession(
-            headers=headers
-        ) as session:
-            if not cls._nonce:
-                async with session.get(
-                    "https://opchatgpts.net/chatgpt-free-use/",
-                    params={"id": os.urandom(6).hex()},
-                ) as response:
-                    result = re.search(r'data-nonce="(.*?)"', await response.text())
-                    if not result:
-                        raise RuntimeError("No nonce value")
-                    cls._nonce = result.group(1)
-            data = {
-                "_wpnonce": cls._nonce,
-                "post_id": 28,
-                "url": "https://opchatgpts.net/chatgpt-free-use",
-                "action": "wpaicg_chat_shortcode_message",
-                "message": format_prompt(messages),
-                "bot_id": 0
-            }
-            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
-                response.raise_for_status()
-                data = await response.json()
-                if "data" in data:
-                    return data["data"]
-                elif "msg" in data:
-                    raise RuntimeError(data["msg"])
-                else:
-                    raise RuntimeError(f"Response: {data}")
-
-
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
new file mode 100644
index 00000000..8deb1f50
--- /dev/null
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+
+class Vitalentum(AsyncGeneratorProvider):
+    url = "https://app.vitalentum.io"
+    supports_gpt_35_turbo = True
+
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "text/event-stream",
+            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
+            "Origin" : cls.url,
+            "Referer" : cls.url + "/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        conversation = json.dumps({"history": [{
+            "speaker": "human" if message["role"] == "user" else "bot",
+            "text": message["content"],
+        } for message in messages]})
+        data = {
+            "conversation": conversation,
+            "temperature": 0.7,
+            **kwargs
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            async with session.post(f"{cls.url}/api/converse-edge", json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
+                        line = json.loads(line[6:-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index 0644dc52..b37b7edd 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -4,11 +4,11 @@ from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
-from .Opchatgpts import Opchatgpts
from .Lockchat import Lockchat
from .Wewordle import Wewordle
from .Equing import Equing
from .Wuguokai import Wuguokai
from .V50 import V50
from .FastGpt import FastGpt
-from .ChatForAi import ChatForAi
\ No newline at end of file
+from .Aivvm import Aivvm
+from .Vitalentum import Vitalentum
\ No newline at end of file