path: root/g4f/Provider/not_working
author     H Lohaus <hlohaus@users.noreply.github.com>   2024-03-12 02:06:06 +0100
committer  GitHub <noreply@github.com>                   2024-03-12 02:06:06 +0100
commit     6ef282de3a3245acbfecd08ae48dba85ff91d031 (patch)
tree       0236c9678eea8f9c78ed7c09f3d86eaf3d7c691c /g4f/Provider/not_working
parent     Update .gitignore (diff)
Diffstat (limited to '')
-rw-r--r--  g4f/Provider/not_working/AItianhu.py       (renamed from g4f/Provider/AItianhu.py)         6
-rw-r--r--  g4f/Provider/not_working/Bestim.py         (renamed from g4f/Provider/Bestim.py)         112
-rw-r--r--  g4f/Provider/not_working/ChatBase.py       (renamed from g4f/Provider/ChatBase.py)         6
-rw-r--r--  g4f/Provider/not_working/ChatgptDemo.py    (renamed from g4f/Provider/ChatgptDemo.py)     50
-rw-r--r--  g4f/Provider/not_working/ChatgptDemoAi.py  (renamed from g4f/Provider/ChatgptDemoAi.py)    7
-rw-r--r--  g4f/Provider/not_working/ChatgptLogin.py   (renamed from g4f/Provider/ChatgptLogin.py)     8
-rw-r--r--  g4f/Provider/not_working/Chatxyz.py        (renamed from g4f/Provider/Chatxyz.py)          4
-rw-r--r--  g4f/Provider/not_working/Gpt6.py           (renamed from g4f/Provider/Gpt6.py)             9
-rw-r--r--  g4f/Provider/not_working/GptChatly.py      (renamed from g4f/Provider/GptChatly.py)        8
-rw-r--r--  g4f/Provider/not_working/GptForLove.py     (renamed from g4f/Provider/GptForLove.py)      10
-rw-r--r--  g4f/Provider/not_working/GptGo.py          (renamed from g4f/Provider/GptGo.py)           10
-rw-r--r--  g4f/Provider/not_working/GptGod.py         (renamed from g4f/Provider/GptGod.py)           8
-rw-r--r--  g4f/Provider/not_working/OnlineGpt.py      (renamed from g4f/Provider/OnlineGpt.py)        9
-rw-r--r--  g4f/Provider/not_working/__init__.py                                                      14
14 files changed, 140 insertions, 121 deletions
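
Taken together, the renames above park thirteen providers under a new not_working subpackage. A minimal sketch of what that means for callers, assuming g4f is installed at this commit (whether g4f/Provider/__init__.py still re-exports these classes is not shown in this diff):

# Hypothetical usage after the move; ChatBase is picked arbitrarily from the diffstat.
from g4f.Provider.not_working import ChatBase

print(ChatBase.url)      # "https://www.chatbase.co"
print(ChatBase.working)  # False - these providers are parked because they no longer respond
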
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/not_working/AItianhu.py
index 34187694..501b334e 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/not_working/AItianhu.py
@@ -2,9 +2,9 @@ from __future__ import annotations
import json
-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
class AItianhu(AsyncGeneratorProvider):
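
The only change to AItianhu is the import depth: the module now sits one package deeper, so every relative import gains a dot. A small sketch of why the dots change, using only the standard library (no g4f install required):

import importlib.util

# Before the move, AItianhu.py lived in g4f/Provider/, so ".." reached g4f:
print(importlib.util.resolve_name("..typing", package="g4f.Provider"))
# -> g4f.typing

# After the move it lives in g4f/Provider/not_working/, so "..." is needed
# to reach the same ancestor package:
print(importlib.util.resolve_name("...typing", package="g4f.Provider.not_working"))
# -> g4f.typing
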
diff --git a/g4f/Provider/Bestim.py b/g4f/Provider/not_working/Bestim.py
index 323bd713..94a4d32b 100644
--- a/g4f/Provider/Bestim.py
+++ b/g4f/Provider/not_working/Bestim.py
@@ -1,56 +1,56 @@
-from __future__ import annotations
-
-from ..typing import Messages
-from .base_provider import BaseProvider, CreateResult
-from ..requests import get_session_from_browser
-from uuid import uuid4
-
-class Bestim(BaseProvider):
- url = "https://chatgpt.bestim.org"
- supports_gpt_35_turbo = True
- supports_message_history = True
- working = False
- supports_stream = True
-
- @classmethod
- def create_completion(
- cls,
- model: str,
- messages: Messages,
- stream: bool,
- proxy: str = None,
- **kwargs
- ) -> CreateResult:
- session = get_session_from_browser(cls.url, proxy=proxy)
- headers = {
- 'Accept': 'application/json, text/event-stream',
- }
- data = {
- "messagesHistory": [{
- "id": str(uuid4()),
- "content": m["content"],
- "from": "you" if m["role"] == "user" else "bot"
- } for m in messages],
- "type": "chat",
- }
- response = session.post(
- url="https://chatgpt.bestim.org/chat/send2/",
- json=data,
- headers=headers,
- stream=True
- )
- response.raise_for_status()
- for line in response.iter_lines():
- if not line.startswith(b"event: trylimit"):
- yield line.decode().removeprefix("data: ")
-
-
-
-
-
-
-
-
-
-
-
+from __future__ import annotations
+
+from ...typing import Messages
+from ..base_provider import BaseProvider, CreateResult
+from ...requests import get_session_from_browser
+from uuid import uuid4
+
+class Bestim(BaseProvider):
+ url = "https://chatgpt.bestim.org"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_message_history = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs
+ ) -> CreateResult:
+ session = get_session_from_browser(cls.url, proxy=proxy)
+ headers = {
+ 'Accept': 'application/json, text/event-stream',
+ }
+ data = {
+ "messagesHistory": [{
+ "id": str(uuid4()),
+ "content": m["content"],
+ "from": "you" if m["role"] == "user" else "bot"
+ } for m in messages],
+ "type": "chat",
+ }
+ response = session.post(
+ url="https://chatgpt.bestim.org/chat/send2/",
+ json=data,
+ headers=headers,
+ stream=True
+ )
+ response.raise_for_status()
+ for line in response.iter_lines():
+ if not line.startswith(b"event: trylimit"):
+ yield line.decode().removeprefix("data: ")
+
+
+
+
+
+
+
+
+
+
+
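
Bestim itself only moves and has its flags reordered; the streaming logic is unchanged. For reference, the same pattern with plain requests instead of get_session_from_browser (endpoint and payload copied from the diff; the provider is marked working = False, so treat this purely as an illustration of the line filtering):

import requests
from uuid import uuid4

resp = requests.post(
    "https://chatgpt.bestim.org/chat/send2/",
    json={
        "messagesHistory": [
            {"id": str(uuid4()), "content": "Hello", "from": "you"},
        ],
        "type": "chat",
    },
    headers={"Accept": "application/json, text/event-stream"},
    stream=True,
)
resp.raise_for_status()
for line in resp.iter_lines():
    # drop the "event: trylimit" marker lines, keep only the data payload
    if line and not line.startswith(b"event: trylimit"):
        print(line.decode().removeprefix("data: "), end="")
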
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/not_working/ChatBase.py
index 996ca39a..ef1c8f99 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/not_working/ChatBase.py
@@ -2,15 +2,15 @@ from __future__ import annotations
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class ChatBase(AsyncGeneratorProvider):
url = "https://www.chatbase.co"
+ working = False
supports_gpt_35_turbo = True
supports_message_history = True
- working = True
jailbreak = True
list_incorrect_responses = ["support@chatbase",
"about Chatbase"]
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/not_working/ChatgptDemo.py
index 666b5753..593a2d29 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/not_working/ChatgptDemo.py
@@ -1,16 +1,17 @@
from __future__ import annotations
-import time, json, re
+import time, json, re, asyncio
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class ChatgptDemo(AsyncGeneratorProvider):
- url = "https://chat.chatgptdemo.net"
- supports_gpt_35_turbo = True
+ url = "https://chatgptdemo.info/chat"
working = False
+ supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
@@ -21,10 +22,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
**kwargs
) -> AsyncResult:
headers = {
- "authority": "chat.chatgptdemo.net",
- "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
- "origin": "https://chat.chatgptdemo.net",
- "referer": "https://chat.chatgptdemo.net/",
+ "authority": "chatgptdemo.info",
+ "accept-language": "en-US",
+ "origin": "https://chatgptdemo.info",
+ "referer": "https://chatgptdemo.info/chat/",
"sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
@@ -36,28 +37,29 @@ class ChatgptDemo(AsyncGeneratorProvider):
async with ClientSession(headers=headers) as session:
async with session.get(f"{cls.url}/", proxy=proxy) as response:
response.raise_for_status()
- response = await response.text()
-
- result = re.search(
- r'<div id="USERID" style="display: none">(.*?)<\/div>',
- response,
- )
-
- if result:
- user_id = result.group(1)
- else:
- raise RuntimeError("No user id found")
- async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
+ text = await response.text()
+ result = re.search(
+ r'<div id="USERID" style="display: none">(.*?)<\/div>',
+ text,
+ )
+ if result:
+ user_id = result.group(1)
+ else:
+ raise RuntimeError("No user id found")
+ async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
response.raise_for_status()
chat_id = (await response.json())["id_"]
if not chat_id:
raise RuntimeError("Could not create new chat")
+ await asyncio.sleep(10)
data = {
"question": format_prompt(messages),
"chat_id": chat_id,
- "timestamp": int(time.time()*1000),
+ "timestamp": int((time.time())*1e3),
}
- async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
+ async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
+ if response.status == 429:
+ raise RateLimitError("Rate limit reached")
response.raise_for_status()
async for line in response.content:
if line.startswith(b"data: "):
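
Besides the new domain, the rewrite above adds a ten-second pause before sending and maps HTTP 429 to RateLimitError. A hedged sketch of how a caller could use that, with names as in the diff (the provider is marked working = False and the model string is illustrative):

import asyncio
from g4f.errors import RateLimitError
from g4f.Provider.not_working import ChatgptDemo

async def main():
    try:
        async for chunk in ChatgptDemo.create_async_generator(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello"}],
        ):
            print(chunk, end="")
    except RateLimitError:
        # HTTP 429 from chat_api_stream now surfaces as a typed error
        print("rate limited, back off and retry later")

asyncio.run(main())
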
diff --git a/g4f/Provider/ChatgptDemoAi.py b/g4f/Provider/not_working/ChatgptDemoAi.py
index a8c98b65..6cdd0c7a 100644
--- a/g4f/Provider/ChatgptDemoAi.py
+++ b/g4f/Provider/not_working/ChatgptDemoAi.py
@@ -3,9 +3,9 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class ChatgptDemoAi(AsyncGeneratorProvider):
url = "https://chat.chatgptdemo.ai"
@@ -49,6 +49,7 @@ class ChatgptDemoAi(AsyncGeneratorProvider):
async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
+ response.raise_for_status()
if chunk.startswith(b"data: "):
data = json.loads(chunk[6:])
if data["type"] == "live":
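
The duplicated raise_for_status() inside the loop aside, ChatgptDemoAi keeps its SSE parsing: each "data: " line is JSON and only chunks whose type is "live" carry text. A self-contained sketch of that filter (the sample lines are made up, and the payload key holding the text is not visible in this hunk, so "data" is an assumption):

import json

sample = [
    b'data: {"type": "live", "data": "Hello"}',
    b'data: {"type": "end", "data": "{}"}',
]
for chunk in sample:
    if chunk.startswith(b"data: "):
        payload = json.loads(chunk[6:])
        if payload["type"] == "live":
            print(payload["data"], end="")  # assumed field name
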
diff --git a/g4f/Provider/ChatgptLogin.py b/g4f/Provider/not_working/ChatgptLogin.py
index 037e0a6e..6e9d57c4 100644
--- a/g4f/Provider/ChatgptLogin.py
+++ b/g4f/Provider/not_working/ChatgptLogin.py
@@ -5,15 +5,15 @@ import time
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class ChatgptLogin(AsyncGeneratorProvider):
url = "https://chatgptlogin.ai"
- supports_gpt_35_turbo = True
working = False
+ supports_gpt_35_turbo = True
_user_id = None
@classmethod
diff --git a/g4f/Provider/Chatxyz.py b/g4f/Provider/not_working/Chatxyz.py
index dd1216aa..a1b3638e 100644
--- a/g4f/Provider/Chatxyz.py
+++ b/g4f/Provider/not_working/Chatxyz.py
@@ -3,8 +3,8 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Chatxyz(AsyncGeneratorProvider):
url = "https://chat.3211000.xyz"
diff --git a/g4f/Provider/Gpt6.py b/g4f/Provider/not_working/Gpt6.py
index b8a294e2..0c1bdcc5 100644
--- a/g4f/Provider/Gpt6.py
+++ b/g4f/Provider/not_working/Gpt6.py
@@ -3,14 +3,12 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
class Gpt6(AsyncGeneratorProvider):
url = "https://gpt6.ai"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
@@ -45,6 +43,7 @@ class Gpt6(AsyncGeneratorProvider):
async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
response.raise_for_status()
async for line in response.content:
+ print(line)
if line.startswith(b"data: [DONE]"):
break
elif line.startswith(b"data: "):
diff --git a/g4f/Provider/GptChatly.py b/g4f/Provider/not_working/GptChatly.py
index 9fb739a8..a1e3dd74 100644
--- a/g4f/Provider/GptChatly.py
+++ b/g4f/Provider/not_working/GptChatly.py
@@ -1,13 +1,13 @@
from __future__ import annotations
-from ..requests import Session, get_session_from_browser
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ...requests import Session, get_session_from_browser
+from ...typing import Messages
+from ..base_provider import AsyncProvider
class GptChatly(AsyncProvider):
url = "https://gptchatly.com"
- working = True
+ working = False
supports_message_history = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/GptForLove.py b/g4f/Provider/not_working/GptForLove.py
index cc82da21..4c578227 100644
--- a/g4f/Provider/GptForLove.py
+++ b/g4f/Provider/not_working/GptForLove.py
@@ -9,14 +9,14 @@ try:
except ImportError:
has_requirements = False
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError
class GptForLove(AsyncGeneratorProvider):
url = "https://ai18.gptforlove.com"
- working = True
+ working = False
supports_gpt_35_turbo = True
@classmethod
diff --git a/g4f/Provider/GptGo.py b/g4f/Provider/not_working/GptGo.py
index 538bb7b6..363aabea 100644
--- a/g4f/Provider/GptGo.py
+++ b/g4f/Provider/not_working/GptGo.py
@@ -4,14 +4,14 @@ from aiohttp import ClientSession
import json
import base64
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt
class GptGo(AsyncGeneratorProvider):
url = "https://gptgo.ai"
+ working = False
supports_gpt_35_turbo = True
- working = True
@classmethod
async def create_async_generator(
@@ -44,6 +44,8 @@ class GptGo(AsyncGeneratorProvider):
) as response:
response.raise_for_status()
token = await response.text();
+ if token == "error token":
+ raise RuntimeError(f"Response: {token}")
token = base64.b64decode(token[10:-20]).decode()
async with session.get(
@@ -57,6 +59,8 @@ class GptGo(AsyncGeneratorProvider):
break
if line.startswith(b"data: "):
line = json.loads(line[6:])
+ if "choices" not in line:
+ raise RuntimeError(f"Response: {line}")
content = line["choices"][0]["delta"].get("content")
if content and content != "\n#GPTGO ":
yield content
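
GptGo gains two guards: an explicit check for the literal "error token" response and a check that streamed JSON actually contains "choices". The surviving token handling decodes a base64 core after slicing off a fixed prefix and suffix; a tiny self-contained illustration of that slicing (the wrapper characters are invented, only the [10:-20] arithmetic matters):

import base64

inner = base64.b64encode(b"secret-session-token").decode()
token = "x" * 10 + inner + "y" * 20   # hypothetical 10-char prefix, 20-char suffix
assert base64.b64decode(token[10:-20]) == b"secret-session-token"
print("decoded:", base64.b64decode(token[10:-20]).decode())
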
diff --git a/g4f/Provider/GptGod.py b/g4f/Provider/not_working/GptGod.py
index 08d9269e..46b40645 100644
--- a/g4f/Provider/GptGod.py
+++ b/g4f/Provider/not_working/GptGod.py
@@ -4,14 +4,14 @@ import secrets
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
class GptGod(AsyncGeneratorProvider):
url = "https://gptgod.site"
- supports_gpt_35_turbo = True
working = False
+ supports_gpt_35_turbo = True
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/OnlineGpt.py b/g4f/Provider/not_working/OnlineGpt.py
index 9f0d11c4..f4f3a846 100644
--- a/g4f/Provider/OnlineGpt.py
+++ b/g4f/Provider/not_working/OnlineGpt.py
@@ -3,14 +3,13 @@ from __future__ import annotations
import json
from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
-
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string
class OnlineGpt(AsyncGeneratorProvider):
url = "https://onlinegpt.org"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_message_history = False
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..4778c968
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,14 @@
+
+from .AItianhu import AItianhu
+from .Bestim import Bestim
+from .ChatBase import ChatBase
+from .ChatgptDemo import ChatgptDemo
+from .ChatgptDemoAi import ChatgptDemoAi
+from .ChatgptLogin import ChatgptLogin
+from .Chatxyz import Chatxyz
+from .Gpt6 import Gpt6
+from .GptChatly import GptChatly
+from .GptForLove import GptForLove
+from .GptGo import GptGo
+from .GptGod import GptGod
+from .OnlineGpt import OnlineGpt
\ No newline at end of file
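
Finally, the new __init__.py re-exports every parked provider, so the whole group can be inspected in one place. A short sketch, assuming g4f is importable at this commit:

import g4f.Provider.not_working as not_working

for name in sorted(vars(not_working)):
    obj = getattr(not_working, name)
    if isinstance(obj, type) and hasattr(obj, "url"):
        # every class moved here carries working = False
        print(f"{name:15} working={obj.working}  {obj.url}")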