summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--README.md3
-rw-r--r--g4f/Provider/Chatgpt4Online.py10
-rw-r--r--g4f/Provider/DDG.py5
-rw-r--r--g4f/Provider/FlowGpt.py4
-rw-r--r--g4f/Provider/MagickPenAsk.py3
-rw-r--r--g4f/Provider/Rocks.py56
-rw-r--r--g4f/Provider/__init__.py1
7 files changed, 72 insertions, 10 deletions
diff --git a/README.md b/README.md
index e276d1a7..b85788ca 100644
--- a/README.md
+++ b/README.md
@@ -358,8 +358,7 @@ While we wait for gpt-5, here is a list of new models that are at least better t
| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [f1.cnote.top](https://f1.cnote.top) | `g4f.Provider.Cnote` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DuckDuckGo` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [duckduckgo.com](https://duckduckgo.com/duckchat) | `g4f.Provider.DDG` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [feedough.com](https://www.feedough.com) | `g4f.Provider.Feedough` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
diff --git a/g4f/Provider/Chatgpt4Online.py b/g4f/Provider/Chatgpt4Online.py
index d55be65b..f62ef8af 100644
--- a/g4f/Provider/Chatgpt4Online.py
+++ b/g4f/Provider/Chatgpt4Online.py
@@ -13,7 +13,12 @@ class Chatgpt4Online(AsyncGeneratorProvider):
api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
working = True
supports_gpt_4 = True
-
+
+ async def get_nonce():
+ async with ClientSession() as session:
+ async with session.post(f"https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
+ return (await response.json())["restNonce"]
+
@classmethod
async def create_async_generator(
cls,
@@ -37,7 +42,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
- "x-wp-nonce": "d9505e9877",
+ "x-wp-nonce": await cls.get_nonce(),
}
async with ClientSession(headers=headers) as session:
@@ -69,3 +74,4 @@ class Chatgpt4Online(AsyncGeneratorProvider):
continue
yield full_response
+
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 91ccde32..6146994b 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -18,14 +18,13 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin):
supports_gpt_35_turbo = True
supports_message_history = True
- # default_model = "gpt-3.5-turbo-0125"
default_model = "gpt-4o-mini"
- models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Llama-3-70b-chat-hf", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
+ models = ["gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "mistralai/Mixtral-8x7B-Instruct-v0.1"]
model_aliases = {
"gpt-4": "gpt-4o-mini",
"gpt-4o": "gpt-4o-mini",
"claude-3-haiku": "claude-3-haiku-20240307",
- "llama-3-70b": "meta-llama/Llama-3-70b-chat-hf",
+ "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
"mixtral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1"
}
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index 6c2aa046..d823a7ab 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -30,7 +30,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"pygmalion-13b",
"chronos-hermes-13b",
"Mixtral-8x7B",
- "Dolphin-2.6-8x7B"
+ "Dolphin-2.6-8x7B",
]
model_aliases = {
"gemini": "google-gemini",
@@ -91,7 +91,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
"generateImage": False,
"generateAudio": False
}
- async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous-encrypted", json=data, proxy=proxy) as response:
+ async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content:
if chunk.strip():
diff --git a/g4f/Provider/MagickPenAsk.py b/g4f/Provider/MagickPenAsk.py
index 54058228..8b7473d8 100644
--- a/g4f/Provider/MagickPenAsk.py
+++ b/g4f/Provider/MagickPenAsk.py
@@ -36,7 +36,8 @@ class MagickPenAsk(AsyncGeneratorProvider, ProviderModelMixin):
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ 'X-API-Secret': 'WCASR6ZQJYM85DVDX7'
}
async with ClientSession(headers=headers) as session:
data = {
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
new file mode 100644
index 00000000..8465a6c0
--- /dev/null
+++ b/g4f/Provider/Rocks.py
@@ -0,0 +1,56 @@
+import json
+from aiohttp import ClientSession
+
+from ..typing import Messages, AsyncResult
+from .base_provider import AsyncGeneratorProvider
+
+class Rocks(AsyncGeneratorProvider):
+    url = "https://api.discord.rocks"
+    api_endpoint = "/chat/completions"
+    supports_message_history = False
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
+
+        headers = {
+            "Accept": "application/json",
+            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Language": "en-US,en;q=0.9",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/en",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+        }
+
+        async with ClientSession() as session:
+            async with session.post(
+                f"{cls.url}{cls.api_endpoint}",
+                json=payload,
+                proxy=proxy,
+                headers=headers
+            ) as response:
+                response.raise_for_status()
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        try:  # skip malformed SSE chunks (e.g. "data: [DONE]") only
+                            message = json.loads(line[6:])
+                        except json.JSONDecodeError:
+                            continue
+                        chunk = message["choices"][0]["delta"].get("content")
+                        if chunk:
+                            yield chunk
+                    elif line.startswith(b"\n"):
+                        pass
+                    else:
+                        raise Exception(f"Unexpected line: {line}")
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 8bbf71b3..fa1dcfe5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -52,6 +52,7 @@ from .Pizzagpt import Pizzagpt
from .Reka import Reka
from .Replicate import Replicate
from .ReplicateHome import ReplicateHome
+from .Rocks import Rocks
from .TeachAnything import TeachAnything
from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo