author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-27 11:55:40 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-27 11:55:40 +0100
commit     84812b9632cae2dc4811222a2f31d42cb807a221 (patch)
tree       f7dda40a968be33ee87e78187a2bf41bb78cf973 /g4f/Provider
parent     Add support for message history and system message in OpenaiChat (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/AiChatOnline.py           |  2
-rw-r--r--  g4f/Provider/Aura.py                   |  5
-rw-r--r--  g4f/Provider/ChatgptAi.py              |  2
-rw-r--r--  g4f/Provider/ChatgptDemo.py            |  2
-rw-r--r--  g4f/Provider/ChatgptNext.py            |  5
-rw-r--r--  g4f/Provider/Chatxyz.py                |  2
-rw-r--r--  g4f/Provider/FlowGpt.py                |  8
-rw-r--r--  g4f/Provider/You.py                    | 19
-rw-r--r--  g4f/Provider/__init__.py               |  2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  | 33

10 files changed, 47 insertions(+), 33 deletions(-)
diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/AiChatOnline.py
index dc774fe0..cc3b5b8e 100644
--- a/g4f/Provider/AiChatOnline.py
+++ b/g4f/Provider/AiChatOnline.py
@@ -9,7 +9,7 @@ from .helper import get_random_string
 
 class AiChatOnline(AsyncGeneratorProvider):
     url = "https://aichatonline.org"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = False
 
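A minimal sketch of how the `working` flag flipped above (and in the ChatgptAi, ChatgptDemo, and Chatxyz hunks below) is typically consumed downstream; the hand-built candidate list is an assumption for illustration, not part of this commit:

    # Hypothetical filter: skip providers whose `working` flag is False.
    from g4f.Provider import AiChatOnline, ChatgptAi, ChatgptDemo, Chatxyz

    candidates = [AiChatOnline, ChatgptAi, ChatgptDemo, Chatxyz]
    usable = [p for p in candidates if p.working]
    # After this commit all four are marked working = False, so `usable` is empty.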
diff --git a/g4f/Provider/Aura.py b/g4f/Provider/Aura.py
index 126c8d0f..d8f3471c 100644
--- a/g4f/Provider/Aura.py
+++ b/g4f/Provider/Aura.py
@@ -6,9 +6,8 @@ from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 class Aura(AsyncGeneratorProvider):
-    url = "https://openchat.team"
-    working = True
-    supports_gpt_35_turbo = True
+    url = "https://openchat.team"
+    working = True
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/ChatgptAi.py b/g4f/Provider/ChatgptAi.py
index f2785364..a38aea5e 100644
--- a/g4f/Provider/ChatgptAi.py
+++ b/g4f/Provider/ChatgptAi.py
@@ -9,7 +9,7 @@ from .base_provider import AsyncGeneratorProvider
 
 class ChatgptAi(AsyncGeneratorProvider):
     url = "https://chatgpt.ai"
-    working = True
+    working = False
     supports_message_history = True
     supports_gpt_35_turbo = True
     _system = None
diff --git a/g4f/Provider/ChatgptDemo.py b/g4f/Provider/ChatgptDemo.py
index 2f25477a..666b5753 100644
--- a/g4f/Provider/ChatgptDemo.py
+++ b/g4f/Provider/ChatgptDemo.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
 class ChatgptDemo(AsyncGeneratorProvider):
     url = "https://chat.chatgptdemo.net"
     supports_gpt_35_turbo = True
-    working = True
+    working = False
 
     @classmethod
     async def create_async_generator(
diff --git a/g4f/Provider/ChatgptNext.py b/g4f/Provider/ChatgptNext.py
index c107a0bf..1ae37bd5 100644
--- a/g4f/Provider/ChatgptNext.py
+++ b/g4f/Provider/ChatgptNext.py
@@ -4,8 +4,7 @@ import json
 
 from aiohttp import ClientSession
 
 from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ..providers.base_provider import AsyncGeneratorProvider
 
 class ChatgptNext(AsyncGeneratorProvider):
@@ -24,7 +23,7 @@ class ChatgptNext(AsyncGeneratorProvider):
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
             "Accept": "text/event-stream",
             "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
             "Accept-Encoding": "gzip, deflate, br",
diff --git a/g4f/Provider/Chatxyz.py b/g4f/Provider/Chatxyz.py
index feb09be9..dd1216aa 100644
--- a/g4f/Provider/Chatxyz.py
+++ b/g4f/Provider/Chatxyz.py
@@ -8,7 +8,7 @@ from .base_provider import AsyncGeneratorProvider
 
 class Chatxyz(AsyncGeneratorProvider):
     url = "https://chat.3211000.xyz"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = True
 
diff --git a/g4f/Provider/FlowGpt.py b/g4f/Provider/FlowGpt.py
index 39192bf9..b466a2e6 100644
--- a/g4f/Provider/FlowGpt.py
+++ b/g4f/Provider/FlowGpt.py
@@ -51,12 +51,16 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
             "TE": "trailers"
         }
         async with ClientSession(headers=headers) as session:
+            history = [message for message in messages[:-1] if message["role"] != "system"]
+            system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+            if not system_message:
+                system_message = "You are helpful assistant. Follow the user's instructions carefully."
             data = {
                 "model": model,
                 "nsfw": False,
                 "question": messages[-1]["content"],
-                "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *messages[:-1]],
-                "system": kwargs.get("system_message", "You are helpful assistant. Follow the user's instructions carefully."),
+                "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
+                "system": system_message,
                 "temperature": kwargs.get("temperature", 0.7),
                 "promptId": f"model-{model}",
                 "documentIds": [],
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 34130c47..b21fd582 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -3,9 +3,9 @@ from __future__ import annotations
 import json
 import base64
 import uuid
-from aiohttp import ClientSession, FormData
+from aiohttp import ClientSession, FormData, BaseConnector
 
-from ..typing import AsyncGenerator, Messages, ImageType, Cookies
+from ..typing import AsyncResult, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider
 from ..providers.helper import get_connector, format_prompt
 from ..image import to_bytes
@@ -26,12 +26,13 @@ class You(AsyncGeneratorProvider):
         messages: Messages,
         image: ImageType = None,
         image_name: str = None,
+        connector: BaseConnector = None,
         proxy: str = None,
         chat_mode: str = "default",
         **kwargs,
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         async with ClientSession(
-            connector=get_connector(kwargs.get("connector"), proxy),
+            connector=get_connector(connector, proxy),
             headers=DEFAULT_HEADERS
         ) as client:
             if image:
@@ -72,13 +73,13 @@ class You(AsyncGeneratorProvider):
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b'event: '):
-                        event = line[7:-1]
+                        event = line[7:-1].decode()
                     elif line.startswith(b'data: '):
-                        if event == b"youChatUpdate" or event == b"youChatToken":
+                        if event in ["youChatUpdate", "youChatToken"]:
                             data = json.loads(line[6:-1])
-                            if event == b"youChatToken" and "youChatToken" in data:
-                                yield data["youChatToken"]
-                            elif event == b"youChatUpdate" and "t" in data:
+                            if event == "youChatToken" and event in data:
+                                yield data[event]
+                            elif event == "youChatUpdate" and "t" in data:
                                 yield data["t"]
 
     @classmethod
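A self-contained sketch of the decoded event handling introduced above: "event" is now a plain str, so the byte-string comparisons disappear and the token can be looked up with the event name itself. The sample byte lines are invented:

    import json

    def parse_sse(lines):
        # Mirrors the loop in You.create_async_generator on already-read lines.
        event = None
        for line in lines:
            if line.startswith(b"event: "):
                event = line[7:-1].decode()  # strip "event: " and trailing newline
            elif line.startswith(b"data: ") and event in ["youChatUpdate", "youChatToken"]:
                data = json.loads(line[6:-1])
                if event == "youChatToken" and event in data:
                    yield data[event]
                elif event == "youChatUpdate" and "t" in data:
                    yield data["t"]

    tokens = list(parse_sse([b"event: youChatToken\n", b'data: {"youChatToken": "Hi"}\n']))
    # tokens -> ["Hi"]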
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 6cdc8806..52ba0274 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ..providers.types import BaseProvider, ProviderType
-from ..providers.retry_provider import RetryProvider
+from ..providers.retry_provider import RetryProvider, IterProvider
 from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider
 
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 8c2668ab..8154bc44 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -334,39 +334,49 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         # Read api_key from args
         api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
+        # If no cached args
         if cls._args is None:
             if api_key is None:
                 # Read api_key from cookies
                 cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
                 api_key = cookies["access_token"] if "access_token" in cookies else api_key
             cls._args = cls._create_request_args(cookies)
-
+        else:
+            # Read api_key from cache
+            api_key = cls._args["headers"]["Authorization"] if "Authorization" in cls._args["headers"] else None
+
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
+            # Read api_key from session cookies
             if api_key is None and cookies:
-                # Read api_key from session
                 api_key = await cls.fetch_access_token(session, cls._args["headers"])
-
-            if api_key is not None:
-                cls._args["headers"]["Authorization"] = f"Bearer {api_key}"
+            # Load default model
+            if cls.default_model is None:
                 try:
-                    cls.default_model = await cls.get_default_model(session, cls._args["headers"])
+                    if cookies and not model and api_key is not None:
+                        cls._args["headers"]["Authorization"] = api_key
+                        cls.default_model = cls.get_model(await cls.get_default_model(session, cls._args["headers"]))
+                    elif api_key:
+                        cls.default_model = cls.get_model(model or "gpt-3.5-turbo")
                 except Exception as e:
                     if debug.logging:
+                        print("OpenaiChat: Load default_model failed")
                         print(f"{e.__class__.__name__}: {e}")
-
-            if cls.default_model is None:
+            # Browse api_key and update default model
+            if api_key is None or cls.default_model is None:
                 login_url = os.environ.get("G4F_LOGIN_URL")
                 if login_url:
                     yield f"Please login: [ChatGPT]({login_url})\n\n"
                 try:
                     cls._args = cls.browse_access_token(proxy)
                 except MissingRequirementsError:
-                    raise MissingAuthError(f'Missing or invalid "access_token". Add a new "api_key" please')
-                cls.default_model = await cls.get_default_model(session, cls._args["headers"])
+                    raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
+                cls.default_model = cls.get_model(await cls.get_default_model(session, cls._args["headers"]))
+            else:
+                cls._args["headers"]["Authorization"] = api_key
 
             try:
                 image_response = await cls.upload_image(
@@ -409,7 +419,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             ) as response:
                 cls._update_request_args(session)
                 if not response.ok:
-                    raise RuntimeError(f"Response {response.status}: {await response.text()}")
+                    message = f"{await response.text()} headers:\n{json.dumps(cls._args['headers'], indent=4)}"
+                    raise RuntimeError(f"Response {response.status}: {message}")
                 last_message: int = 0
                 async for line in response.iter_lines():
                     if not line.startswith(b"data: "):
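Condensed, the reworked flow above resolves the access token in a fixed order and only falls back to a browser login when the token or the default model is still missing. A hypothetical summary of that order (the helper name and signature are invented):

    def resolve_api_key(kwargs: dict, cached_headers: dict, cookies: dict):
        # 1. explicit argument, 2. cached Authorization header,
        # 3. "access_token" cookie; otherwise None, which triggers
        # browse_access_token() in the real code.
        if "access_token" in kwargs:
            return kwargs["access_token"]
        if cached_headers and "Authorization" in cached_headers:
            return cached_headers["Authorization"]
        if cookies and "access_token" in cookies:
            return cookies["access_token"]
        return None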