author     H Lohaus <hlohaus@users.noreply.github.com>  2024-04-05 21:03:12 +0200
committer  GitHub <noreply@github.com>                  2024-04-05 21:03:12 +0200
commit     c791012b218667e1449e1224014f05213fb1fc9c
tree       b4ab92cd92c386b86cdeea1b4d80d3eec43eb02f /g4f
parent     Fix Gemini Proxy Connect call failed (#1768)
parent     Add authless OpenaiChat
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py | 85
-rw-r--r--  g4f/Provider/openai/har_file.py       | 10
-rw-r--r--  g4f/errors.py                         |  3
-rw-r--r--  g4f/models.py                         | 26
-rw-r--r--  g4f/requests/raise_for_status.py      |  5
5 files changed, 78 insertions, 51 deletions
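
Before the per-file diffs, a short usage sketch of what this merge enables. This is an illustration, not part of the commit: it assumes g4f's `ChatCompletion.create` entry point and `provider=` keyword; with no `api_key` passed and no .har file on disk, `OpenaiChat` should fall back to the new `backend-anon` endpoints introduced below.

```python
# Minimal sketch (not part of this commit), assuming g4f's ChatCompletion.create
# entry point. With no api_key and no .har file available, OpenaiChat is
# expected to use the anonymous backend-anon/* routes added in this merge.
import g4f
from g4f.Provider import OpenaiChat

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",  # mapped to OpenaiChat in g4f/models.py below
    messages=[{"role": "user", "content": "Say hello"}],
    provider=OpenaiChat,    # no api_key: the authless flow is exercised
)
print(response)
```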
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 396d73dd..921d7394 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -27,9 +27,9 @@ from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIte
 from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
-from ...errors import MissingAuthError
+from ...errors import MissingAuthError, ResponseError
 from ...providers.conversation import BaseConversation
-from ..openai.har_file import getArkoseAndAccessToken
+from ..openai.har_file import getArkoseAndAccessToken, NoValidHarFileError
 from ... import debug
 
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -37,7 +37,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
 
     url = "https://chat.openai.com"
     working = True
-    needs_auth = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     supports_message_history = True
@@ -56,6 +55,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         prompt: str = None,
         model: str = "",
         messages: Messages = [],
+        action: str = "next",
         **kwargs
     ) -> Response:
         """
@@ -169,14 +169,17 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             The default model name as a string
         """
         if not cls.default_model:
-            async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
+            url = f"{cls.url}/backend-anon/models" if cls._api_key is None else f"{cls.url}/backend-api/models"
+            async with session.get(url, headers=headers) as response:
                 cls._update_request_args(session)
+                if response.status == 401:
+                    raise MissingAuthError('Add a "api_key" or a .har file' if cls._api_key is None else "Invalid api key")
                 await raise_for_status(response)
                 data = await response.json()
                 if "categories" in data:
                     cls.default_model = data["categories"][-1]["default_model"]
                     return cls.default_model
-            raise RuntimeError(f"Response: {data}")
+            raise ResponseError(data)
         return cls.default_model
 
     @classmethod
@@ -330,39 +333,42 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Raises:
             RuntimeError: If an error occurs during processing.
         """
-        if parent_id is None:
-            parent_id = str(uuid.uuid4())
-
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
+            if cls._headers is None or cookies is not None:
+                cls._create_request_args(cookies)
             api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
-            if api_key is not None:
-                cls._create_request_args(cookies)
             cls._set_api_key(api_key)
-            if cls.default_model is None and cls._headers is not None:
+            if cls.default_model is None and cls._api_key is not None:
                 try:
                     if not model:
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
                     else:
                         cls.default_model = cls.get_model(model)
                 except Exception as e:
+                    api_key = cls._api_key = None
+                    cls._create_request_args()
                     if debug.logging:
                         print("OpenaiChat: Load default_model failed")
                         print(f"{e.__class__.__name__}: {e}")
 
             arkose_token = None
             if cls.default_model is None:
-                arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
-                cls._create_request_args(cookies)
-                cls._set_api_key(api_key)
+                try:
+                    arkose_token, api_key, cookies = await getArkoseAndAccessToken(proxy)
+                    cls._create_request_args(cookies)
+                    cls._set_api_key(api_key)
+                except NoValidHarFileError:
+                    ...
                 cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
 
             async with session.post(
-                f"{cls.url}/backend-api/sentinel/chat-requirements",
+                f"{cls.url}/backend-anon/sentinel/chat-requirements" if not cls._api_key else f"{cls.url}/backend-api/sentinel/chat-requirements",
                 json={"conversation_mode_kind": "primary_assistant"},
                 headers=cls._headers
@@ -389,17 +395,22 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     print(f"{e.__class__.__name__}: {e}")
 
         model = cls.get_model(model).replace("gpt-3.5-turbo", "text-davinci-002-render-sha")
-        fields = Conversation(conversation_id, parent_id) if conversation is None else copy(conversation)
-        fields.finish_reason = None
-        while fields.finish_reason is None:
+        if conversation is None:
+            conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
+        else:
+            conversation = copy(conversation)
+        if cls._api_key is None:
+            auto_continue = False
+        conversation.finish_reason = None
+        while conversation.finish_reason is None:
             websocket_request_id = str(uuid.uuid4())
             data = {
                 "action": action,
                 "conversation_mode": {"kind": "primary_assistant"},
                 "force_paragen": False,
                 "force_rate_limit": False,
-                "conversation_id": fields.conversation_id,
-                "parent_message_id": fields.message_id,
+                "conversation_id": conversation.conversation_id,
+                "parent_message_id": conversation.message_id,
                 "model": model,
                 "history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
                 "websocket_request_id": websocket_request_id
@@ -415,24 +426,27 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             if need_arkose:
                 headers["OpenAI-Sentinel-Arkose-Token"] = arkose_token
             async with session.post(
-                f"{cls.url}/backend-api/conversation",
+                f"{cls.url}/backend-anon/conversation" if cls._api_key is None else f"{cls.url}/backend-api/conversation",
                 json=data,
                 headers=headers
             ) as response:
                 cls._update_request_args(session)
                 await raise_for_status(response)
-                async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
+                async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, conversation):
                     if return_conversation:
                         history_disabled = False
                         return_conversation = False
-                        yield fields
+                        yield conversation
                     yield chunk
-            if not auto_continue:
+            if auto_continue and conversation.finish_reason == "max_tokens":
+                conversation.finish_reason = None
+                action = "continue"
+                await asyncio.sleep(5)
+            else:
                 break
-            action = "continue"
-            await asyncio.sleep(5)
         if history_disabled and auto_continue:
-            await cls.delete_conversation(session, cls._headers, fields.conversation_id)
+            await cls.delete_conversation(session, cls._headers, conversation.conversation_id)
 
     @staticmethod
     async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
@@ -595,14 +609,27 @@ this.fetch = async (url, options) => {
         return data["accessToken"]
 
     @staticmethod
+    def get_default_headers() -> dict:
+        return {
+            "accept-language": "en-US",
+            "content-type": "application/json",
+            "oai-device-id": str(uuid.uuid4()),
+            "oai-language": "en-US",
+            "sec-ch-ua": "\"Chromium\";v=\"122\", \"Not(A:Brand\";v=\"24\", \"Google Chrome\";v=\"122\"",
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": "\"Linux\"",
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin"
+        }
+
+    @staticmethod
     def _format_cookies(cookies: Cookies):
         return "; ".join(f"{k}={v}" for k, v in cookies.items() if k != "access_token")
 
     @classmethod
-    def _create_request_args(cls, cookies: Union[Cookies, None]):
-        cls._headers = {
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36' - } + def _create_request_args(cls, cookies: Cookies = None): + cls._headers = cls.get_default_headers() cls._cookies = {} if cookies is None else cookies cls._update_cookie_header() diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py index 68fe7420..8936c131 100644 --- a/g4f/Provider/openai/har_file.py +++ b/g4f/Provider/openai/har_file.py @@ -11,6 +11,9 @@ from copy import deepcopy from .crypt import decrypt, encrypt from ...requests import StreamSession +class NoValidHarFileError(Exception): + ... + class arkReq: def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent): self.arkURL = arkURL @@ -39,7 +42,7 @@ def readHAR(): if harPath: break if not harPath: - raise RuntimeError("No .har file found") + raise NoValidHarFileError("No .har file found") for path in harPath: with open(path, 'rb') as file: try: @@ -54,7 +57,7 @@ def readHAR(): accessToken = json.loads(v["response"]["content"]["text"]).get("accessToken") cookies = {c['name']: c['value'] for c in v['request']['cookies']} if not accessToken: - RuntimeError("No accessToken found in .har files") + raise NoValidHarFileError("No accessToken found in .har files") if not chatArks: return None, accessToken, cookies return chatArks.pop(), accessToken, cookies @@ -75,9 +78,6 @@ def parseHAREntry(entry) -> arkReq: return tmpArk def genArkReq(chatArk: arkReq) -> arkReq: - if not chatArk: - raise RuntimeError("No .har file with arkose found") - tmpArk: arkReq = deepcopy(chatArk) if tmpArk is None or not tmpArk.arkBody or not tmpArk.arkHeader: raise RuntimeError("The .har file is not valid") diff --git a/g4f/errors.py b/g4f/errors.py index 0cb12884..6cb5d177 100644 --- a/g4f/errors.py +++ b/g4f/errors.py @@ -40,5 +40,8 @@ class NoImageResponseError(Exception): class RateLimitError(Exception): ... +class ResponseError(Exception): + ... + class ResponseStatusError(Exception): ...
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 6fcfdd14..7be74dbf 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -10,19 +10,15 @@ from .Provider import (
     ChatgptNext,
     HuggingChat,
     HuggingFace,
-    ChatgptDemo,
-    GptForLove,
+    OpenaiChat,
     ChatgptAi,
     DeepInfra,
-    ChatBase,
     GigaChat,
     Liaobots,
     FreeGpt,
     Llama2,
     Vercel,
     Gemini,
-    GptGo,
-    Gpt6,
     Bing,
     You,
     Pi,
@@ -41,7 +37,7 @@ class Model:
     name: str
     base_provider: str
     best_provider: ProviderType = None
-    
+
     @staticmethod
     def __all__() -> list[str]:
         """Returns a list of all model names."""
@@ -52,9 +48,10 @@ default = Model(
     base_provider = "",
     best_provider = RetryProvider([
         Bing,
-        ChatgptAi, GptGo,
+        ChatgptAi,
         You,
-        Chatgpt4Online
+        Chatgpt4Online,
+        OpenaiChat
     ])
 )
 
@@ -63,11 +60,10 @@ gpt_35_long = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = RetryProvider([
-        FreeGpt, You,
-        Chatgpt4Online,
+        FreeGpt,
+        You,
         ChatgptNext,
-        ChatgptDemo,
-        Gpt6,
+        OpenaiChat,
     ])
 )
 
@@ -75,11 +71,7 @@ gpt_35_long = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider = RetryProvider([
-        GptGo, You,
-        GptForLove, ChatBase,
-        Chatgpt4Online,
-    ])
+    best_provider = OpenaiChat
 )
 
 gpt_4 = Model(
diff --git a/g4f/requests/raise_for_status.py b/g4f/requests/raise_for_status.py
index 9e8e141c..0e91505e 100644
--- a/g4f/requests/raise_for_status.py
+++ b/g4f/requests/raise_for_status.py
@@ -13,12 +13,17 @@ class CloudflareError(ResponseStatusError):
 def is_cloudflare(text: str) -> bool:
     return '<div id="cf-please-wait">' in text or "<title>Just a moment...</title>" in text
 
+def is_openai(text: str) -> bool:
+    return "<p>Unable to load site</p>" in text
+
 async def raise_for_status_async(response: Union[StreamResponse, ClientResponse], message: str = None):
     if response.status in (429, 402):
         raise RateLimitError(f"Response {response.status}: Rate limit reached")
     message = await response.text() if not response.ok and message is None else message
     if response.status == 403 and is_cloudflare(message):
         raise CloudflareError(f"Response {response.status}: Cloudflare detected")
+    elif response.status == 403 and is_openai(message):
+        raise ResponseStatusError(f"Response {response.status}: Bot are detected")
     elif not response.ok:
         raise ResponseStatusError(f"Response {response.status}: {message}")
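
Taken together, the error-handling changes replace the old RuntimeError crashes: a missing .har file now raises NoValidHarFileError, which OpenaiChat swallows before falling back to the anonymous backend, while callers see MissingAuthError on a 401 and ResponseError on an unexpected models payload. Below is a hedged sketch of handling these from client code, assuming g4f's async entry point `ChatCompletion.create_async`; it is an illustration, not part of the commit.

```python
# Hedged sketch (not from the commit): catching the error classes introduced
# above. NoValidHarFileError is handled inside OpenaiChat itself; what reaches
# the caller is MissingAuthError or ResponseError.
import asyncio

import g4f
from g4f.errors import MissingAuthError, ResponseError
from g4f.Provider import OpenaiChat

async def main() -> None:
    try:
        answer = await g4f.ChatCompletion.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Ping"}],
            provider=OpenaiChat,
        )
        print(answer)
    except MissingAuthError:
        # Raised on HTTP 401 when no api_key is set and no usable .har file exists.
        print("Authentication needed: add an api_key or a .har file")
    except ResponseError as e:
        # Raised when the models endpoint returns an unexpected payload.
        print(f"Unexpected response: {e}")

asyncio.run(main())
```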