author     Heiner Lohaus <hlohaus@users.noreply.github.com>    2024-03-14 13:53:57 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>    2024-03-14 13:53:57 +0100
commit     993c9498c4276836864b01f66f5d08676a994520 (patch)
tree       9d0c3e2544669e09aacdcf635fec9095270c08bd /g4f/Provider
parent     Add model preselection in gui (diff)
Diffstat
-rw-r--r--  g4f/Provider/Bing.py                   |   4
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  | 164
2 files changed, 119 insertions, 49 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index e3e47af9..f8b13020 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -303,7 +303,7 @@ def create_message(
         struct['arguments'][0]['previousMessages'] = [{
             "author": "user",
             "description": context,
-            "contextType": "WebPage",
+            "contextType": "ClientApp",
             "messageType": "Context",
             "messageId": "discover-web--page-ping-mriduna-----"
         }]
@@ -404,6 +404,8 @@ async def stream_generate(
                         image_client = BingCreateImages(cookies, proxy)
                         image_response = await image_client.create_async(prompt)
                     except Exception as e:
+                        if debug.logging:
+                            print(f"Bing: Failed to create images: {e}")
                         response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                     do_read = False
                 if response_txt.startswith(returned_text):
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 3d19e003..b501d655 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -15,6 +15,12 @@ except ImportError:
     has_arkose_generator = False
 
 try:
+    import webview
+    has_webview = True
+except ImportError:
+    has_webview = False
+
+try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
     from selenium.webdriver.support import expected_conditions as EC
@@ -25,10 +31,10 @@ from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import get_cookies
 from ...webdriver import get_browser
 from ...typing import AsyncResult, Messages, Cookies, ImageType, Union, AsyncIterator
-from ...requests import get_args_from_browser
+from ...requests import get_args_from_browser, raise_for_status
 from ...requests.aiohttp import StreamSession
 from ...image import to_image, to_bytes, ImageResponse, ImageRequest
-from ...errors import MissingRequirementsError, MissingAuthError
+from ...errors import MissingRequirementsError, MissingAuthError, ProviderNotWorkingError
 from ... import debug
 
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -134,7 +140,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             }
             # Post the image data to the service and get the image data
             async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
-                response.raise_for_status()
+                cls._update_request_args()
+                await raise_for_status(response)
                 image_data = {
                     **data,
                     **await response.json(),
@@ -152,14 +159,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     "x-ms-blob-type": "BlockBlob"
                 }
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
             # Post the file ID to the service and get the download URL
             async with session.post(
                 f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
                 json={},
                 headers=headers
             ) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 image_data["download_url"] = (await response.json())["download_url"]
         return ImageRequest(image_data)
 
@@ -178,7 +186,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not cls.default_model:
             async with session.get(f"{cls.url}/backend-api/models", headers=headers) as response:
                 cls._update_request_args(session)
-                response.raise_for_status()
+                await raise_for_status(response)
                 data = await response.json()
                 if "categories" in data:
                     cls.default_model = data["categories"][-1]["default_model"]
@@ -261,7 +269,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 file_id = first_part["asset_pointer"].split("file-service://", 1)[1]
                 try:
                     async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
-                        response.raise_for_status()
+                        cls._update_request_args(session)
+                        await raise_for_status(response)
                         download_url = (await response.json())["download_url"]
                         return ImageResponse(download_url, prompt)
                 except Exception as e:
@@ -288,6 +297,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             json={"is_visible": False},
             headers=headers
         ) as response:
+            cls._update_request_args(session)
             ...
 
     @classmethod
@@ -337,31 +347,32 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
 
         if parent_id is None:
             parent_id = str(uuid.uuid4())
-        # Read api_key from arguments
-        api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
-
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome",
             timeout=timeout
         ) as session:
-            # Read api_key and cookies from cache / browser config
+            api_key = kwargs["access_token"] if "access_token" in kwargs else api_key
             if cls._headers is None or cls._expires is None or time.time() > cls._expires:
-                if api_key is None:
-                    # Read api_key from cookies
+                if cls._headers is None:
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
                     api_key = cookies["access_token"] if "access_token" in cookies else api_key
-                    cls._create_request_args(cookies)
+                if api_key is None:
+                    try:
+                        await cls.webview_access_token() if has_webview else None
+                    except Exception as e:
+                        if debug.logging:
+                            print(f"Use webview failed: {e}")
             else:
                 api_key = cls._api_key if api_key is None else api_key
-            # Read api_key with session cookies
-            #if api_key is None and cookies:
-            #    api_key = await cls.fetch_access_token(session, cls._headers)
-            # Load default model
-            if cls.default_model is None and api_key is not None:
+
+            if api_key is not None:
+                cls._create_request_args(cookies)
+                cls._set_api_key(api_key)
+
+            if cls.default_model is None and cls._headers is not None:
                 try:
                     if not model:
-                        cls._set_api_key(api_key)
                         cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
                     else:
                         cls.default_model = cls.get_model(model)
@@ -369,8 +380,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     if debug.logging:
                         print("OpenaiChat: Load default_model failed")
                         print(f"{e.__class__.__name__}: {e}")
-            # Browse api_key and default model
-            if api_key is None or cls.default_model is None:
+            if cls.default_model is None:
                 login_url = os.environ.get("G4F_LOGIN_URL")
                 if login_url:
                     yield f"Please login: [ChatGPT]({login_url})\n\n"
@@ -379,20 +389,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 except MissingRequirementsError:
                     raise MissingAuthError(f'Missing "access_token". Add a "api_key" please')
                 cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
-            else:
-                cls._set_api_key(api_key)
 
             async with session.post(
                 f"{cls.url}/backend-api/sentinel/chat-requirements",
                 json={"conversation_mode_kind": "primary_assistant"},
                 headers=cls._headers
             ) as response:
-                response.raise_for_status()
+                cls._update_request_args(session)
+                await raise_for_status(response)
                 data = await response.json()
+                blob = data["arkose"]["dx"]
                 need_arkose = data["arkose"]["required"]
                 chat_token = data["token"]
 
             if need_arkose and not has_arkose_generator:
+                raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
                 raise MissingRequirementsError('Install "py-arkose-generator" package')
 
             try:
@@ -407,6 +418,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             while fields.finish_reason is None:
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
+                websocket_request_id = str(uuid.uuid4())
                 data = {
                     "action": action,
                     "conversation_mode": {"kind": "primary_assistant"},
@@ -416,25 +428,29 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     "parent_message_id": parent_id,
                     "model": model,
                     "history_and_training_disabled": history_disabled and not auto_continue,
+                    "websocket_request_id": websocket_request_id
                 }
                 if action != "continue":
                     messages = messages if conversation_id is None else [messages[-1]]
-                    data["messages"] = cls.create_messages(messages, image_request)
+                    data["messages"] = cls.create_messages(messages, image_request)
+                headers = {
+                    "Accept": "text/event-stream",
+                    "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
+                    **cls._headers
+                }
+                if need_arkose:
+                    raise ProviderNotWorkingError("OpenAI Plus Subscriber are not working")
+                    headers["OpenAI-Sentinel-Arkose-Token"] = await cls.get_arkose_token(session, cls._headers, blob)
+                    headers["OpenAI-Sentinel-Chat-Requirements-Token"] = chat_token
 
                 async with session.post(
                     f"{cls.url}/backend-api/conversation",
                     json=data,
-                    headers={
-                        "Accept": "text/event-stream",
-                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
-                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
-                        **cls._headers
-                    }
+                    headers=headers
                 ) as response:
                     cls._update_request_args(session)
-                    if not response.ok:
-                        raise RuntimeError(f"Response {response.status}: {await response.text()}")
-                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
+                    await raise_for_status(response)
+                    async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id):
                         if response_fields:
                             response_fields = False
                             yield fields
@@ -447,21 +463,35 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     await cls.delete_conversation(session, cls._headers, fields.conversation_id)
 
     @staticmethod
-    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
+    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str, is_curl: bool) -> AsyncIterator:
         while True:
-            message = await ws.receive_json()
+            if is_curl:
+                message = json.loads(ws.recv()[0])
+            else:
+                message = await ws.receive_json()
             if message["conversation_id"] == conversation_id:
                 yield base64.b64decode(message["body"])
 
     @classmethod
-    async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
+    async def iter_messages_chunk(
+        cls,
+        messages: AsyncIterator,
+        session: StreamSession,
+        fields: ResponseFields
+    ) -> AsyncIterator:
         last_message: int = 0
         async for message in messages:
             if message.startswith(b'{"wss_url":'):
                 message = json.loads(message)
-                async with session.ws_connect(message["wss_url"]) as ws:
-                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
+                ws = await session.ws_connect(message["wss_url"])
+                try:
+                    async for chunk in cls.iter_messages_chunk(
+                        cls.iter_messages_ws(ws, message["conversation_id"], hasattr(ws, "recv")),
+                        session, fields
+                    ):
                         yield chunk
+                finally:
+                    await ws.aclose()
                 break
             async for chunk in cls.iter_messages_line(session, message, fields):
                 if fields.finish_reason is not None:
@@ -514,6 +544,43 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     fields.finish_reason = line["message"]["metadata"]["finish_details"]["type"]
 
     @classmethod
+    async def webview_access_token(cls) -> str:
+        window = webview.create_window("OpenAI Chat", cls.url)
+        await asyncio.sleep(3)
+        prompt_input = None
+        while not prompt_input:
+            try:
+                await asyncio.sleep(1)
+                prompt_input = window.dom.get_element("#prompt-textarea")
+            except:
+                ...
+        window.evaluate_js("""
+this._fetch = this.fetch;
+this.fetch = async (url, options) => {
+    const response = await this._fetch(url, options);
+    if (url == "https://chat.openai.com/backend-api/conversation") {
+        this._headers = options.headers;
+        return response;
+    }
+    return response;
+};
+""")
+        window.evaluate_js("""
+        document.querySelector('.from-token-main-surface-secondary').click();
+        """)
+        headers = None
+        while headers is None:
+            headers = window.evaluate_js("this._headers")
+            await asyncio.sleep(1)
+        headers["User-Agent"] = window.evaluate_js("window.navigator.userAgent")
+        cookies = [list(*cookie.items()) for cookie in window.get_cookies()]
+        window.destroy()
+        cls._cookies = dict([(name, cookie.value) for name, cookie in cookies])
+        cls._headers = headers
+        cls._expires = int(time.time()) + 60 * 60 * 4
+        cls._update_cookie_header()
+
+    @classmethod
     def browse_access_token(cls, proxy: str = None, timeout: int = 1200) -> None:
         """
         Browse to obtain an access token.
@@ -542,10 +609,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             cls._update_cookie_header()
             cls._set_api_key(access_token)
         finally:
-            driver.close()
+            driver.close()
 
     @classmethod
-    async def get_arkose_token(cls, session: StreamSession) -> str:
+    async def get_arkose_token(cls, session: StreamSession, headers: dict, blob: str) -> str:
         """
         Obtain an Arkose token for the session.
 
@@ -559,16 +626,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             RuntimeError: If unable to retrieve the token.
         """
         config = {
-            "pkey": "3D86FBBA-9D22-402A-B512-3420086BA6CC",
+            "pkey": "35536E1E-65B4-4D96-9D97-6ADB7EFF8147",
             "surl": "https://tcr9i.chat.openai.com",
-            "headers": {
-                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
-            },
+            "headers": headers,
             "site": cls.url,
+            "data": {"blob": blob}
         }
         args_for_request = get_values_for_request(config)
         async with session.post(**args_for_request) as response:
-            response.raise_for_status()
+            await raise_for_status(response)
             decoded_json = await response.json()
             if "token" in decoded_json:
                 return decoded_json["token"]
@@ -591,7 +657,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     def _create_request_args(cls, cookies: Union[Cookies, None]):
-        cls._headers = {}
+        cls._headers = {
+            "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
+        }
         cls._cookies = {} if cookies is None else cookies
         cls._update_cookie_header()
 