Diffstat (limited to 'g4f/Provider')
-rw-r--r-- | g4f/Provider/Bing.py | 24
-rw-r--r-- | g4f/Provider/bing/upload_image.py | 6
-rw-r--r-- | g4f/Provider/needs_auth/OpenaiChat.py | 5
3 files changed, 18 insertions, 17 deletions
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index da9b0172..50e29d23 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -9,9 +9,10 @@ from urllib import parse
 from aiohttp import ClientSession, ClientTimeout
 
 from ..typing import AsyncResult, Messages, ImageType
+from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider
 from .bing.upload_image import upload_image
-from .bing.create_images import create_images, format_images_markdown
+from .bing.create_images import create_images
 from .bing.conversation import Conversation, create_conversation, delete_conversation
 
 class Tones():
@@ -172,7 +173,7 @@ def create_message(
     prompt: str,
     tone: str,
     context: str = None,
-    image_info: dict = None,
+    image_response: ImageResponse = None,
     web_search: bool = False,
     gpt4_turbo: bool = False
 ) -> str:
@@ -228,9 +229,9 @@ def create_message(
         'target': 'chat',
         'type': 4
     }
-    if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
-        struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
-        struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
+    if image_response.get('imageUrl') and image_response.get('originalImageUrl'):
+        struct['arguments'][0]['message']['originalImageUrl'] = image_response.get('originalImageUrl')
+        struct['arguments'][0]['message']['imageUrl'] = image_response.get('imageUrl')
     struct['arguments'][0]['experienceType'] = None
     struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
     if context:
@@ -262,9 +263,9 @@ async def stream_generate(
         headers=headers
     ) as session:
         conversation = await create_conversation(session, proxy)
-        image_info = None
-        if image:
-            image_info = await upload_image(session, image, tone, proxy)
+        image_response = await upload_image(session, image, tone, proxy) if image else None
+        if image_response:
+            yield image_response
 
         try:
             async with session.ws_connect(
@@ -274,7 +275,7 @@ async def stream_generate(
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                 await wss.receive(timeout=timeout)
-                await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))
+                await wss.send_str(create_message(conversation, prompt, tone, context, image_response, web_search, gpt4_turbo))
 
                 response_txt = ''
                 returned_text = ''
@@ -290,6 +291,7 @@ async def stream_generate(
                         response = json.loads(obj)
                         if response.get('type') == 1 and response['arguments'][0].get('messages'):
                             message = response['arguments'][0]['messages'][0]
+                            image_response = None
                             if (message['contentOrigin'] != 'Apology'):
                                 if 'adaptiveCards' in message:
                                     card = message['adaptiveCards'][0]['body'][0]
@@ -301,7 +303,7 @@ async def stream_generate(
                                 elif message.get('contentType') == "IMAGE":
                                     prompt = message.get('text')
                                     try:
-                                        response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
+                                        image_response = ImageResponse(await create_images(session, prompt, proxy), prompt)
                                     except:
                                         response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                                     final = True
@@ -310,6 +312,8 @@ async def stream_generate(
                                 if new != "\n":
                                     yield new
                                 returned_text = response_txt
+                                if image_response:
+                                    yield image_response
                         elif response.get('type') == 2:
                             result = response['item']['result']
                             if result.get('error'):
diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py
index d92451fa..1af902ef 100644
--- a/g4f/Provider/bing/upload_image.py
+++ b/g4f/Provider/bing/upload_image.py
@@ -6,7 +6,7 @@ import json
 import math
 from ...typing import ImageType
 from aiohttp import ClientSession
-from ...image import to_image, process_image, to_base64
+from ...image import to_image, process_image, to_base64, ImageResponse
 
 image_config = {
     "maxImagePixels": 360000,
@@ -19,7 +19,7 @@ async def upload_image(
     image: ImageType,
     tone: str,
     proxy: str = None
-) -> dict:
+) -> ImageResponse:
     image = to_image(image)
     width, height = image.size
     max_image_pixels = image_config['maxImagePixels']
@@ -55,7 +55,7 @@ async def upload_image(
                 else "https://www.bing.com/images/blob?bcid=" + result['bcid']
             )
-    return result
+    return ImageResponse(result["imageUrl"], "", result)
 
 
 def build_image_upload_api_payload(image_bin: str, tone: str):
     payload = {
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 4b11aeaf..a790f0de 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -13,7 +13,6 @@ from ...webdriver import get_browser, get_driver_cookies
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 from ...image import to_image, to_bytes, ImageType, ImageResponse
-from ... import debug
 
 models = {
     "gpt-3.5": "text-davinci-002-render-sha",
@@ -242,9 +241,7 @@ class OpenaiChat(AsyncGeneratorProvider):
                 json=data,
                 headers={"Accept": "text/event-stream", **headers}
             ) as response:
-                try:
-                    response.raise_for_status()
-                except:
+                if not response.ok:
                     raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
                 try:
                     last_message: int = 0
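With this change the Bing provider yields ImageResponse objects in the stream instead of splicing image markdown into the text. A minimal consumer sketch, assuming the standard AsyncGeneratorProvider entry point create_async_generator and that str() on an ImageResponse renders markdown image links (both assumptions, not shown in this diff):

import asyncio

from g4f.Provider import Bing
from g4f.image import ImageResponse

async def main():
    # After this change, chunks are either text deltas or ImageResponse objects.
    async for chunk in Bing.create_async_generator(
        model="gpt-4",
        messages=[{"role": "user", "content": "Create an image of a lighthouse at dawn"}],
    ):
        if isinstance(chunk, ImageResponse):
            print(str(chunk))   # assumed to render as markdown image links
        else:
            print(chunk, end="")

asyncio.run(main())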