From 80e17bc7c9c4ef9a025e3ca445d473fc001174c1 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:54:02 +0300 Subject: refactor(g4f/Provider/AiChatOnline.py): streamline class attributes --- g4f/Provider/AiChatOnline.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/AiChatOnline.py b/g4f/Provider/AiChatOnline.py index 152a7d31..40f77105 100644 --- a/g4f/Provider/AiChatOnline.py +++ b/g4f/Provider/AiChatOnline.py @@ -12,10 +12,8 @@ class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin): url = "https://aichatonlineorg.erweima.ai" api_endpoint = "/aichatonline/api/chat/gpt" working = True - supports_gpt_35_turbo = True supports_gpt_4 = True default_model = 'gpt-4o-mini' - supports_message_history = False @classmethod async def grab_token( -- cgit v1.2.3 From 7d1fa2c623db61bb9116abc8a059d464324b41c9 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:54:26 +0300 Subject: refactor(g4f/Provider/ReplicateHome.py): optimize model organization and improve readability --- g4f/Provider/ReplicateHome.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py index c4e52ad6..d41633ba 100644 --- a/g4f/Provider/ReplicateHome.py +++ b/g4f/Provider/ReplicateHome.py @@ -15,16 +15,11 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): parent = "Replicate" working = True default_model = 'meta/meta-llama-3-70b-instruct' + text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"} + image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"} models = [ - # Models for image generation - 'stability-ai/stable-diffusion-3', - 'bytedance/sdxl-lightning-4step', - 'playgroundai/playground-v2.5-1024px-aesthetic', - - # Models for image generation - 'meta/meta-llama-3-70b-instruct', - 'mistralai/mixtral-8x7b-instruct-v0.1', - 'google-deepmind/gemma-2b-it', + *text_models, + *image_models ] versions = { @@ -51,9 +46,6 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): ] } - image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"} - text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"} - model_aliases = { "sd-3": "stability-ai/stable-diffusion-3", "sdxl": "bytedance/sdxl-lightning-4step", -- cgit v1.2.3 From fc9cb1f561e3f887dbc03f5c39069501f4f4f52c Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:54:53 +0300 Subject: chore(g4f/Provider/bing/conversation.py): update bundleVersion in create_conversation URLs --- g4f/Provider/bing/conversation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py index a4195fa4..b5c237f9 100644 --- a/g4f/Provider/bing/conversation.py +++ b/g4f/Provider/bing/conversation.py @@ -33,9 +33,9 @@ async def create_conversation(session: StreamSession, headers: dict, tone: str) Conversation: An instance representing the created conversation. 
""" if tone == "Copilot": - url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1690.0" + url = "https://copilot.microsoft.com/turing/conversation/create?bundleVersion=1.1809.0" else: - url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1690.0" + url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1809.0" async with session.get(url, headers=headers) as response: if response.status == 404: raise RateLimitError("Response 404: Do less requests and reuse conversations") @@ -90,4 +90,4 @@ async def delete_conversation(session: StreamSession, conversation: Conversation response = await response.json() return response["result"]["value"] == "Success" except: - return False \ No newline at end of file + return False -- cgit v1.2.3 From b18156a2a0ef7bc5fc9b4e9de85f5a4ff6e18d32 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:55:31 +0300 Subject: refactor(g4f/Provider/Koala.py): update URL structure and default model --- g4f/Provider/Koala.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Koala.py b/g4f/Provider/Koala.py index 0e810083..14e533df 100644 --- a/g4f/Provider/Koala.py +++ b/g4f/Provider/Koala.py @@ -10,7 +10,8 @@ from .helper import get_random_string, get_connector from ..requests import raise_for_status class Koala(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://koala.sh" + url = "https://koala.sh/chat" + api_endpoint = "https://koala.sh/api/gpt/" working = True supports_message_history = True supports_gpt_4 = True @@ -26,17 +27,17 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin): **kwargs: Any ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]: if not model: - model = "gpt-3.5-turbo" + model = "gpt-4o-mini" headers = { "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0", "Accept": "text/event-stream", "Accept-Language": "de,en-US;q=0.7,en;q=0.3", "Accept-Encoding": "gzip, deflate, br", - "Referer": f"{cls.url}/chat", + "Referer": f"{cls.url}", "Flag-Real-Time-Data": "false", "Visitor-ID": get_random_string(20), - "Origin": cls.url, + "Origin": "https://koala.sh", "Alt-Used": "koala.sh", "Sec-Fetch-Dest": "empty", "Sec-Fetch-Mode": "cors", @@ -67,7 +68,7 @@ class Koala(AsyncGeneratorProvider, ProviderModelMixin): "model": model, } - async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response: + async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response: await raise_for_status(response) async for chunk in cls._parse_event_stream(response): yield chunk -- cgit v1.2.3 From 7d028e5e2b5aa569d675a8b87064b94f9d148017 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:56:07 +0300 Subject: refactor(g4f/Provider/Snova.py): update model list and add error note --- g4f/Provider/Snova.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Snova.py b/g4f/Provider/Snova.py index 76dfac40..53d8f0bd 100644 --- a/g4f/Provider/Snova.py +++ b/g4f/Provider/Snova.py @@ -24,10 +24,9 @@ class Snova(AsyncGeneratorProvider, ProviderModelMixin): 'Meta-Llama-3.1-70B-Instruct', 'Meta-Llama-3.1-405B-Instruct', 'Samba-CoE', - 'ignos/Mistral-T5-7B-v1', + 'ignos/Mistral-T5-7B-v1', # Error with the answer 'v1olet/v1olet_merged_dpo_7B', 'macadeliccc/WestLake-7B-v2-laser-truthy-dpo', - 'cookinai/DonutLM-v1', ] model_aliases = { @@ -40,7 +39,6 @@ class 
Snova(AsyncGeneratorProvider, ProviderModelMixin): "samba-coe-v0.1": "Samba-CoE", "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B", "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo", - "donutlm-v1": "cookinai/DonutLM-v1", } @classmethod -- cgit v1.2.3 From a4f75407f69c57e033113c69410213f56e0b61a0 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 15:59:00 +0300 Subject: Removed g4f/Provider/selenium/AItianhuSpace.py g4f/Provider/Llama.py | Updated g4f/Provider/__init__.py --- g4f/Provider/Llama.py | 91 -------------------------- g4f/Provider/__init__.py | 1 - g4f/Provider/selenium/AItianhuSpace.py | 116 --------------------------------- 3 files changed, 208 deletions(-) delete mode 100644 g4f/Provider/Llama.py delete mode 100644 g4f/Provider/selenium/AItianhuSpace.py (limited to 'g4f/Provider') diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py deleted file mode 100644 index 235c0994..00000000 --- a/g4f/Provider/Llama.py +++ /dev/null @@ -1,91 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin - - -class Llama(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.llama2.ai" - working = False - supports_message_history = True - default_model = "meta/meta-llama-3-70b-instruct" - models = [ - "meta/llama-2-7b-chat", - "meta/llama-2-13b-chat", - "meta/llama-2-70b-chat", - "meta/meta-llama-3-8b-instruct", - "meta/meta-llama-3-70b-instruct", - ] - model_aliases = { - "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct", - "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct", - "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat", - "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat", - "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat", - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - system_message: str = "You are a helpful assistant.", - temperature: float = 0.75, - top_p: float = 0.9, - max_tokens: int = 8000, - **kwargs - ) -> AsyncResult: - headers = { - "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0", - "Accept": "*/*", - "Accept-Language": "de,en-US;q=0.7,en;q=0.3", - "Accept-Encoding": "gzip, deflate, br", - "Referer": f"{cls.url}/", - "Content-Type": "text/plain;charset=UTF-8", - "Origin": cls.url, - "Connection": "keep-alive", - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "Pragma": "no-cache", - "Cache-Control": "no-cache", - "TE": "trailers" - } - async with ClientSession(headers=headers) as session: - system_messages = [message["content"] for message in messages if message["role"] == "system"] - if system_messages: - system_message = "\n".join(system_messages) - messages = [message for message in messages if message["role"] != "system"] - prompt = format_prompt(messages) - data = { - "prompt": prompt, - "model": cls.get_model(model), - "systemPrompt": system_message, - "temperature": temperature, - "topP": top_p, - "maxTokens": max_tokens, - "image": None - } - started = False - async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response: - await raise_for_status(response) - async for chunk in response.content.iter_any(): - if not chunk: - continue - if not started: - chunk = chunk.lstrip() - started 
= True - yield chunk.decode(errors="ignore") - -def format_prompt(messages: Messages): - messages = [ - f"[INST] {message['content']} [/INST]" - if message["role"] == "user" - else message["content"] - for message in messages - ] - return "\n".join(messages) + "\n" diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index a9a815ea..10459684 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -41,7 +41,6 @@ from .HuggingFace import HuggingFace from .Koala import Koala from .Liaobots import Liaobots from .LiteIcoding import LiteIcoding -from .Llama import Llama from .Local import Local from .MagickPen import MagickPen from .MetaAI import MetaAI diff --git a/g4f/Provider/selenium/AItianhuSpace.py b/g4f/Provider/selenium/AItianhuSpace.py deleted file mode 100644 index 4c438e3b..00000000 --- a/g4f/Provider/selenium/AItianhuSpace.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import annotations - -import time -import random - -from ...typing import CreateResult, Messages -from ..base_provider import AbstractProvider -from ..helper import format_prompt, get_random_string -from ...webdriver import WebDriver, WebDriverSession, element_send_text -from ... import debug - -class AItianhuSpace(AbstractProvider): - url = "https://chat3.aiyunos.top/" - working = True - supports_stream = True - supports_gpt_35_turbo = True - _domains = ["aitianhu.com", "aitianhu1.top"] - - @classmethod - def create_completion( - cls, - model: str, - messages: Messages, - stream: bool, - domain: str = None, - proxy: str = None, - timeout: int = 120, - webdriver: WebDriver = None, - headless: bool = True, - **kwargs - ) -> CreateResult: - if not model: - model = "gpt-3.5-turbo" - if not domain: - rand = get_random_string(6) - domain = random.choice(cls._domains) - domain = f"{rand}.{domain}" - if debug.logging: - print(f"AItianhuSpace | using domain: {domain}") - url = f"https://{domain}" - prompt = format_prompt(messages) - - with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver: - from selenium.webdriver.common.by import By - from selenium.webdriver.support.ui import WebDriverWait - from selenium.webdriver.support import expected_conditions as EC - - wait = WebDriverWait(driver, timeout) - - # Bypass devtools detection - driver.get("https://blank.page/") - wait.until(EC.visibility_of_element_located((By.ID, "sheet"))) - driver.execute_script(f""" - document.getElementById('sheet').addEventListener('click', () => {{ - window.open(arguments[0]); - }}); - """, url) - driver.find_element(By.ID, "sheet").click() - time.sleep(10) - - original_window = driver.current_window_handle - for window_handle in driver.window_handles: - if window_handle != original_window: - driver.close() - driver.switch_to.window(window_handle) - break - - # Wait for page load - wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea.n-input__textarea-el"))) - - # Register hook in XMLHttpRequest - script = """ -const _http_request_open = XMLHttpRequest.prototype.open; -window._last_message = window._message = ""; -window._loadend = false; -XMLHttpRequest.prototype.open = function(method, url) { - if (url == "/api/chat-process") { - this.addEventListener("progress", (event) => { - const lines = this.responseText.split("\\n"); - try { - window._message = JSON.parse(lines[lines.length-1])["text"]; - } catch(e) { } - }); - this.addEventListener("loadend", (event) => { - window._loadend = true; - }); - } - return _http_request_open.call(this, method, url); -} -""" - 
driver.execute_script(script) - - # Submit prompt - element_send_text(driver.find_element(By.CSS_SELECTOR, "textarea.n-input__textarea-el"), prompt) - - # Read response - while True: - chunk = driver.execute_script(""" -if (window._message && window._message != window._last_message) { - try { - return window._message.substring(window._last_message.length); - } finally { - window._last_message = window._message; - } -} -if (window._loadend) { - return null; -} -return ""; -""") - if chunk: - yield chunk - elif chunk != "": - break - else: - time.sleep(0.1) \ No newline at end of file -- cgit v1.2.3 From a53954e777d6f88b21cb0b61d99c6d131ab7c9e7 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Wed, 11 Sep 2024 16:16:22 +0300 Subject: Updated g4f/Provider/selenium/__init__.py --- g4f/Provider/Nexra.py | 156 ++++++++++++-------------------------- g4f/Provider/selenium/__init__.py | 1 - 2 files changed, 47 insertions(+), 110 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py index e2c3e197..65c50e73 100644 --- a/g4f/Provider/Nexra.py +++ b/g4f/Provider/Nexra.py @@ -1,40 +1,32 @@ from __future__ import annotations - import json -import base64 from aiohttp import ClientSession -from typing import AsyncGenerator from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse from .helper import format_prompt +from ..image import ImageResponse class Nexra(AsyncGeneratorProvider, ProviderModelMixin): url = "https://nexra.aryahcr.cc" - api_endpoint_text = "https://nexra.aryahcr.cc/api/chat/gpt" - api_endpoint_image = "https://nexra.aryahcr.cc/api/image/complements" + chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt" + image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements" working = True supports_gpt_35_turbo = True supports_gpt_4 = True - supports_stream = True supports_system_message = True supports_message_history = True default_model = 'gpt-3.5-turbo' - models = [ - # Text models + text_models = [ 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002', - # Image models - 'dalle', 'dalle-mini', 'emi' ] - - image_models = {"dalle", "dalle-mini", "emi"} - text_models = set(models) - image_models + image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi'] + models = [*text_models, *image_models] model_aliases = { "gpt-4": "gpt-4-0613", @@ -60,16 +52,21 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin): "gpt-3": "ada", "gpt-3": "babbage-002", "gpt-3": "davinci-002", + + "dalle-2": "dalle2", } - + + @classmethod def get_model(cls, model: str) -> str: - if model in cls.models: + if model in cls.text_models or model in cls.image_models: return model elif model in cls.model_aliases: return cls.model_aliases[model] + elif model in cls.image_models: + return cls.default_image_model else: - return cls.default_model + return cls.default_chat_model @classmethod async def create_async_generator( @@ -78,104 +75,45 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin): messages: Messages, proxy: str = None, **kwargs - ) -> AsyncGenerator[str | ImageResponse, None]: + ) -> AsyncResult: model = cls.get_model(model) - if model in cls.image_models: - 
async for result in cls.create_image_async_generator(model, messages, proxy, **kwargs): - yield result - else: - async for result in cls.create_text_async_generator(model, messages, proxy, **kwargs): - yield result - - @classmethod - async def create_text_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncGenerator[str, None]: headers = { "Content-Type": "application/json", } + async with ClientSession(headers=headers) as session: - data = { - "messages": messages, - "prompt": format_prompt(messages), - "model": model, - "markdown": False, - "stream": False, - } - async with session.post(cls.api_endpoint_text, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - json_result = json.loads(result) - yield json_result["gpt"] - - @classmethod - async def create_image_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncGenerator[ImageResponse | str, None]: - headers = { - "Content-Type": "application/json" - } - - prompt = messages[-1]['content'] if messages else "" - - data = { - "prompt": prompt, - "model": model - } - - async def process_response(response_text: str) -> ImageResponse | None: - json_start = response_text.find('{') - if json_start != -1: - json_data = response_text[json_start:] - try: - response_data = json.loads(json_data) - image_data = response_data.get('images', [])[0] + if model in cls.image_models: + # Image generation + prompt = messages[-1]['content'] if messages else "" + data = { + "prompt": prompt, + "model": model, + "response": "url" + } + async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + result = await response.text() + result_json = json.loads(result.strip('_')) + image_url = result_json['images'][0] if result_json['images'] else None - if image_data.startswith('data:image/'): - return ImageResponse([image_data], "Generated image") + if image_url: + yield ImageResponse(images=image_url, alt=prompt) + else: + # Text completion + data = { + "messages": messages, + "prompt": format_prompt(messages), + "model": model, + "markdown": False + } + async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + result = await response.text() try: - base64.b64decode(image_data) - data_uri = f"data:image/jpeg;base64,{image_data}" - return ImageResponse([data_uri], "Generated image") - except: - print("Invalid base64 data") - return None - except json.JSONDecodeError: - print("Failed to parse JSON.") - else: - print("No JSON data found in the response.") - return None - - async with ClientSession(headers=headers) as session: - async with session.post(cls.api_endpoint_image, json=data, proxy=proxy) as response: - response.raise_for_status() - response_text = await response.text() - - image_response = await process_response(response_text) - if image_response: - yield image_response - else: - yield "Failed to process image data." 
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> str:
-        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
-            if isinstance(response, ImageResponse):
-                return response.images[0]
-            return response
+                        json_response = json.loads(result)
+                        gpt_response = json_response.get('gpt', '')
+                        yield gpt_response
+                    except json.JSONDecodeError:
+                        yield result
diff --git a/g4f/Provider/selenium/__init__.py b/g4f/Provider/selenium/__init__.py
index 9a020460..1b801725 100644
--- a/g4f/Provider/selenium/__init__.py
+++ b/g4f/Provider/selenium/__init__.py
@@ -1,4 +1,3 @@
-from .AItianhuSpace import AItianhuSpace
 from .MyShell import MyShell
 from .PerplexityAi import PerplexityAi
 from .Phind import Phind
-- 
cgit v1.2.3

From 890e7891c1e2cc7460626b115b1e3dad8bede316 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Wed, 11 Sep 2024 16:17:59 +0300
Subject: feat(g4f/Provider/PerplexityLabs.py): add model aliases and update default model

---
 g4f/Provider/PerplexityLabs.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index 3656a39b..ecb51f9b 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -13,7 +13,7 @@ WS_URL = "wss://www.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "llama-3.1-8b-instruct"
+    default_model = "llama-3.1-70b-instruct"
     models = [
         "llama-3.1-sonar-large-128k-online",
         "llama-3.1-sonar-small-128k-online",
@@ -22,6 +22,11 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
     ]
+
+    model_aliases = {
+        "llama-3.1-8b": "llama-3.1-8b-instruct",
+        "llama-3.1-70b": "llama-3.1-70b-instruct",
+    }
 
     @classmethod
     async def create_async_generator(
-- 
cgit v1.2.3

From 2e3d60a5f1d60f7443e78e4b5b692bb76a78d51c Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Wed, 11 Sep 2024 21:59:11 +0300
Subject: feat(g4f/Provider/Bixin123.py): implement dynamic fingerprint generation

---
 g4f/Provider/Bixin123.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py
index 694a2eff..39422c93 100644
--- a/g4f/Provider/Bixin123.py
+++ b/g4f/Provider/Bixin123.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from aiohttp import ClientSession
 import json
+import random
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..typing import AsyncResult, Messages
 from .helper import format_prompt
@@ -30,6 +31,10 @@ class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
         else:
             return cls.default_model
 
+    @classmethod
+    def generate_fingerprint(cls) -> str:
+        return str(random.randint(100000000, 999999999))
+
     @classmethod
     async def create_async_generator(
         cls,
@@ -45,7 +50,7 @@ class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
             "accept-language": "en-US,en;q=0.9",
             "cache-control": "no-cache",
             "content-type": "application/json",
-            "fingerprint": "988148794",
+            "fingerprint": cls.generate_fingerprint(),
             "origin": cls.url,
             "pragma": "no-cache",
             "priority": "u=1, i",
-- 
cgit v1.2.3
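
Several providers touched in this series (Nexra, Bixin123, Prodia, Blackbox) define the same get_model classmethod: return an exact match from models, otherwise remap a key from model_aliases, otherwise fall back to default_model. Because a Python dict literal silently keeps only the last value for a repeated key, every alias key must be unique for that lookup to behave as intended. The standalone sketch below illustrates the resolution order; AliasResolver is a hypothetical class that merely mirrors the PerplexityLabs values from the patch above and is not part of any commit in this series.

    # Hypothetical, self-contained sketch of the shared get_model lookup.
    # The values mirror the PerplexityLabs diff above; this code belongs
    # to no commit in the series.
    class AliasResolver:
        default_model = "llama-3.1-70b-instruct"
        models = ["llama-3.1-8b-instruct", "llama-3.1-70b-instruct"]
        model_aliases = {
            "llama-3.1-8b": "llama-3.1-8b-instruct",
            "llama-3.1-70b": "llama-3.1-70b-instruct",
        }

        @classmethod
        def get_model(cls, model: str) -> str:
            # Exact model names win, then aliases, then the provider default.
            if model in cls.models:
                return model
            if model in cls.model_aliases:
                return cls.model_aliases[model]
            return cls.default_model

    assert AliasResolver.get_model("llama-3.1-8b") == "llama-3.1-8b-instruct"
    assert AliasResolver.get_model("gpt-4") == "llama-3.1-70b-instruct"
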
From abefd7f36fb5b03e612e5b3e96ba1079c4d2f303 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 17:55:39 +0300 Subject: refactor(g4f/Provider/HuggingFace.py, g4f/Provider/HuggingChat.py): update provider models and aliases --- g4f/Provider/HuggingChat.py | 6 ++---- g4f/Provider/HuggingFace.py | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 76c76a35..28e58768 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -14,8 +14,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" models = [ 'meta-llama/Meta-Llama-3.1-70B-Instruct', - 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8', - 'CohereForAI/c4ai-command-r-plus', + 'CohereForAI/c4ai-command-r-plus-08-2024', 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', '01-ai/Yi-1.5-34B-Chat', @@ -25,8 +24,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): model_aliases = { "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", - "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8", - "command-r-plus": "CohereForAI/c4ai-command-r-plus", + "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat", diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py index 74957862..46cfcca3 100644 --- a/g4f/Provider/HuggingFace.py +++ b/g4f/Provider/HuggingFace.py @@ -17,8 +17,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" models = [ 'meta-llama/Meta-Llama-3.1-70B-Instruct', - 'meta-llama/Meta-Llama-3.1-405B-Instruct-FP8', - 'CohereForAI/c4ai-command-r-plus', + 'CohereForAI/c4ai-command-r-plus-08-2024', 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', '01-ai/Yi-1.5-34B-Chat', @@ -28,8 +27,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): model_aliases = { "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", - "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8", - "command-r-plus": "CohereForAI/c4ai-command-r-plus", + "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat", -- cgit v1.2.3 From 11f702e0acc506c13569e4f38ad1390fa87b53f0 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 18:06:52 +0300 Subject: refactor(models): update HuggingChat, HuggingFace provider models and aliases --- g4f/Provider/HuggingChat.py | 4 +--- g4f/Provider/HuggingFace.py | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 28e58768..fad44957 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -13,11 +13,10 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): supports_stream = True default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" models = [ - 'meta-llama/Meta-Llama-3.1-70B-Instruct', + default_model, 'CohereForAI/c4ai-command-r-plus-08-2024', 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - '01-ai/Yi-1.5-34B-Chat', 
'mistralai/Mistral-7B-Instruct-v0.3', 'microsoft/Phi-3-mini-4k-instruct', ] @@ -27,7 +26,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat", "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", } diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py index 46cfcca3..4fe02739 100644 --- a/g4f/Provider/HuggingFace.py +++ b/g4f/Provider/HuggingFace.py @@ -16,11 +16,10 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): supports_message_history = True default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" models = [ - 'meta-llama/Meta-Llama-3.1-70B-Instruct', + default_model, 'CohereForAI/c4ai-command-r-plus-08-2024', 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - '01-ai/Yi-1.5-34B-Chat', 'mistralai/Mistral-7B-Instruct-v0.3', 'microsoft/Phi-3-mini-4k-instruct', ] @@ -30,7 +29,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - "yi-1.5-34b": "01-ai/Yi-1.5-34B-Chat", "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", } -- cgit v1.2.3 From 2aa514bfd30d54a7d265d54676b8901c7a000c4f Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 20:28:50 +0300 Subject: Added a new provider for generating images: g4f/Provider/Prodia.py --- g4f/Provider/Prodia.py | 150 +++++++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/__init__.py | 1 + 2 files changed, 151 insertions(+) create mode 100644 g4f/Provider/Prodia.py (limited to 'g4f/Provider') diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py new file mode 100644 index 00000000..cad2cc6c --- /dev/null +++ b/g4f/Provider/Prodia.py @@ -0,0 +1,150 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import time +import asyncio + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse + +class Prodia(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://app.prodia.com" + api_endpoint = "https://api.prodia.com/generate" + working = True + + default_model = 'absolutereality_v181.safetensors [3d9d4d2b]' + models = [ + '3Guofeng3_v34.safetensors [50f420de]', + 'absolutereality_V16.safetensors [37db0fc3]', + default_model, + 'amIReal_V41.safetensors [0a8a2e61]', + 'analog-diffusion-1.0.ckpt [9ca13f02]', + 'aniverse_v30.safetensors [579e6f85]', + 'anythingv3_0-pruned.ckpt [2700c435]', + 'anything-v4.5-pruned.ckpt [65745d25]', + 'anythingV5_PrtRE.safetensors [893e49b9]', + 'AOM3A3_orangemixs.safetensors [9600da17]', + 'AOM3A3_orangemixs.safetensors [9600da17]', + 'blazing_drive_v10g.safetensors [ca1c1eab]', + 'breakdomain_I2428.safetensors [43cc7d2f]', + 'breakdomain_M2150.safetensors [15f7afca]', + 'cetusMix_Version35.safetensors [de2f2560]', + 'childrensStories_v13D.safetensors [9dfaabcb]', + 'childrensStories_v1SemiReal.safetensors [a1c56dbb]', + 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]', + 'Counterfeit_v30.safetensors [9e2a8f19]', + 'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]', + 
'cyberrealistic_v33.safetensors [82b0d085]', + 'dalcefo_v4.safetensors [425952fe]', + 'deliberate_v2.safetensors [10ec4b29]', + 'deliberate_v3.safetensors [afd9d2d4]', + 'dreamlike-anime-1.0.safetensors [4520e090]', + 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', + 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]', + 'dreamshaper_6BakedVae.safetensors [114c8abb]', + 'dreamshaper_7.safetensors [5cf5ae06]', + 'dreamshaper_8.safetensors [9d40847d]', + 'edgeOfRealism_eorV20.safetensors [3ed5de15]', + 'EimisAnimeDiffusion_V1.ckpt [4f828a15]', + 'elldreths-vivid-mix.safetensors [342d9d26]', + 'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]', + 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]', + 'epicrealism_pureEvolutionV3.safetensors [42c8440c]', + 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]', + 'indigoFurryMix_v75Hybrid.safetensors [91208cbb]', + 'juggernaut_aftermath.safetensors [5e20c455]', + 'lofi_v4.safetensors [ccc204d6]', + 'lyriel_v16.safetensors [68fceea2]', + 'majicmixRealistic_v4.safetensors [29d0de58]', + 'mechamix_v10.safetensors [ee685731]', + 'meinamix_meinaV9.safetensors [2ec66ab0]', + 'meinamix_meinaV11.safetensors [b56ce717]', + 'neverendingDream_v122.safetensors [f964ceeb]', + 'openjourney_V4.ckpt [ca2f377f]', + 'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]', + 'portraitplus_V1.0.safetensors [1400e684]', + 'protogenx34.safetensors [5896f8d5]', + 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]', + 'Realistic_Vision_V2.0.safetensors [79587710]', + 'Realistic_Vision_V4.0.safetensors [29a7afaa]', + 'Realistic_Vision_V5.0.safetensors [614d1063]', + 'Realistic_Vision_V5.1.safetensors [a0f13c83]', + 'redshift_diffusion-V10.safetensors [1400e684]', + 'revAnimated_v122.safetensors [3f4fefd9]', + 'rundiffusionFX25D_v10.safetensors [cd12b0ee]', + 'rundiffusionFX_v10.safetensors [cd4e694d]', + 'sdv1_4.ckpt [7460a6fa]', + 'v1-5-pruned-emaonly.safetensors [d7049739]', + 'v1-5-inpainting.safetensors [21c7ab71]', + 'shoninsBeautiful_v10.safetensors [25d8c546]', + 'theallys-mix-ii-churned.safetensors [5d9225a4]', + 'timeless-1.0.ckpt [7c4971d4]', + 'toonyou_beta6.safetensors [980f6b15]', + ] + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers) as session: + prompt = messages[-1]['content'] if messages else "" + + params = { + "new": "true", + "prompt": prompt, + "model": model, + "negative_prompt": kwargs.get("negative_prompt", ""), + "steps": kwargs.get("steps", 20), + "cfg": kwargs.get("cfg", 7), + "seed": kwargs.get("seed", int(time.time())), + "sampler": kwargs.get("sampler", "DPM++ 2M Karras"), + "aspect_ratio": kwargs.get("aspect_ratio", "square") + } + + async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response: + response.raise_for_status() + job_data = await response.json() + job_id = job_data["job"] + + image_url = await cls._poll_job(session, job_id, proxy) + yield ImageResponse(image_url, alt=prompt) + 
+ @classmethod + async def _poll_job(cls, session: ClientSession, job_id: str, proxy: str, max_attempts: int = 30, delay: int = 2) -> str: + for _ in range(max_attempts): + async with session.get(f"https://api.prodia.com/job/{job_id}", proxy=proxy) as response: + response.raise_for_status() + job_status = await response.json() + + if job_status["status"] == "succeeded": + return f"https://images.prodia.xyz/{job_id}.png" + elif job_status["status"] == "failed": + raise Exception("Image generation failed") + + await asyncio.sleep(delay) + + raise Exception("Timeout waiting for image generation") diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 10459684..de211c7e 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -50,6 +50,7 @@ from .Ollama import Ollama from .PerplexityLabs import PerplexityLabs from .Pi import Pi from .Pizzagpt import Pizzagpt +from .Prodia import Prodia from .Reka import Reka from .Snova import Snova from .Replicate import Replicate -- cgit v1.2.3 From abea4ddbcca40c1c1da51507363867fb3664228f Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 21:35:31 +0300 Subject: Bug fixes and improvements to HuggingChat and HuggingFace providers --- g4f/Provider/HuggingChat.py | 1 + g4f/Provider/HuggingFace.py | 23 +++++------------------ 2 files changed, 6 insertions(+), 18 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index fad44957..7edb2f9f 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -12,6 +12,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): working = True supports_stream = True default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" + models = [ default_model, 'CohereForAI/c4ai-command-r-plus-08-2024', diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py index 4fe02739..586e5f5f 100644 --- a/g4f/Provider/HuggingFace.py +++ b/g4f/Provider/HuggingFace.py @@ -9,29 +9,16 @@ from .helper import get_connector from ..errors import RateLimitError, ModelNotFoundError from ..requests.raise_for_status import raise_for_status +from .HuggingChat import HuggingChat + class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): url = "https://huggingface.co/chat" working = True needs_auth = True supports_message_history = True - default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" - models = [ - default_model, - 'CohereForAI/c4ai-command-r-plus-08-2024', - 'mistralai/Mixtral-8x7B-Instruct-v0.1', - 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', - 'mistralai/Mistral-7B-Instruct-v0.3', - 'microsoft/Phi-3-mini-4k-instruct', - ] - - model_aliases = { - "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", - "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", - "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", - "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", - } + default_model = HuggingChat.default_model + models = HuggingChat.models + model_aliases = HuggingChat.model_aliases @classmethod def get_model(cls, model: str) -> str: -- cgit v1.2.3 From 75bd017023ead0c888e169f66f7622a44670399c Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 21:43:28 +0300 Subject: Bug fixes and improvements to HuggingChat provider --- g4f/Provider/HuggingChat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'g4f/Provider') diff --git 
a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 7edb2f9f..8598358d 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -14,7 +14,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" models = [ - default_model, + 'meta-llama/Meta-Llama-3.1-70B-Instruct', 'CohereForAI/c4ai-command-r-plus-08-2024', 'mistralai/Mixtral-8x7B-Instruct-v0.1', 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO', @@ -77,7 +77,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): response = session.post('https://huggingface.co/chat/conversation', json=json_data) conversationId = response.json()['conversationId'] - response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=01',) + response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11',) data: list = (response.json())["nodes"][1]["data"] keys: list[int] = data[data[0]["messages"]] -- cgit v1.2.3 From ba5f6efa394fc5193b6170d0f3f76729c82454bc Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 21:45:14 +0300 Subject: Fixing errors with models in the provider --- g4f/Provider/Prodia.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py index cad2cc6c..5a6de9a0 100644 --- a/g4f/Provider/Prodia.py +++ b/g4f/Provider/Prodia.py @@ -17,7 +17,7 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): models = [ '3Guofeng3_v34.safetensors [50f420de]', 'absolutereality_V16.safetensors [37db0fc3]', - default_model, + 'absolutereality_v181.safetensors [3d9d4d2b]', 'amIReal_V41.safetensors [0a8a2e61]', 'analog-diffusion-1.0.ckpt [9ca13f02]', 'aniverse_v30.safetensors [579e6f85]', -- cgit v1.2.3 From e23ee1f80933296751978c05571449e93363baaa Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 22:11:01 +0300 Subject: new model added --- g4f/Provider/HuggingChat.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py index 8598358d..06216ade 100644 --- a/g4f/Provider/HuggingChat.py +++ b/g4f/Provider/HuggingChat.py @@ -26,7 +26,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1", - "mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.3", "phi-3-mini-4k": "microsoft/Phi-3-mini-4k-instruct", } -- cgit v1.2.3 From a15578e22970076ca86042686e2e53655f8956d9 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Thu, 12 Sep 2024 23:20:02 +0300 Subject: refactor(g4f/Provider/Blackbox.py): streamline model handling and improve image generation --- g4f/Provider/Blackbox.py | 175 +++++++++++++++++++---------------------------- 1 file changed, 71 insertions(+), 104 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 9fab4a09..014ffc55 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -1,43 +1,38 @@ from __future__ import annotations -import uuid -import secrets import re -import base64 +import json from aiohttp import ClientSession -from typing import AsyncGenerator, Optional 
from ..typing import AsyncResult, Messages, ImageType -from ..image import to_data_uri, ImageResponse +from ..image import ImageResponse, to_data_uri from .base_provider import AsyncGeneratorProvider, ProviderModelMixin class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): url = "https://www.blackbox.ai" + api_endpoint = "https://www.blackbox.ai/api/chat" working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + default_model = 'blackbox' models = [ - default_model, - "gemini-1.5-flash", + 'blackbox', + 'gemini-1.5-flash', "llama-3.1-8b", 'llama-3.1-70b', 'llama-3.1-405b', - 'ImageGeneration', + 'ImageGenerationLV45LJp' ] - - model_aliases = { - "gemini-flash": "gemini-1.5-flash", - } - - agent_mode_map = { - 'ImageGeneration': {"mode": True, "id": "ImageGenerationLV45LJp", "name": "Image Generation"}, - } - model_id_map = { - "blackbox": {}, + model_config = { + "blackbox": {'mode': True, 'id': 'blackbox'}, "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'}, "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"}, 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"}, - 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"} + 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"}, + 'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"}, } @classmethod @@ -49,108 +44,80 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): else: return cls.default_model - @classmethod - async def download_image_to_base64_url(cls, url: str) -> str: - async with ClientSession() as session: - async with session.get(url) as response: - image_data = await response.read() - base64_data = base64.b64encode(image_data).decode('utf-8') - mime_type = response.headers.get('Content-Type', 'image/jpeg') - return f"data:{mime_type};base64,{base64_data}" - @classmethod async def create_async_generator( cls, model: str, messages: Messages, - proxy: Optional[str] = None, - image: Optional[ImageType] = None, - image_name: Optional[str] = None, + proxy: str = None, + image: ImageType = None, + image_name: str = None, **kwargs - ) -> AsyncGenerator[AsyncResult, None]: - if image is not None: - messages[-1]["data"] = { - "fileText": image_name, - "imageBase64": to_data_uri(image), - "title": str(uuid.uuid4()) - } - + ) -> AsyncResult: + model = cls.get_model(model) + headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36", - "Accept": "*/*", - "Accept-Language": "en-US,en;q=0.5", - "Accept-Encoding": "gzip, deflate, br", - "Referer": cls.url, - "Content-Type": "application/json", - "Origin": cls.url, - "DNT": "1", - "Sec-GPC": "1", - "Alt-Used": "www.blackbox.ai", - "Connection": "keep-alive", + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": "no-cache", + "referer": f"{cls.url}/", + "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" } - + async with ClientSession(headers=headers) as session: - random_id = secrets.token_hex(16) - random_user_id = str(uuid.uuid4()) - - model = cls.get_model(model) # Resolve the model alias + if image is not None: + 
messages[-1]["data"] = { + "fileText": image_name, + "imageBase64": to_data_uri(image) + } data = { "messages": messages, - "id": random_id, - "userId": random_user_id, + "id": "MRtAuMi", + "previewToken": None, + "userId": None, "codeModelMode": True, - "agentMode": cls.agent_mode_map.get(model, {}), + "agentMode": {}, "trendingAgentMode": {}, "isMicMode": False, + "maxTokens": 1024, "isChromeExt": False, - "playgroundMode": False, - "webSearchMode": False, - "userSystemPrompt": "", "githubToken": None, - "trendingAgentModel": cls.model_id_map.get(model, {}), - "maxTokens": None + "clickedAnswer2": False, + "clickedAnswer3": False, + "clickedForceWebSearch": False, + "visitFromDelta": False, + "mobileClient": False } - async with session.post( - f"{cls.url}/api/chat", json=data, proxy=proxy - ) as response: + if model == 'ImageGenerationLV45LJp': + data["agentMode"] = cls.model_config[model] + else: + data["trendingAgentMode"] = cls.model_config[model] + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: response.raise_for_status() - full_response = "" - buffer = "" - image_base64_url = None - async for chunk in response.content.iter_any(): - if chunk: - decoded_chunk = chunk.decode() - cleaned_chunk = re.sub(r'\$@\$.+?\$@\$|\$@\$', '', decoded_chunk) - - buffer += cleaned_chunk - - # Check if there's a complete image line in the buffer - image_match = re.search(r'!\[Generated Image\]\((https?://[^\s\)]+)\)', buffer) - if image_match: - image_url = image_match.group(1) - # Download the image and convert to base64 URL - image_base64_url = await cls.download_image_to_base64_url(image_url) - - # Remove the image line from the buffer - buffer = re.sub(r'!\[Generated Image\]\(https?://[^\s\)]+\)', '', buffer) - - # Send text line by line - lines = buffer.split('\n') - for line in lines[:-1]: - if line.strip(): - full_response += line + '\n' - yield line + '\n' - buffer = lines[-1] # Keep the last incomplete line in the buffer - - # Send the remaining buffer if it's not empty - if buffer.strip(): - full_response += buffer - yield buffer - - # If an image was found, send it as ImageResponse - if image_base64_url: - alt_text = "Generated Image" - image_response = ImageResponse(image_base64_url, alt=alt_text) - yield image_response + if model == 'ImageGenerationLV45LJp': + response_text = await response.text() + url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text) + if url_match: + image_url = url_match.group(0) + yield ImageResponse(image_url, alt=messages[-1]['content']) + else: + raise Exception("Image URL not found in the response") + else: + async for chunk in response.content: + if chunk: + decoded_chunk = chunk.decode() + if decoded_chunk.startswith('$@$v=undefined-rv1$@$'): + decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):] + yield decoded_chunk -- cgit v1.2.3 From 786a75b9f3fc0a4a64415bcb97eb7c31d98b2f14 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 13 Sep 2024 00:00:44 +0300 Subject: Updated g4f/Provider/Blackbox.py --- g4f/Provider/Blackbox.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index 014ffc55..d3d68ade 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -2,6 +2,8 @@ from __future__ import annotations import re import json +import random +import string from aiohttp import ClientSession from ..typing import AsyncResult, Messages, ImageType @@ -80,16 +82,18 @@ class 
Blackbox(AsyncGeneratorProvider, ProviderModelMixin): "imageBase64": to_data_uri(image) } + random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7)) + data = { "messages": messages, - "id": "MRtAuMi", + "id": random_id, "previewToken": None, "userId": None, "codeModelMode": True, "agentMode": {}, "trendingAgentMode": {}, "isMicMode": False, - "maxTokens": 1024, + "maxTokens": None, "isChromeExt": False, "githubToken": None, "clickedAnswer2": False, -- cgit v1.2.3 From 29515b6946879786f72a2ea31367b65b1cfdff8e Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 13 Sep 2024 01:29:20 +0300 Subject: Fixing errors in the provider g4f/Provider/Prodia.py --- g4f/Provider/Prodia.py | 1 - 1 file changed, 1 deletion(-) (limited to 'g4f/Provider') diff --git a/g4f/Provider/Prodia.py b/g4f/Provider/Prodia.py index 5a6de9a0..dd87a34c 100644 --- a/g4f/Provider/Prodia.py +++ b/g4f/Provider/Prodia.py @@ -25,7 +25,6 @@ class Prodia(AsyncGeneratorProvider, ProviderModelMixin): 'anything-v4.5-pruned.ckpt [65745d25]', 'anythingV5_PrtRE.safetensors [893e49b9]', 'AOM3A3_orangemixs.safetensors [9600da17]', - 'AOM3A3_orangemixs.safetensors [9600da17]', 'blazing_drive_v10g.safetensors [ca1c1eab]', 'breakdomain_I2428.safetensors [43cc7d2f]', 'breakdomain_M2150.safetensors [15f7afca]', -- cgit v1.2.3 From 3e491c63d7443a6c312986324f8f1390bede48d1 Mon Sep 17 00:00:00 2001 From: kqlio67 Date: Fri, 13 Sep 2024 17:09:31 +0300 Subject: feat(providers): unify Rocks and FluxAirforce into Airforce provider --- g4f/Provider/Airforce.py | 255 +++++++++++++++++++++++++++++++++++++++++++ g4f/Provider/FluxAirforce.py | 82 -------------- g4f/Provider/Rocks.py | 70 ------------ g4f/Provider/__init__.py | 3 +- 4 files changed, 256 insertions(+), 154 deletions(-) create mode 100644 g4f/Provider/Airforce.py delete mode 100644 g4f/Provider/FluxAirforce.py delete mode 100644 g4f/Provider/Rocks.py (limited to 'g4f/Provider') diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py new file mode 100644 index 00000000..88896096 --- /dev/null +++ b/g4f/Provider/Airforce.py @@ -0,0 +1,255 @@ +from __future__ import annotations + +from aiohttp import ClientSession, ClientResponseError +from urllib.parse import urlencode +import json +import io +import asyncio + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..image import ImageResponse, is_accepted_format +from .helper import format_prompt + +class Airforce(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://api.airforce" + text_api_endpoint = "https://api.airforce/chat/completions" + image_api_endpoint = "https://api.airforce/v1/imagine2" + working = True + supports_gpt_35_turbo = True + supports_gpt_4 = True + supports_stream = True + supports_system_message = True + supports_message_history = True + default_model = 'llama-3-70b-chat' + text_models = [ + # Open source models + 'llama-2-13b-chat', + + 'llama-3-70b-chat', + 'llama-3-70b-chat-turbo', + 'llama-3-70b-chat-lite', + + 'llama-3-8b-chat', + 'llama-3-8b-chat-turbo', + 'llama-3-8b-chat-lite', + + 'llama-3.1-405b-turbo', + 'llama-3.1-70b-turbo', + 'llama-3.1-8b-turbo', + + 'LlamaGuard-2-8b', + 'Llama-Guard-7b', + 'Meta-Llama-Guard-3-8B', + + 'Mixtral-8x7B-Instruct-v0.1', + 'Mixtral-8x22B-Instruct-v0.1', + 'Mistral-7B-Instruct-v0.1', + 'Mistral-7B-Instruct-v0.2', + 'Mistral-7B-Instruct-v0.3', + + 'Qwen1.5-72B-Chat', + 'Qwen1.5-110B-Chat', + 'Qwen2-72B-Instruct', + + 'gemma-2b-it', + 'gemma-2-9b-it', + 'gemma-2-27b-it', + + 
'dbrx-instruct', + + 'deepseek-llm-67b-chat', + + 'Nous-Hermes-2-Mixtral-8x7B-DPO', + 'Nous-Hermes-2-Yi-34B', + + 'WizardLM-2-8x22B', + + 'SOLAR-10.7B-Instruct-v1.0', + + 'StripedHyena-Nous-7B', + + 'sparkdesk', + + + # Other models + 'chatgpt-4o-latest', + 'gpt-4', + 'gpt-4-turbo', + 'gpt-4o-mini-2024-07-18', + 'gpt-4o-mini', + 'gpt-4o', + 'gpt-3.5-turbo', + 'gpt-3.5-turbo-0125', + 'gpt-3.5-turbo-1106', + 'gpt-3.5-turbo-16k', + 'gpt-3.5-turbo-0613', + 'gpt-3.5-turbo-16k-0613', + + 'gemini-1.5-flash', + 'gemini-1.5-pro', + ] + image_models = [ + 'flux', + 'flux-realism', + 'flux-anime', + 'flux-3d', + 'flux-disney', + 'flux-pixel', + 'any-dark', + ] + + models = [ + *text_models, + *image_models + ] + model_aliases = { + # Open source models + "llama-2-13b": "llama-2-13b-chat", + + "llama-3-70b": "llama-3-70b-chat", + "llama-3-70b": "llama-3-70b-chat-turbo", + "llama-3-70b": "llama-3-70b-chat-lite", + + "llama-3-8b": "llama-3-8b-chat", + "llama-3-8b": "llama-3-8b-chat-turbo", + "llama-3-8b": "llama-3-8b-chat-lite", + + "llama-3.1-405b": "llama-3.1-405b-turbo", + "llama-3.1-70b": "llama-3.1-70b-turbo", + "llama-3.1-8b": "llama-3.1-8b-turbo", + + "mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1", + "mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1", + "mistral-7b": "Mistral-7B-Instruct-v0.1", + "mistral-7b": "Mistral-7B-Instruct-v0.2", + "mistral-7b": "Mistral-7B-Instruct-v0.3", + + "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO", + + "qwen-1-5-72b": "Qwen1.5-72B-Chat", + "qwen-1_5-110b": "Qwen1.5-110B-Chat", + "qwen-2-72b": "Qwen2-72B-Instruct", + + "gemma-2b": "gemma-2b-it", + "gemma-2b-9b": "gemma-2-9b-it", + "gemma-2b-27b": "gemma-2-27b-it", + + "deepseek": "deepseek-llm-67b-chat", + + "yi-34b": "Nous-Hermes-2-Yi-34B", + + "wizardlm-2-8x22b": "WizardLM-2-8x22B", + + "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0", + + "sh-n-7b": "StripedHyena-Nous-7B", + + "sparkdesk-v1.1": "sparkdesk", + + + # Other models + "gpt-4o": "chatgpt-4o-latest", + "gpt-4o-mini": "gpt-4o-mini-2024-07-18", + + "gpt-3.5-turbo": "gpt-3.5-turbo-0125", + "gpt-3.5-turbo": "gpt-3.5-turbo-1106", + "gpt-3.5-turbo": "gpt-3.5-turbo-16k", + "gpt-3.5-turbo": "gpt-3.5-turbo-0613", + "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613", + + + "gemini-flash": "gemini-1.5-flash", + "gemini-pro": "gemini-1.5-pro", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "content-type": "application/json", + "origin": "https://api.airforce", + "sec-ch-ua": '"Chromium";v="128", "Not(A:Brand";v="24"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "cross-site", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + } + + + if model in cls.image_models: + async for item in cls.generate_image(model, messages, headers, proxy, **kwargs): + yield item + else: + async for item in cls.generate_text(model, messages, headers, proxy, **kwargs): + yield item + + @classmethod + async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: + async with ClientSession(headers=headers) as session: + data = { + "messages": [{"role": "user", "content": format_prompt(messages)}], + "model": model, + "temperature": kwargs.get('temperature', 1), + "top_p": 
kwargs.get('top_p', 1), + "stream": True + } + + async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for line in response.content: + if line: + line = line.decode('utf-8').strip() + if line.startswith("data: "): + try: + data = json.loads(line[6:]) + if 'choices' in data and len(data['choices']) > 0: + delta = data['choices'][0].get('delta', {}) + if 'content' in delta: + yield delta['content'] + except json.JSONDecodeError: + continue + elif line == "data: [DONE]": + break + + @classmethod + async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult: + prompt = messages[-1]['content'] if messages else "" + params = { + "prompt": prompt, + "size": kwargs.get("size", "1:1"), + "seed": kwargs.get("seed"), + "model": model + } + params = {k: v for k, v in params.items() if v is not None} + + try: + async with ClientSession(headers=headers) as session: + async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response: + response.raise_for_status() + content = await response.read() + + if response.content_type.startswith('image/'): + image_url = str(response.url) + yield ImageResponse(image_url, prompt) + else: + try: + text = content.decode('utf-8', errors='ignore') + yield f"Error: {text}" + except Exception as decode_error: + yield f"Error: Unable to decode response - {str(decode_error)}" + except ClientResponseError as e: + yield f"Error: HTTP {e.status}: {e.message}" + except Exception as e: + yield f"Unexpected error: {str(e)}" diff --git a/g4f/Provider/FluxAirforce.py b/g4f/Provider/FluxAirforce.py deleted file mode 100644 index fe003a61..00000000 --- a/g4f/Provider/FluxAirforce.py +++ /dev/null @@ -1,82 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession, ClientResponseError -from urllib.parse import urlencode -import io - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse, is_accepted_format - -class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://flux.api.airforce/" - api_endpoint = "https://api.airforce/v1/imagine2" - working = True - default_model = 'flux-realism' - models = [ - 'flux', - 'flux-realism', - 'flux-anime', - 'flux-3d', - 'flux-disney' - ] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "origin": "https://flux.api.airforce", - "priority": "u=1, i", - "referer": "https://flux.api.airforce/", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-site", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - - prompt = messages[-1]['content'] if messages else "" - - params = { - "prompt": prompt, - "size": kwargs.get("size", "1:1"), - "seed": kwargs.get("seed"), - "model": model - } - - params = {k: v for k, v in params.items() if v is not None} - - try: - async with ClientSession(headers=headers) as session: - async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response: - response.raise_for_status() - - content = await response.read() - - if 
diff --git a/g4f/Provider/FluxAirforce.py b/g4f/Provider/FluxAirforce.py
deleted file mode 100644
index fe003a61..00000000
--- a/g4f/Provider/FluxAirforce.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
-from urllib.parse import urlencode
-import io
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..image import ImageResponse, is_accepted_format
-
-class FluxAirforce(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://flux.api.airforce/"
-    api_endpoint = "https://api.airforce/v1/imagine2"
-    working = True
-    default_model = 'flux-realism'
-    models = [
-        'flux',
-        'flux-realism',
-        'flux-anime',
-        'flux-3d',
-        'flux-disney'
-    ]
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "origin": "https://flux.api.airforce",
-            "priority": "u=1, i",
-            "referer": "https://flux.api.airforce/",
-            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-site",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
-        }
-
-        prompt = messages[-1]['content'] if messages else ""
-
-        params = {
-            "prompt": prompt,
-            "size": kwargs.get("size", "1:1"),
-            "seed": kwargs.get("seed"),
-            "model": model
-        }
-
-        params = {k: v for k, v in params.items() if v is not None}
-
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.get(f"{cls.api_endpoint}", params=params, proxy=proxy) as response:
-                    response.raise_for_status()
-
-                    content = await response.read()
-
-                    if response.content_type.startswith('image/'):
-                        image_url = str(response.url)
-                        yield ImageResponse(image_url, prompt)
-                    else:
-                        try:
-                            text = content.decode('utf-8', errors='ignore')
-                            yield f"Error: {text}"
-                        except Exception as decode_error:
-                            yield f"Error: Unable to decode response - {str(decode_error)}"
-
-        except ClientResponseError as e:
-            yield f"Error: HTTP {e.status}: {e.message}"
-        except Exception as e:
-            yield f"Unexpected error: {str(e)}"
-
-        finally:
-            if not session.closed:
-                await session.close()
diff --git a/g4f/Provider/Rocks.py b/g4f/Provider/Rocks.py
deleted file mode 100644
index f44e0060..00000000
--- a/g4f/Provider/Rocks.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import asyncio
-import json
-from aiohttp import ClientSession
-from ..typing import Messages, AsyncResult
-from .base_provider import AsyncGeneratorProvider
-
-class Rocks(AsyncGeneratorProvider):
-    url = "https://api.airforce"
-    api_endpoint = "/chat/completions"
-    supports_message_history = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    supports_stream = True
-    supports_system_message = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        payload = {"messages":messages,"model":model,"max_tokens":4096,"temperature":1,"top_p":1,"stream":True}
-
-        headers = {
-            "Accept": "application/json",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": "Bearer missing api key",
-            "Origin": "https://llmplayground.net",
-            "Referer": "https://llmplayground.net/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
-        }
-
-        async with ClientSession() as session:
-            async with session.post(
-                f"{cls.url}{cls.api_endpoint}",
-                json=payload,
-                proxy=proxy,
-                headers=headers
-            ) as response:
-                response.raise_for_status()
-                last_chunk_time = asyncio.get_event_loop().time()
-
-                async for line in response.content:
-                    current_time = asyncio.get_event_loop().time()
-                    if current_time - last_chunk_time > 5:
-                        return
-
-                    if line.startswith(b"\n"):
-                        pass
-                    elif "discord.com/invite/" in line.decode() or "discord.gg/" in line.decode():
-                        pass # trolled
-                    elif line.startswith(b"data: "):
-                        try:
-                            line = json.loads(line[6:])
-                        except json.JSONDecodeError:
-                            continue
-                        chunk = line["choices"][0]["delta"].get("content")
-                        if chunk:
-                            yield chunk
-                        last_chunk_time = current_time
-                    else:
-                        raise Exception(f"Unexpected line: {line}")
-        return
\ No newline at end of file
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index de211c7e..e4c85d6a 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -13,6 +13,7 @@ from .AI365VIP import AI365VIP
 from .Allyfy import Allyfy
 from .AiChatOnline import AiChatOnline
 from .AiChats import AiChats
+from .Airforce import Airforce
 from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
@@ -28,7 +29,6 @@ from .DDG import DDG
 from .DeepInfra import DeepInfra
 from .DeepInfraImage import DeepInfraImage
 from .FlowGpt import FlowGpt
-from .FluxAirforce import FluxAirforce
 from .Free2GPT import Free2GPT
 from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
@@ -55,7 +55,6 @@ from .Reka import Reka
 from .Snova import Snova
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
-from .Rocks import Rocks
 from .TeachAnything import TeachAnything
 from .TwitterBio import TwitterBio
 from .Upstage import Upstage
-- cgit v1.2.3
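Note on the Airforce provider added above: the model_aliases literal repeats several keys ("llama-3-70b", "llama-3-8b", "mistral-7b", "gpt-3.5-turbo"); in a Python dict literal only the last duplicate survives, so for example "llama-3-70b" actually resolves to "llama-3-70b-chat-lite" and "gpt-3.5-turbo" to "gpt-3.5-turbo-16k-0613". A minimal consumption sketch of the new provider follows (hypothetical driver script, not part of the patch; it assumes the Airforce class streams text chunks for chat models and yields an ImageResponse for image models, as the diff shows):

    import asyncio
    from g4f.Provider import Airforce

    async def main():
        # Text model: the alias "llama-3.1-70b" resolves via model_aliases
        # and the reply arrives as streamed SSE delta chunks.
        async for chunk in Airforce.create_async_generator(
            model="llama-3.1-70b",
            messages=[{"role": "user", "content": "Say hello"}],
        ):
            print(chunk, end="", flush=True)

        # Image model: names from image_models are dispatched to
        # generate_image and yield an ImageResponse object instead of text.
        async for item in Airforce.create_async_generator(
            model="flux-realism",
            messages=[{"role": "user", "content": "a red fox, watercolor"}],
        ):
            print(item)

    asyncio.run(main())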
From f13557f779d2fe0df31d4fba080f21bc73e51b2a Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Sat, 14 Sep 2024 12:52:21 +0300
Subject: fix(g4f/Provider/Nexra.py): correct model fallback in get_model

---
 g4f/Provider/Nexra.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
index 65c50e73..b2b83837 100644
--- a/g4f/Provider/Nexra.py
+++ b/g4f/Provider/Nexra.py
@@ -56,17 +56,14 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
         "dalle-2": "dalle2",
     }
-
     @classmethod
     def get_model(cls, model: str) -> str:
         if model in cls.text_models or model in cls.image_models:
             return model
         elif model in cls.model_aliases:
             return cls.model_aliases[model]
-        elif model in cls.image_models:
-            return cls.default_image_model
         else:
-            return cls.default_chat_model
+            return cls.default_model

     @classmethod
     async def create_async_generator(
-- cgit v1.2.3
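Note on the get_model fix above: the deleted elif branch was unreachable, because the first condition already returns any model found in cls.image_models, and the final fallback now uses default_model instead of default_chat_model. The resulting resolution order, restated as a standalone sketch (illustrative only; the function and parameter names here are hypothetical):

    def resolve_model(model, known_models, aliases, default_model):
        # 1. Exact match against the provider's known models.
        if model in known_models:
            return model
        # 2. Alias lookup, e.g. "dalle-2" -> "dalle2".
        if model in aliases:
            return aliases[model]
        # 3. Anything unrecognized falls back to the provider default.
        return default_model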
"playgroundai/playground-v2.5-1024px-aesthetic"} - models = [ - *text_models, - *image_models + + text_models = [ + 'meta/meta-llama-3-70b-instruct', + 'mistralai/mixtral-8x7b-instruct-v0.1', + 'google-deepmind/gemma-2b-it', + 'yorickvp/llava-13b', ] - versions = { - # Model versions for generating images - 'stability-ai/stable-diffusion-3': [ - "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f" - ], - 'bytedance/sdxl-lightning-4step': [ - "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f" - ], - 'playgroundai/playground-v2.5-1024px-aesthetic': [ - "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24" - ], - - # Model versions for text generation - 'meta/meta-llama-3-70b-instruct': [ - "dp-cf04fe09351e25db628e8b6181276547" - ], - 'mistralai/mixtral-8x7b-instruct-v0.1': [ - "dp-89e00f489d498885048e94f9809fbc76" - ], - 'google-deepmind/gemma-2b-it': [ - "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626" - ] - } + image_models = [ + 'black-forest-labs/flux-schnell', + 'stability-ai/stable-diffusion-3', + 'bytedance/sdxl-lightning-4step', + 'playgroundai/playground-v2.5-1024px-aesthetic', + ] + models = text_models + image_models + model_aliases = { + "flux-schnell": "black-forest-labs/flux-schnell", "sd-3": "stability-ai/stable-diffusion-3", "sdxl": "bytedance/sdxl-lightning-4step", "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic", "llama-3-70b": "meta/meta-llama-3-70b-instruct", "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1", "gemma-2b": "google-deepmind/gemma-2b-it", + "llava-13b": "yorickvp/llava-13b", + } + + model_versions = { + "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d", + "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c", + "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", + "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb", + 'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db", + 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f", + 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f", + 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24", } @classmethod @@ -69,84 +71,73 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): cls, model: str, messages: Messages, - **kwargs: Any - ) -> Generator[Union[str, ImageResponse], None, None]: - yield await cls.create_async(messages[-1]["content"], model, **kwargs) - - @classmethod - async def create_async( - cls, - prompt: str, - model: str, - api_key: Optional[str] = None, - proxy: Optional[str] = None, - timeout: int = 180, - version: Optional[str] = None, - extra_data: Dict[str, Any] = {}, - **kwargs: Any - ) -> Union[str, ImageResponse]: - model = cls.get_model(model) # Use the get_model method to resolve model name + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US', - 'Connection': 'keep-alive', - 'Origin': cls.url, - 'Referer': f'{cls.url}/', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-site', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 
From d69372a9629b5ae8c1ecae7536200a764095ce38 Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Sun, 15 Sep 2024 22:02:06 +0300
Subject: refactor(ReplicateHome): update model handling and API interaction

---
 g4f/Provider/ReplicateHome.py | 211 ++++++++++++++++++++----------------------
 1 file changed, 101 insertions(+), 110 deletions(-)

(limited to 'g4f/Provider')

diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index d41633ba..7f443a7d 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -1,58 +1,60 @@
 from __future__ import annotations
-from typing import Generator, Optional, Dict, Any, Union, List
-import random
+
+import json
 import asyncio
-import base64
+from aiohttp import ClientSession, ContentTypeError

-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..typing import AsyncResult, Messages
-from ..requests import StreamSession, raise_for_status
-from ..errors import ResponseError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
 from ..image import ImageResponse

 class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://replicate.com"
-    parent = "Replicate"
+    api_endpoint = "https://homepage.replicate.com/api/prediction"
     working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
     default_model = 'meta/meta-llama-3-70b-instruct'
-    text_models = {"meta/meta-llama-3-70b-instruct", "mistralai/mixtral-8x7b-instruct-v0.1", "google-deepmind/gemma-2b-it"}
-    image_models = {"stability-ai/stable-diffusion-3", "bytedance/sdxl-lightning-4step", "playgroundai/playground-v2.5-1024px-aesthetic"}
-    models = [
-        *text_models,
-        *image_models
+
+    text_models = [
+        'meta/meta-llama-3-70b-instruct',
+        'mistralai/mixtral-8x7b-instruct-v0.1',
+        'google-deepmind/gemma-2b-it',
+        'yorickvp/llava-13b',
     ]

-    versions = {
-        # Model versions for generating images
-        'stability-ai/stable-diffusion-3': [
-            "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f"
-        ],
-        'bytedance/sdxl-lightning-4step': [
-            "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f"
-        ],
-        'playgroundai/playground-v2.5-1024px-aesthetic': [
-            "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24"
-        ],
-
-        # Model versions for text generation
-        'meta/meta-llama-3-70b-instruct': [
-            "dp-cf04fe09351e25db628e8b6181276547"
-        ],
-        'mistralai/mixtral-8x7b-instruct-v0.1': [
-            "dp-89e00f489d498885048e94f9809fbc76"
-        ],
-        'google-deepmind/gemma-2b-it': [
-            "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626"
-        ]
-    }
+    image_models = [
+        'black-forest-labs/flux-schnell',
+        'stability-ai/stable-diffusion-3',
+        'bytedance/sdxl-lightning-4step',
+        'playgroundai/playground-v2.5-1024px-aesthetic',
+    ]
+
+    models = text_models + image_models

     model_aliases = {
+        "flux-schnell": "black-forest-labs/flux-schnell",
         "sd-3": "stability-ai/stable-diffusion-3",
         "sdxl": "bytedance/sdxl-lightning-4step",
         "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
         "llama-3-70b": "meta/meta-llama-3-70b-instruct",
         "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
         "gemma-2b": "google-deepmind/gemma-2b-it",
+        "llava-13b": "yorickvp/llava-13b",
+    }
+
+    model_versions = {
+        "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
+        "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
+        "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+        "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+        'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+        'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
+        'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
+        'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
     }

     @classmethod
@@ -69,84 +71,73 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
-        **kwargs: Any
-    ) -> Generator[Union[str, ImageResponse], None, None]:
-        yield await cls.create_async(messages[-1]["content"], model, **kwargs)
-
-    @classmethod
-    async def create_async(
-        cls,
-        prompt: str,
-        model: str,
-        api_key: Optional[str] = None,
-        proxy: Optional[str] = None,
-        timeout: int = 180,
-        version: Optional[str] = None,
-        extra_data: Dict[str, Any] = {},
-        **kwargs: Any
-    ) -> Union[str, ImageResponse]:
-        model = cls.get_model(model) # Use the get_model method to resolve model name
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
         headers = {
-            'Accept-Encoding': 'gzip, deflate, br',
-            'Accept-Language': 'en-US',
-            'Connection': 'keep-alive',
-            'Origin': cls.url,
-            'Referer': f'{cls.url}/',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-site',
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": "https://replicate.com",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": "https://replicate.com/",
+            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
         }
-
-        if version is None:
-            version = random.choice(cls.versions.get(model, []))
-        if api_key is not None:
-            headers["Authorization"] = f"Bearer {api_key}"
-
-        async with StreamSession(
-            proxies={"all": proxy},
-            headers=headers,
-            timeout=timeout
-        ) as session:
+
+        async with ClientSession(headers=headers) as session:
+            if model in cls.image_models:
+                prompt = messages[-1]['content'] if messages else ""
+            else:
+                prompt = format_prompt(messages)
+
             data = {
-                "input": {
-                    "prompt": prompt,
-                    **extra_data
-                },
-                "version": version
+                "model": model,
+                "version": cls.model_versions[model],
+                "input": {"prompt": prompt},
             }
-            if api_key is None:
-                data["model"] = model
-                url = "https://homepage.replicate.com/api/prediction"
-            else:
-                url = "https://api.replicate.com/v1/predictions"
-            async with session.post(url, json=data) as response:
-                await raise_for_status(response)
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
                 result = await response.json()
-                if "id" not in result:
-                    raise ResponseError(f"Invalid response: {result}")
+                prediction_id = result['id']
+
+            poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}"
+            max_attempts = 30
+            delay = 5
+            for _ in range(max_attempts):
+                async with session.get(poll_url, proxy=proxy) as response:
+                    response.raise_for_status()
+                    try:
+                        result = await response.json()
+                    except ContentTypeError:
+                        text = await response.text()
+                        try:
+                            result = json.loads(text)
+                        except json.JSONDecodeError:
+                            raise ValueError(f"Unexpected response format: {text}")

-            while True:
-                if api_key is None:
-                    url = f"https://homepage.replicate.com/api/poll?id={result['id']}"
-                else:
-                    url = f"https://api.replicate.com/v1/predictions/{result['id']}"
-                async with session.get(url) as response:
-                    await raise_for_status(response)
-                    result = await response.json()
-                    if "status" not in result:
-                        raise ResponseError(f"Invalid response: {result}")
-                    if result["status"] == "succeeded":
-                        output = result['output']
-                        if model in cls.text_models:
-                            return ''.join(output) if isinstance(output, list) else output
-                        elif model in cls.image_models:
-                            images: List[Any] = output
-                            images = images[0] if len(images) == 1 else images
-                            return ImageResponse(images, prompt)
-                    elif result["status"] == "failed":
-                        raise ResponseError(f"Prediction failed: {result}")
-                    await asyncio.sleep(0.5)
+                    if result['status'] == 'succeeded':
+                        if model in cls.image_models:
+                            image_url = result['output'][0]
+                            yield ImageResponse(image_url, "Generated image")
+                            return
+                        else:
+                            for chunk in result['output']:
+                                yield chunk
+                            break
+                    elif result['status'] == 'failed':
+                        raise Exception(f"Prediction failed: {result.get('error')}")
+                await asyncio.sleep(delay)
+
+            if result['status'] != 'succeeded':
+                raise Exception("Prediction timed out")
-- cgit v1.2.3
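Note on the ReplicateHome rewrite above: the unbounded while-True poll at 0.5-second intervals is replaced by a bounded loop of max_attempts=30 polls at delay=5 seconds, so a prediction gets roughly 150 seconds before the timeout exception fires. The pattern in isolation (generic sketch; fetch_status is a hypothetical coroutine standing in for the GET against the poll URL):

    import asyncio

    async def poll_until_done(fetch_status, max_attempts: int = 30, delay: float = 5):
        # Poll a long-running prediction until it succeeds, fails, or times out.
        for _ in range(max_attempts):
            result = await fetch_status()  # e.g. GET .../api/poll?id=<prediction_id>
            if result['status'] == 'succeeded':
                return result
            if result['status'] == 'failed':
                raise Exception(f"Prediction failed: {result.get('error')}")
            await asyncio.sleep(delay)
        raise Exception("Prediction timed out")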