-rw-r--r--  docs/providers-and-models.md           2
-rw-r--r--  g4f/Provider/Nexra.py                118
-rw-r--r--  g4f/Provider/nexra/NexraBing.py       82
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py    66
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT4o.py  52
-rw-r--r--  g4f/Provider/nexra/NexraChatGPTWeb.py 53
-rw-r--r--  g4f/Provider/nexra/NexraGeminiPro.py  52
-rw-r--r--  g4f/Provider/nexra/NexraImageURL.py   46
-rw-r--r--  g4f/Provider/nexra/NexraLlama.py      52
-rw-r--r--  g4f/Provider/nexra/NexraQwen.py       52
-rw-r--r--  g4f/Provider/nexra/__init__.py         1
-rw-r--r--  g4f/models.py                         32
12 files changed, 533 insertions(+), 75 deletions(-)
diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index 527347db..4e105cad 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -51,7 +51,7 @@
|[magickpen.com](https://magickpen.com)|`g4f.Provider.MagickPen`|`gpt-4o-mini`|❌|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[meta.ai](https://www.meta.ai)|`g4f.Provider.MetaAI`|✔|✔|?|?|![Active](https://img.shields.io/badge/Active-brightgreen)|✔|
|[app.myshell.ai/chat](https://app.myshell.ai/chat)|`g4f.Provider.MyShell`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
-|[aryahcr.cc](https://nexra.aryahcr.cc)|`g4f.Provider.Nexra`|`gpt-3, gpt-3.5-turbo, gpt-4`|`dalle, dalle-2, dalle-mini, emi`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[aryahcr.cc](https://nexra.aryahcr.cc)|`g4f.Provider.Nexra`|`gpt-3, gpt-3.5-turbo, gpt-4, gpt-4o, gemini-pro, llama-3.1, qwen`|`dalle, dalle-2, dalle-mini, emi, sdxl-turbo, prodia`|❌|✔|![Active](https://img.shields.io/badge/Active-brightgreen)|❌|
|[openrouter.ai](https://openrouter.ai)|`g4f.Provider.OpenRouter`|✔|❌|?|?|![Disabled](https://img.shields.io/badge/Disabled-red)|❌|
|[platform.openai.com](https://platform.openai.com/)|`g4f.Provider.Openai`|✔|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
|[chatgpt.com](https://chatgpt.com/)|`g4f.Provider.OpenaiChat`|`gpt-4o, gpt-4o-mini, gpt-4`|❌|✔||![Unknown](https://img.shields.io/badge/Unknown-grey)|✔|
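
The expanded Nexra model list above is reachable through g4f's regular entry point. A minimal sketch, assuming a current g4f install and a reachable Nexra backend (model choice and prompt are illustrative):

import g4f
from g4f.Provider import Nexra

# Ask one of the newly listed Nexra text models for a completion.
response = g4f.ChatCompletion.create(
    model="gpt-4o",
    provider=Nexra,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response)
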
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
index b2b83837..33e794f6 100644
--- a/g4f/Provider/Nexra.py
+++ b/g4f/Provider/Nexra.py
@@ -1,32 +1,49 @@
from __future__ import annotations
-import json
-from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..image import ImageResponse
+from .nexra.NexraBing import NexraBing
+from .nexra.NexraChatGPT import NexraChatGPT
+from .nexra.NexraChatGPT4o import NexraChatGPT4o
+from .nexra.NexraChatGPTWeb import NexraChatGPTWeb
+from .nexra.NexraGeminiPro import NexraGeminiPro
+from .nexra.NexraImageURL import NexraImageURL
+from .nexra.NexraLlama import NexraLlama
+from .nexra.NexraQwen import NexraQwen
class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://nexra.aryahcr.cc"
- chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
- image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
+ supports_stream = True
supports_system_message = True
supports_message_history = True
-
default_model = 'gpt-3.5-turbo'
- text_models = [
- 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
- 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
- 'text-curie-001', 'text-babbage-001', 'text-ada-001',
- 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
- ]
- image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi']
- models = [*text_models, *image_models]
+ image_model = 'sdxl-turbo'
+
+ models = (
+ *NexraBing.models,
+ *NexraChatGPT.models,
+ *NexraChatGPT4o.models,
+ *NexraChatGPTWeb.models,
+ *NexraGeminiPro.models,
+ *NexraImageURL.models,
+ *NexraLlama.models,
+ *NexraQwen.models,
+ )
+
+ model_to_provider = {
+ **{model: NexraChatGPT for model in NexraChatGPT.models},
+ **{model: NexraChatGPT4o for model in NexraChatGPT4o.models},
+ **{model: NexraChatGPTWeb for model in NexraChatGPTWeb.models},
+ **{model: NexraGeminiPro for model in NexraGeminiPro.models},
+ **{model: NexraImageURL for model in NexraImageURL.models},
+ **{model: NexraLlama for model in NexraLlama.models},
+ **{model: NexraQwen for model in NexraQwen.models},
+ **{model: NexraBing for model in NexraBing.models},
+ }
    model_aliases = {
        "gpt-4": "gpt-4-0613",
@@ -53,12 +70,20 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-3": "babbage-002",
"gpt-3": "davinci-002",
+ "gpt-4": "gptweb",
+
+ "gpt-4": "Bing (Balanced)",
+ "gpt-4": "Bing (Creative)",
+ "gpt-4": "Bing (Precise)",
+
"dalle-2": "dalle2",
+ "sdxl": "sdxl-turbo",
}

    @classmethod
    def get_model(cls, model: str) -> str:
-        if model in cls.text_models or model in cls.image_models:
+        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
@@ -66,6 +91,14 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
        return cls.default_model

    @classmethod
+    def get_api_endpoint(cls, model: str) -> str:
+        provider_class = cls.model_to_provider.get(model)
+
+        if provider_class:
+            return provider_class.api_endpoint
+        raise ValueError(f"API endpoint for model {model} not found.")
+
+    @classmethod
    async def create_async_generator(
        cls,
        model: str,
@@ -74,43 +107,12 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json",
-        }
-
-        async with ClientSession(headers=headers) as session:
-            if model in cls.image_models:
-                # Image generation
-                prompt = messages[-1]['content'] if messages else ""
-                data = {
-                    "prompt": prompt,
-                    "model": model,
-                    "response": "url"
-                }
-                async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    result = await response.text()
-                    result_json = json.loads(result.strip('_'))
-                    image_url = result_json['images'][0] if result_json['images'] else None
-
-                    if image_url:
-                        yield ImageResponse(images=image_url, alt=prompt)
-            else:
-                # Text completion
-                data = {
-                    "messages": messages,
-                    "prompt": format_prompt(messages),
-                    "model": model,
-                    "markdown": False
-                }
-                async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    result = await response.text()
-
-                    try:
-                        json_response = json.loads(result)
-                        gpt_response = json_response.get('gpt', '')
-                        yield gpt_response
-                    except json.JSONDecodeError:
-                        yield result
+        provider_class = cls.model_to_provider.get(model)
+
+        if provider_class:
+            async for response in provider_class.create_async_generator(model, messages, proxy, **kwargs):
+                yield response
+        else:
+            raise ValueError(f"Provider for model {model} not found.")
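
Nexra is now a thin dispatcher: get_model() resolves aliases, model_to_provider selects the sub-provider, and that provider's generator is re-yielded unchanged. A sketch of driving the dispatcher directly (prompt illustrative; an event loop is required since the API is async):

import asyncio
from g4f.Provider import Nexra

async def main():
    # "gpt-4o" resolves to NexraChatGPT4o via model_to_provider.
    async for chunk in Nexra.create_async_generator(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello!"}],
    ):
        print(chunk)

asyncio.run(main())
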
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
new file mode 100644
index 00000000..59e06a3d
--- /dev/null
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -0,0 +1,82 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra Bing"
+    url = "https://nexra.aryahcr.cc"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+
+    bing_models = {
+        'Bing (Balanced)': 'Balanced',
+        'Bing (Creative)': 'Creative',
+        'Bing (Precise)': 'Precise'
+    }
+
+    models = [*bing_models.keys()]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+            "Origin": cls.url,
+            "Referer": f"{cls.url}/chat",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            if not prompt:
+                raise ValueError("Prompt cannot be empty")
+
+            data = {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": prompt
+                    }
+                ],
+                "conversation_style": cls.bing_models.get(model, 'Balanced'),
+                "markdown": False,
+                "stream": True,
+                "model": "Bing"
+            }
+
+            full_response = ""
+            last_message = ""
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+
+                async for line in response.content:
+                    if line:
+                        raw_data = line.decode('utf-8').strip()
+
+                        # Records are separated by ASCII 0x1E (record separator).
+                        parts = raw_data.split('\x1e')
+                        for part in parts:
+                            if part:
+                                try:
+                                    json_data = json.loads(part)
+                                except json.JSONDecodeError:
+                                    continue
+
+                                if json_data.get("error"):
+                                    raise Exception("Error in API response")
+
+                                if json_data.get("finish"):
+                                    break
+
+                                if message := json_data.get("message"):
+                                    if message != last_message:
+                                        full_response = message
+                                        last_message = message
+
+            yield full_response.strip()
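
The complements endpoint streams JSON records separated by ASCII 0x1E (the record separator restored in the split above). aiohttp's content iterator yields arbitrary chunks, so a record can straddle two reads; a standalone buffered-reassembly sketch, with the record shapes assumed from the handler above:

import json

def iter_records(chunks):
    # Reassemble \x1e-delimited JSON records from arbitrary byte chunks.
    buffer = ""
    for chunk in chunks:
        buffer += chunk.decode("utf-8")
        # Everything before the last separator is a complete record.
        *complete, buffer = buffer.split("\x1e")
        for record in complete:
            if record.strip():
                yield json.loads(record)
    if buffer.strip():
        yield json.loads(buffer)

# One record split across two simulated network reads:
reads = [b'{"message": "Hel', b'lo", "finish": false}\x1e{"finish": true}\x1e']
for record in iter_records(reads):
    print(record)
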
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
new file mode 100644
index 00000000..8ed83f98
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra ChatGPT"
+    url = "https://nexra.aryahcr.cc"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+
+    models = [
+        'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
+        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613',
+        'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
+        'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
+        'text-curie-001', 'text-babbage-001', 'text-ada-001',
+        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
+    ]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+            "Referer": f"{cls.url}/chat",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "model": model,
+                "markdown": False,
+                "messages": messages or [],
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+
+                content_type = response.headers.get('Content-Type', '')
+                if 'application/json' in content_type:
+                    result = await response.json()
+                    if result.get("status"):
+                        yield result.get("gpt", "")
+                    else:
+                        raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
+                elif 'text/plain' in content_type:
+                    text = await response.text()
+                    try:
+                        result = json.loads(text)
+                        if result.get("status"):
+                            yield result.get("gpt", "")
+                        else:
+                            raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
+                    except json.JSONDecodeError:
+                        yield text  # Not JSON, yield the raw text as-is
+                else:
+                    raise Exception(f"Unexpected response type: {content_type}. Response text: {await response.text()}")
diff --git a/g4f/Provider/nexra/NexraChatGPT4o.py b/g4f/Provider/nexra/NexraChatGPT4o.py
new file mode 100644
index 00000000..eb18d439
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGPT4o.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraChatGPT4o(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra GPT-4o"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    models = ['gpt-4o']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {'role': 'assistant', 'content': ''},
+                    {'role': 'user', 'content': format_prompt(messages)}
+                ],
+                "markdown": False,
+                "stream": True,
+                "model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ''
+                async for line in response.content:
+                    if line:
+                        records = line.decode('utf-8').split('\x1e')
+                        for record in records:
+                            try:
+                                message = json.loads(record)
+                                if message.get('message'):
+                                    full_response = message['message']
+                                if message.get('finish'):
+                                    yield full_response.strip()
+                                    return
+                            except json.JSONDecodeError:
+                                pass
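
NexraChatGPT4o shares its request and streaming logic verbatim with NexraGeminiPro, NexraLlama and NexraQwen below; only label and models differ. One possible deduplication, sketched with a hypothetical NexraComplements base class (the body is elided; it would be the POST plus \x1e-stream loop above, written once):

from g4f.Provider.base_provider import AsyncGeneratorProvider, ProviderModelMixin

class NexraComplements(AsyncGeneratorProvider, ProviderModelMixin):
    # Hypothetical shared base for the /api/chat/complements providers.
    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"

    @classmethod
    async def create_async_generator(cls, model, messages, proxy=None, **kwargs):
        ...  # the streaming body shown above, written once

class NexraChatGPT4o(NexraComplements):
    label = "Nexra GPT-4o"
    models = ['gpt-4o']

class NexraQwen(NexraComplements):
    label = "Nexra Qwen"
    models = ['qwen']
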
diff --git a/g4f/Provider/nexra/NexraChatGPTWeb.py b/g4f/Provider/nexra/NexraChatGPTWeb.py
new file mode 100644
index 00000000..e7738665
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGPTWeb.py
@@ -0,0 +1,53 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class NexraChatGPTWeb(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra ChatGPT Web"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/gptweb"
+    models = ['gptweb']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            if not prompt:
+                raise ValueError("Prompt cannot be empty")
+
+            data = {
+                "prompt": prompt,
+                "markdown": False
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+
+                full_response = ""
+                async for chunk in response.content:
+                    if chunk:
+                        # The gptweb endpoint prefixes its JSON payload with "_";
+                        # strip it before parsing, and skip unparseable partial
+                        # chunks rather than clobbering an already-received answer.
+                        result = chunk.decode("utf-8").strip().lstrip("_")
+
+                        try:
+                            json_data = json.loads(result)
+
+                            if json_data.get("status"):
+                                full_response = json_data.get("gpt", "")
+                            else:
+                                full_response = f"Error: {json_data.get('message', 'Unknown error')}"
+                        except json.JSONDecodeError:
+                            continue
+
+                yield full_response.strip()
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
new file mode 100644
index 00000000..a57daed4
--- /dev/null
+++ b/g4f/Provider/nexra/NexraGeminiPro.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra Gemini PRO"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    models = ['gemini-pro']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {'role': 'assistant', 'content': ''},
+                    {'role': 'user', 'content': format_prompt(messages)}
+                ],
+                "markdown": False,
+                "stream": True,
+                "model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ''
+                async for line in response.content:
+                    if line:
+                        records = line.decode('utf-8').split('\x1e')
+                        for record in records:
+                            try:
+                                message = json.loads(record)
+                                if message.get('message'):
+                                    full_response = message['message']
+                                if message.get('finish'):
+                                    yield full_response.strip()
+                                    return
+                            except json.JSONDecodeError:
+                                pass
diff --git a/g4f/Provider/nexra/NexraImageURL.py b/g4f/Provider/nexra/NexraImageURL.py
new file mode 100644
index 00000000..13d70757
--- /dev/null
+++ b/g4f/Provider/nexra/NexraImageURL.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+class NexraImageURL(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Image Generation Provider"
+    api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+    models = ['dalle', 'dalle2', 'dalle-mini', 'emi', 'sdxl-turbo', 'prodia']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            # Use the last message as the image prompt (as the pre-split Nexra
+            # provider did); a chat-formatted history would pollute it.
+            prompt = messages[-1]['content'] if messages else ""
+            data = {
+                "prompt": prompt,
+                "model": model,
+                "response": "url"
+            }
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                response_text = await response.text()
+
+                # The image endpoint prefixes its JSON payload with "_".
+                cleaned_response = response_text.lstrip('_')
+                response_json = json.loads(cleaned_response)
+
+                images = response_json.get("images")
+                if images:
+                    yield ImageResponse(images[0], alt=prompt)
+                else:
+                    yield "No image URL found."
diff --git a/g4f/Provider/nexra/NexraLlama.py b/g4f/Provider/nexra/NexraLlama.py
new file mode 100644
index 00000000..9ed892e8
--- /dev/null
+++ b/g4f/Provider/nexra/NexraLlama.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraLlama(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra LLaMA 3.1"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    models = ['llama-3.1']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {'role': 'assistant', 'content': ''},
+                    {'role': 'user', 'content': format_prompt(messages)}
+                ],
+                "markdown": False,
+                "stream": True,
+                "model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ''
+                async for line in response.content:
+                    if line:
+                        records = line.decode('utf-8').split('\x1e')
+                        for record in records:
+                            try:
+                                message = json.loads(record)
+                                if message.get('message'):
+                                    full_response = message['message']
+                                if message.get('finish'):
+                                    yield full_response.strip()
+                                    return
+                            except json.JSONDecodeError:
+                                pass
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
new file mode 100644
index 00000000..ae8e9a0e
--- /dev/null
+++ b/g4f/Provider/nexra/NexraQwen.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Nexra Qwen"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+    models = ['qwen']
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "Content-Type": "application/json"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": [
+                    {'role': 'assistant', 'content': ''},
+                    {'role': 'user', 'content': format_prompt(messages)}
+                ],
+                "markdown": False,
+                "stream": True,
+                "model": model
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ''
+                async for line in response.content:
+                    if line:
+                        records = line.decode('utf-8').split('\x1e')
+                        for record in records:
+                            try:
+                                message = json.loads(record)
+                                if message.get('message'):
+                                    full_response = message['message']
+                                if message.get('finish'):
+                                    yield full_response.strip()
+                                    return
+                            except json.JSONDecodeError:
+                                pass
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/g4f/Provider/nexra/__init__.py
@@ -0,0 +1 @@
+
diff --git a/g4f/models.py b/g4f/models.py
index 8a8d4e18..2940b96a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -115,7 +115,7 @@ gpt_4o = Model(
    name = 'gpt-4o',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([
-        Liaobots, Airforce, Chatgpt4o, ChatGptEs,
+        Liaobots, Nexra, Airforce, Chatgpt4o, ChatGptEs,
        OpenaiChat
    ])
)
@@ -211,7 +211,7 @@ llama_3_1_405b = Model(
llama_3_1 = Model(
name = "llama-3.1",
base_provider = "Meta Llama",
- best_provider = IterListProvider([llama_3_1_8b.best_provider, llama_3_1_70b.best_provider, llama_3_1_405b.best_provider,])
+ best_provider = IterListProvider([Nexra, llama_3_1_8b.best_provider, llama_3_1_70b.best_provider, llama_3_1_405b.best_provider,])
)
@@ -273,7 +273,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
    name = 'gemini-pro',
    base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, AIChatFree, GPROChat, Liaobots, Airforce])
+    best_provider = IterListProvider([GeminiPro, LiteIcoding, Blackbox, AIChatFree, GPROChat, Nexra, Liaobots, Airforce])
)
gemini_flash = Model(
@@ -285,10 +285,7 @@ gemini_flash = Model(
gemini = Model(
    name = 'gemini',
    base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([
-        Gemini,
-        gemini_flash.best_provider, gemini_pro.best_provider
-    ])
+    best_provider = IterListProvider([Gemini, gemini_flash.best_provider, gemini_pro.best_provider])
)
# gemma
@@ -458,9 +455,7 @@ qwen_turbo = Model(
qwen = Model(
    name = 'qwen',
    base_provider = 'Qwen',
-    best_provider = IterListProvider([
-        qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_2_72b.best_provider, qwen_turbo.best_provider
-    ])
+    best_provider = IterListProvider([Nexra, qwen_1_5_14b.best_provider, qwen_1_5_72b.best_provider, qwen_1_5_110b.best_provider, qwen_2_72b.best_provider, qwen_turbo.best_provider])
)
@@ -639,7 +634,7 @@ sonar_chat = Model(
sdxl = Model(
    name = 'sdxl',
    base_provider = 'Stability AI',
-    best_provider = IterListProvider([ReplicateHome, DeepInfraImage])
+    best_provider = IterListProvider([ReplicateHome, Nexra, DeepInfraImage])
)
@@ -734,10 +729,7 @@ dalle_3 = Model(
dalle = Model(
    name = 'dalle',
    base_provider = '',
-    best_provider = IterListProvider([
-        Nexra,
-        dalle_2.best_provider, dalle_3.best_provider,
-    ])
+    best_provider = IterListProvider([Nexra, dalle_2.best_provider, dalle_3.best_provider])
)
@@ -748,7 +740,7 @@ dalle_mini = Model(
)
-### ###
+### Other ###
emi = Model(
    name = 'emi',
    base_provider = '',
@@ -763,6 +755,13 @@ any_dark = Model(
)
+prodia = Model(
+    name = 'prodia',
+    base_provider = '',
+    best_provider = IterListProvider([Nexra])
+)
+
class ModelUtils:
"""
Utility class for mapping string identifiers to Model instances.
@@ -985,6 +984,7 @@ class ModelUtils:
        'dalle-mini': dalle_mini,
        'emi': emi,
        'any-dark': any_dark,
+        'prodia': prodia,
    }
_all_models = list(ModelUtils.convert.keys())
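
With 'prodia' registered in ModelUtils.convert, the plain string id resolves like any other model name. A quick sketch (prompt illustrative):

import g4f

# 'prodia' is looked up in ModelUtils.convert and routed to Nexra.
result = g4f.ChatCompletion.create(
    model="prodia",
    messages=[{"role": "user", "content": "a low-poly mountain at dawn"}],
)
print(result)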