author     kqlio67 <kqlio67@users.noreply.github.com>    2024-10-11 08:33:30 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>    2024-10-11 08:33:30 +0200
commit     a9bc67362f2be529fe9165ebb13347195ba1ddcf (patch)
tree       1a91836eaa94f14c18ad5d55f687ff8a2118c357 /g4f
parent     feat(g4f/Provider/__init__.py): add new providers and update imports (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/Nexra.py                     138
-rw-r--r--  g4f/Provider/nexra/NexraAnimagineXL.py     66
-rw-r--r--  g4f/Provider/nexra/NexraBing.py           106
-rw-r--r--  g4f/Provider/nexra/NexraBlackbox.py       101
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py         97
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT4o.py       66
-rw-r--r--  g4f/Provider/nexra/NexraChatGPTWeb.py      53
-rw-r--r--  g4f/Provider/nexra/NexraChatGptV2.py       93
-rw-r--r--  g4f/Provider/nexra/NexraChatGptWeb.py      69
-rw-r--r--  g4f/Provider/nexra/NexraDallE.py           66
-rw-r--r--  g4f/Provider/nexra/NexraDallE2.py          74
-rw-r--r--  g4f/Provider/nexra/NexraDalleMini.py       66
-rw-r--r--  g4f/Provider/nexra/NexraEmi.py             66
-rw-r--r--  g4f/Provider/nexra/NexraFluxPro.py         74
-rw-r--r--  g4f/Provider/nexra/NexraGeminiPro.py       70
-rw-r--r--  g4f/Provider/nexra/NexraImageURL.py        46
-rw-r--r--  g4f/Provider/nexra/NexraLLaMA31.py         83
-rw-r--r--  g4f/Provider/nexra/NexraLlama.py           52
-rw-r--r--  g4f/Provider/nexra/NexraMidjourney.py      66
-rw-r--r--  g4f/Provider/nexra/NexraProdiaAI.py       147
-rw-r--r--  g4f/Provider/nexra/NexraQwen.py            72
-rw-r--r--  g4f/Provider/nexra/NexraSD15.py            70
-rw-r--r--  g4f/Provider/nexra/NexraSD21.py            75
-rw-r--r--  g4f/Provider/nexra/NexraSDLora.py          68
-rw-r--r--  g4f/Provider/nexra/NexraSDTurbo.py         68
-rw-r--r--  g4f/Provider/nexra/__init__.py             22
26 files changed, 1576 insertions(+), 398 deletions(-)
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
index 33e794f6..5fcdd242 100644
--- a/g4f/Provider/Nexra.py
+++ b/g4f/Provider/Nexra.py
@@ -1,102 +1,25 @@
from __future__ import annotations
from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from .nexra.NexraBing import NexraBing
-from .nexra.NexraChatGPT import NexraChatGPT
-from .nexra.NexraChatGPT4o import NexraChatGPT4o
-from .nexra.NexraChatGPTWeb import NexraChatGPTWeb
-from .nexra.NexraGeminiPro import NexraGeminiPro
-from .nexra.NexraImageURL import NexraImageURL
-from .nexra.NexraLlama import NexraLlama
-from .nexra.NexraQwen import NexraQwen
+from ..image import ImageResponse
+
class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://nexra.aryahcr.cc"
+ label = "Nexra Animagine XL"
+ url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
- default_model = 'gpt-3.5-turbo'
- image_model = 'sdxl-turbo'
-
- models = (
- *NexraBing.models,
- *NexraChatGPT.models,
- *NexraChatGPT4o.models,
- *NexraChatGPTWeb.models,
- *NexraGeminiPro.models,
- *NexraImageURL.models,
- *NexraLlama.models,
- *NexraQwen.models,
- )
-
- model_to_provider = {
- **{model: NexraChatGPT for model in NexraChatGPT.models},
- **{model: NexraChatGPT4o for model in NexraChatGPT4o.models},
- **{model: NexraChatGPTWeb for model in NexraChatGPTWeb.models},
- **{model: NexraGeminiPro for model in NexraGeminiPro.models},
- **{model: NexraImageURL for model in NexraImageURL.models},
- **{model: NexraLlama for model in NexraLlama.models},
- **{model: NexraQwen for model in NexraQwen.models},
- **{model: NexraBing for model in NexraBing.models},
- }
-
- model_aliases = {
- "gpt-4": "gpt-4-0613",
- "gpt-4": "gpt-4-32k",
- "gpt-4": "gpt-4-0314",
- "gpt-4": "gpt-4-32k-0314",
-
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0301",
-
- "gpt-3": "text-davinci-003",
- "gpt-3": "text-davinci-002",
- "gpt-3": "code-davinci-002",
- "gpt-3": "text-curie-001",
- "gpt-3": "text-babbage-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "text-ada-001",
- "gpt-3": "davinci",
- "gpt-3": "curie",
- "gpt-3": "babbage",
- "gpt-3": "ada",
- "gpt-3": "babbage-002",
- "gpt-3": "davinci-002",
-
- "gpt-4": "gptweb",
-
- "gpt-4": "Bing (Balanced)",
- "gpt-4": "Bing (Creative)",
- "gpt-4": "Bing (Precise)",
-
- "dalle-2": "dalle2",
- "sdxl": "sdxl-turbo",
- }
+ default_model = 'animagine-xl'
+ models = [default_model]
@classmethod
def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def get_api_endpoint(cls, model: str) -> str:
- provider_class = cls.model_to_provider.get(model)
-
- if provider_class:
- return provider_class.api_endpoint
- raise ValueError(f"API endpoint for model {model} not found.")
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -104,15 +27,40 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ response: str = "url", # base64 or url
**kwargs
) -> AsyncResult:
+ # Retrieve the correct model to use
model = cls.get_model(model)
- api_endpoint = cls.get_api_endpoint(model)
- provider_class = cls.model_to_provider.get(model)
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
- if provider_class:
- async for response in provider_class.create_async_generator(model, messages, proxy, **kwargs):
- yield response
- else:
- raise ValueError(f"Provider for model {model} not found.")
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraAnimagineXL.py b/g4f/Provider/nexra/NexraAnimagineXL.py
new file mode 100644
index 00000000..d6fbc629
--- /dev/null
+++ b/g4f/Provider/nexra/NexraAnimagineXL.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraAnimagineXL(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Animagine XL"
+ url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'animagine-xl'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
index 59e06a3d..02f3724d 100644
--- a/g4f/Provider/nexra/NexraBing.py
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -1,21 +1,42 @@
from __future__ import annotations
+
from aiohttp import ClientSession
+from aiohttp.client_exceptions import ContentTypeError
+
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
import json
+
class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Bing"
+ url = "https://nexra.aryahcr.cc/documentation/bing/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
-
- bing_models = {
- 'Bing (Balanced)': 'Balanced',
- 'Bing (Creative)': 'Creative',
- 'Bing (Precise)': 'Precise'
- }
+ working = True
+ supports_gpt_4 = False
+ supports_stream = False
- models = [*bing_models.keys()]
+ default_model = 'Bing (Balanced)'
+ models = ['Bing (Balanced)', 'Bing (Creative)', 'Bing (Precise)']
+
+ model_aliases = {
+ # Duplicate dict keys are collapsed silently by Python; keep one alias per key.
+ "gpt-4": "Bing (Balanced)",
+ }
+
+ @classmethod
+ def get_model_and_style(cls, model: str) -> tuple[str, str]:
+ # Default to the default model if not found
+ model = cls.model_aliases.get(model, model)
+ if model not in cls.models:
+ model = cls.default_model
+
+ # Extract the base model and conversation style
+ base_model, conversation_style = model.split(' (')
+ conversation_style = conversation_style.rstrip(')')
+ return base_model, conversation_style
@classmethod
async def create_async_generator(
@@ -23,20 +44,19 @@ class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
**kwargs
) -> AsyncResult:
+ base_model, conversation_style = cls.get_model_and_style(model)
+
headers = {
"Content-Type": "application/json",
- "Accept": "application/json",
- "Origin": cls.url or "https://default-url.com",
- "Referer": f"{cls.url}/chat" if cls.url else "https://default-url.com/chat",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
}
-
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
- if prompt is None:
- raise ValueError("Prompt cannot be None")
-
data = {
"messages": [
{
@@ -44,39 +64,33 @@ class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
"content": prompt
}
],
- "conversation_style": cls.bing_models.get(model, 'Balanced'),
- "markdown": False,
- "stream": True,
- "model": "Bing"
+ "conversation_style": conversation_style,
+ "markdown": markdown,
+ "stream": stream,
+ "model": base_model
}
-
- full_response = ""
- last_message = ""
-
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
-
- async for line in response.content:
- if line:
- raw_data = line.decode('utf-8').strip()
-
- parts = raw_data.split('\x1e')
- for part in parts:
- if part:
- try:
- json_data = json.loads(part)
- except json.JSONDecodeError:
- continue
-
- if json_data.get("error"):
- raise Exception("Error in API response")
-
- if json_data.get("finish"):
- break
-
- if message := json_data.get("message"):
- if message != last_message:
- full_response = message
- last_message = message
+ try:
+ # Read the entire response text
+ text_response = await response.text()
+ # Split the response on the separator character
+ segments = text_response.split('\x1e')
+
+ complete_message = ""
+ for segment in segments:
+ if not segment.strip():
+ continue
+ try:
+ response_data = json.loads(segment)
+ if response_data.get('message'):
+ complete_message = response_data['message']
+ if response_data.get('finish'):
+ break
+ except json.JSONDecodeError:
+ raise Exception(f"Failed to parse segment: {segment}")
- yield full_response.strip()
+ # Yield the complete message
+ yield complete_message
+ except ContentTypeError:
+ raise Exception("Failed to parse response content type.")
diff --git a/g4f/Provider/nexra/NexraBlackbox.py b/g4f/Provider/nexra/NexraBlackbox.py
new file mode 100644
index 00000000..a8b4fca1
--- /dev/null
+++ b/g4f/Provider/nexra/NexraBlackbox.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, ClientTimeout, ClientError
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class NexraBlackbox(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Blackbox"
+ url = "https://nexra.aryahcr.cc/documentation/blackbox/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = 'blackbox'
+ models = [default_model]
+
+ model_aliases = {
+ "blackboxai": "blackbox",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
+ websearch: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ payload = {
+ "messages": [{"role": msg["role"], "content": msg["content"]} for msg in messages],
+ "websearch": websearch,
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ timeout = ClientTimeout(total=600) # 10 minutes timeout
+
+ try:
+ async with ClientSession(headers=headers, timeout=timeout) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ if response.status != 200:
+ error_text = await response.text()
+ raise Exception(f"Error: {response.status} - {error_text}")
+
+ content = await response.text()
+
+ # Split content by Record Separator character
+ parts = content.split('\x1e')
+ full_message = ""
+ links = []
+
+ for part in parts:
+ if part:
+ try:
+ json_response = json.loads(part)
+
+ if json_response.get("message"):
+ full_message = json_response["message"] # Overwrite instead of append
+
+ if isinstance(json_response.get("search"), list):
+ links = json_response["search"] # Overwrite instead of extend
+
+ if json_response.get("finish", False):
+ break
+
+ except json.JSONDecodeError:
+ pass
+
+ if full_message:
+ yield full_message.strip()
+
+ if payload["websearch"] and links:
+ yield "\n\n**Source:**"
+ for i, link in enumerate(links, start=1):
+ yield f"\n{i}. {link['title']}: {link['link']}"
+
+ except ClientError:
+ raise
+ except Exception:
+ raise
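
A usage sketch for the new Blackbox provider, assuming the import path below; with websearch=True the generator yields the answer first and then the source list, as the loop above shows:

import asyncio

from g4f.Provider.nexra.NexraBlackbox import NexraBlackbox  # assumed import path

async def main() -> None:
    messages = [{"role": "user", "content": "What is the capital of Australia?"}]
    # "blackboxai" resolves to "blackbox" via model_aliases.
    async for chunk in NexraBlackbox.create_async_generator(
        model="blackboxai", messages=messages, websearch=True
    ):
        print(chunk)

asyncio.run(main())
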
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
index 8ed83f98..f9f49139 100644
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -1,22 +1,60 @@
from __future__ import annotations
+
from aiohttp import ClientSession
+import json
+
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
-import json
+
class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra ChatGPT"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = False
+
+ default_model = 'gpt-3.5-turbo'
+ models = ['gpt-4', 'gpt-4-0613', 'gpt-4-0314', 'gpt-4-32k-0314', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002', 'gpt-3', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002']
+
+ model_aliases = {
+ # Python keeps only the last duplicate key in a dict literal, so each
+ # alias maps to a single canonical model.
+ "gpt-4": "gpt-4-0613",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+ "gpt-3": "text-davinci-003",
+ }
- models = [
- 'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
- 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613',
- 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
- 'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
- 'text-curie-001', 'text-babbage-001', 'text-ada-001',
- 'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
- ]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -26,41 +64,26 @@ class NexraChatGPT(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- "Accept": "application/json",
- "Content-Type": "application/json",
- "Referer": f"{cls.url}/chat",
+ "Content-Type": "application/json"
}
-
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
+ "messages": messages,
"prompt": prompt,
"model": model,
- "markdown": False,
- "messages": messages or [],
+ "markdown": False
}
-
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
-
- content_type = response.headers.get('Content-Type', '')
- if 'application/json' in content_type:
- result = await response.json()
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- elif 'text/plain' in content_type:
- text = await response.text()
- try:
- result = json.loads(text)
- if result.get("status"):
- yield result.get("gpt", "")
- else:
- raise Exception(f"Error in response: {result.get('message', 'Unknown error')}")
- except json.JSONDecodeError:
- yield text # If not JSON, return text
- else:
- raise Exception(f"Unexpected response type: {content_type}. Response text: {await response.text()}")
-
+ response_text = await response.text()
+ try:
+ if response_text.startswith('_'):
+ response_text = response_text[1:]
+ response_data = json.loads(response_text)
+ yield response_data.get('gpt', '')
+ except json.JSONDecodeError:
+ yield ''
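
This provider and NexraChatGptWeb below both strip a leading underscore before parsing, since the endpoint can prefix its JSON body with one. A minimal sketch of that defensive parse over a fabricated body:

import json

raw = '_{"gpt": "Hello!", "status": true}'  # fabricated body with the "_" prefix

if raw.startswith('_'):
    raw = raw[1:]
try:
    print(json.loads(raw).get('gpt', ''))  # -> Hello!
except json.JSONDecodeError:
    print('')  # fall back to empty output, as the provider does
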
diff --git a/g4f/Provider/nexra/NexraChatGPT4o.py b/g4f/Provider/nexra/NexraChatGPT4o.py
index eb18d439..62144163 100644
--- a/g4f/Provider/nexra/NexraChatGPT4o.py
+++ b/g4f/Provider/nexra/NexraChatGPT4o.py
@@ -1,17 +1,26 @@
from __future__ import annotations
-import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
-
+import json
class NexraChatGPT4o(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra GPT-4o"
+ label = "Nexra ChatGPT4o"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['gpt-4o']
+ working = True
+ supports_gpt_4 = True
+ supports_stream = False
+
+ default_model = 'gpt-4o'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -21,32 +30,45 @@ class NexraChatGPT4o(AsyncGeneratorProvider, ProviderModelMixin):
proxy: str = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- "Content-Type": "application/json"
+ "Content-Type": "application/json",
}
async with ClientSession(headers=headers) as session:
data = {
"messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
],
+ "stream": False,
"markdown": False,
- "stream": True,
"model": model
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
+ buffer = ""
+ last_message = ""
+ async for chunk in response.content.iter_any():
+ chunk_str = chunk.decode()
+ buffer += chunk_str
+ while '{' in buffer and '}' in buffer:
+ start = buffer.index('{')
+ end = buffer.index('}', start) + 1
+ json_str = buffer[start:end]
+ buffer = buffer[end:]
+ try:
+ json_obj = json.loads(json_str)
+ if json_obj.get("finish"):
+ if last_message:
+ yield last_message
+ return
+ elif json_obj.get("message"):
+ last_message = json_obj["message"]
+ except json.JSONDecodeError:
+ pass
+
+ if last_message:
+ yield last_message
diff --git a/g4f/Provider/nexra/NexraChatGPTWeb.py b/g4f/Provider/nexra/NexraChatGPTWeb.py
deleted file mode 100644
index e7738665..00000000
--- a/g4f/Provider/nexra/NexraChatGPTWeb.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-import json
-
-class NexraChatGPTWeb(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra ChatGPT Web"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/gptweb"
- models = ['gptweb']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- if prompt is None:
- raise ValueError("Prompt cannot be None")
-
- data = {
- "prompt": prompt,
- "markdown": False
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
-
- full_response = ""
- async for chunk in response.content:
- if chunk:
- result = chunk.decode("utf-8").strip()
-
- try:
- json_data = json.loads(result)
-
- if json_data.get("status"):
- full_response = json_data.get("gpt", "")
- else:
- full_response = f"Error: {json_data.get('message', 'Unknown error')}"
- except json.JSONDecodeError:
- full_response = "Error: Invalid JSON response."
-
- yield full_response.strip()
diff --git a/g4f/Provider/nexra/NexraChatGptV2.py b/g4f/Provider/nexra/NexraChatGptV2.py
new file mode 100644
index 00000000..c0faf93a
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGptV2.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraChatGptV2(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra ChatGPT v2"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_gpt_4 = True
+ supports_stream = True
+
+ default_model = 'chatgpt'
+ models = [default_model]
+
+ model_aliases = {
+ "gpt-4": "chatgpt",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ if stream:
+ # Streamed response handling (stream=True)
+ collected_message = ""
+ async for chunk in response.content.iter_any():
+ if chunk:
+ decoded_chunk = chunk.decode().strip().split("\x1e")
+ for part in decoded_chunk:
+ if part:
+ message_data = json.loads(part)
+
+ # Collect messages until 'finish': true
+ if 'message' in message_data and message_data['message']:
+ collected_message = message_data['message']
+
+ # When finish is true, yield the final collected message
+ if message_data.get('finish', False):
+ yield collected_message
+ return
+ else:
+ # Non-streamed response handling (stream=False)
+ response_data = await response.json(content_type=None)
+
+ # Yield the message directly from the response
+ if 'message' in response_data and response_data['message']:
+ yield response_data['message']
+ return
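
NexraChatGptV2 (and NexraLLaMA31 below, which duplicates this logic) switches parsing strategy on the stream flag: \x1e-delimited records when streaming, a single JSON body otherwise. A usage sketch exercising both modes, assuming the import path:

import asyncio

from g4f.Provider.nexra.NexraChatGptV2 import NexraChatGptV2  # assumed import path

async def ask(stream: bool) -> None:
    messages = [{"role": "user", "content": "Name three prime numbers"}]
    # "gpt-4" resolves to "chatgpt" via model_aliases.
    async for text in NexraChatGptV2.create_async_generator(
        model="gpt-4", messages=messages, stream=stream
    ):
        print(text)

asyncio.run(ask(stream=False))  # single JSON body, yielded once
asyncio.run(ask(stream=True))   # record stream, final collected message yielded once
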
diff --git a/g4f/Provider/nexra/NexraChatGptWeb.py b/g4f/Provider/nexra/NexraChatGptWeb.py
new file mode 100644
index 00000000..d14a2162
--- /dev/null
+++ b/g4f/Provider/nexra/NexraChatGptWeb.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, ContentTypeError
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraChatGptWeb(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra ChatGPT Web"
+ url = "https://nexra.aryahcr.cc/documentation/chatgpt/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/{}"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+
+ default_model = 'gptweb'
+ models = [default_model]
+
+ model_aliases = {
+ "gpt-4": "gptweb",
+ }
+
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "Content-Type": "application/json"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "markdown": markdown
+ }
+ model = cls.get_model(model)
+ endpoint = cls.api_endpoint.format(model)
+ async with session.post(endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ # Remove leading underscore if present
+ if response_text.startswith('_'):
+ response_text = response_text[1:]
+
+ try:
+ response_data = json.loads(response_text)
+ yield response_data.get('gpt', response_text)
+ except json.JSONDecodeError:
+ yield response_text
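
Unlike the other chat providers, NexraChatGptWeb's api_endpoint is a template and the resolved model name is substituted into the URL path. A short illustration, assuming the import path:

from g4f.Provider.nexra.NexraChatGptWeb import NexraChatGptWeb  # assumed import path

model = NexraChatGptWeb.get_model("gpt-4")         # alias resolves to "gptweb"
print(NexraChatGptWeb.api_endpoint.format(model))  # -> https://nexra.aryahcr.cc/api/chat/gptweb
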
diff --git a/g4f/Provider/nexra/NexraDallE.py b/g4f/Provider/nexra/NexraDallE.py
new file mode 100644
index 00000000..9c8ad12d
--- /dev/null
+++ b/g4f/Provider/nexra/NexraDallE.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraDallE(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra DALL-E"
+ url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'dalle'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraDallE2.py b/g4f/Provider/nexra/NexraDallE2.py
new file mode 100644
index 00000000..6b46e8cb
--- /dev/null
+++ b/g4f/Provider/nexra/NexraDallE2.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraDallE2(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra DALL-E 2"
+ url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'dalle2'
+ models = [default_model]
+ model_aliases = {
+ "dalle-2": "dalle2",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraDalleMini.py b/g4f/Provider/nexra/NexraDalleMini.py
new file mode 100644
index 00000000..7fcc7a81
--- /dev/null
+++ b/g4f/Provider/nexra/NexraDalleMini.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraDalleMini(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra DALL-E Mini"
+ url = "https://nexra.aryahcr.cc/documentation/dall-e/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'dalle-mini'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraEmi.py b/g4f/Provider/nexra/NexraEmi.py
new file mode 100644
index 00000000..0d3ed6ba
--- /dev/null
+++ b/g4f/Provider/nexra/NexraEmi.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraEmi(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Emi"
+ url = "https://nexra.aryahcr.cc/documentation/emi/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'emi'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraFluxPro.py b/g4f/Provider/nexra/NexraFluxPro.py
new file mode 100644
index 00000000..1dbab633
--- /dev/null
+++ b/g4f/Provider/nexra/NexraFluxPro.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraFluxPro(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Flux PRO"
+ url = "https://nexra.aryahcr.cc/documentation/flux-pro/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'flux'
+ models = [default_model]
+ model_aliases = {
+ "flux-pro": "flux",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraGeminiPro.py b/g4f/Provider/nexra/NexraGeminiPro.py
index a57daed4..651f7cb4 100644
--- a/g4f/Provider/nexra/NexraGeminiPro.py
+++ b/g4f/Provider/nexra/NexraGeminiPro.py
@@ -1,17 +1,25 @@
from __future__ import annotations
-import json
from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
+import json
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
+from ...typing import AsyncResult, Messages
class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Gemini PRO"
+ url = "https://nexra.aryahcr.cc/documentation/gemini-pro/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['gemini-pro']
+ working = True
+ supports_stream = True
+
+ default_model = 'gemini-pro'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -19,34 +27,42 @@ class NexraGeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
"Content-Type": "application/json"
}
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "markdown": markdown,
+ "stream": stream,
+ "model": model
+ }
+
async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
+ buffer = ""
+ async for chunk in response.content.iter_any():
+ if chunk.strip(): # Check if chunk is not empty
+ buffer += chunk.decode()
+ while '\x1e' in buffer:
+ part, buffer = buffer.split('\x1e', 1)
+ if part.strip():
+ try:
+ response_json = json.loads(part)
+ message = response_json.get("message", "")
+ if message:
+ yield message
+ except json.JSONDecodeError as e:
+ print(f"JSONDecodeError: {e}")
diff --git a/g4f/Provider/nexra/NexraImageURL.py b/g4f/Provider/nexra/NexraImageURL.py
deleted file mode 100644
index 13d70757..00000000
--- a/g4f/Provider/nexra/NexraImageURL.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from __future__ import annotations
-from aiohttp import ClientSession
-import json
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-from ...image import ImageResponse
-
-class NexraImageURL(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Image Generation Provider"
- api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
- models = ['dalle', 'dalle2', 'dalle-mini', 'emi', 'sdxl-turbo', 'prodia']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "model": model,
- "response": "url"
- }
-
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- cleaned_response = response_text.lstrip('_')
- response_json = json.loads(cleaned_response)
-
- images = response_json.get("images")
- if images and len(images) > 0:
- image_response = ImageResponse(images[0], alt="Generated Image")
- yield image_response
- else:
- yield "No image URL found."
diff --git a/g4f/Provider/nexra/NexraLLaMA31.py b/g4f/Provider/nexra/NexraLLaMA31.py
new file mode 100644
index 00000000..c67febb3
--- /dev/null
+++ b/g4f/Provider/nexra/NexraLLaMA31.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class NexraLLaMA31(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra LLaMA 3.1"
+ url = "https://nexra.aryahcr.cc/documentation/llama-3.1/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
+ working = True
+ supports_stream = True
+
+ default_model = 'llama-3.1'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": prompt
+ }
+ ],
+ "stream": stream,
+ "markdown": markdown,
+ "model": model
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ if stream:
+ # Streamed response handling
+ collected_message = ""
+ async for chunk in response.content.iter_any():
+ if chunk:
+ decoded_chunk = chunk.decode().strip().split("\x1e")
+ for part in decoded_chunk:
+ if part:
+ message_data = json.loads(part)
+
+ # Collect messages until 'finish': true
+ if 'message' in message_data and message_data['message']:
+ collected_message = message_data['message']
+
+ # When finish is true, yield the final collected message
+ if message_data.get('finish', False):
+ yield collected_message
+ return
+ else:
+ # Non-streamed response handling
+ response_data = await response.json(content_type=None)
+
+ # Yield the message directly from the response
+ if 'message' in response_data and response_data['message']:
+ yield response_data['message']
+ return
diff --git a/g4f/Provider/nexra/NexraLlama.py b/g4f/Provider/nexra/NexraLlama.py
deleted file mode 100644
index 9ed892e8..00000000
--- a/g4f/Provider/nexra/NexraLlama.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from __future__ import annotations
-
-import json
-from aiohttp import ClientSession
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
-
-
-class NexraLlama(AsyncGeneratorProvider, ProviderModelMixin):
- label = "Nexra LLaMA 3.1"
- api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['llama-3.1']
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- headers = {
- "Content-Type": "application/json"
- }
- async with ClientSession(headers=headers) as session:
- data = {
- "messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
- ],
- "markdown": False,
- "stream": True,
- "model": model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- full_response = ''
- async for line in response.content:
- if line:
- messages = line.decode('utf-8').split('\x1e')
- for message_str in messages:
- try:
- message = json.loads(message_str)
- if message.get('message'):
- full_response = message['message']
- if message.get('finish'):
- yield full_response.strip()
- return
- except json.JSONDecodeError:
- pass
diff --git a/g4f/Provider/nexra/NexraMidjourney.py b/g4f/Provider/nexra/NexraMidjourney.py
new file mode 100644
index 00000000..3d6a4960
--- /dev/null
+++ b/g4f/Provider/nexra/NexraMidjourney.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraMidjourney(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Midjourney"
+ url = "https://nexra.aryahcr.cc/documentation/midjourney/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'midjourney'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ # Retrieve the correct model to use
+ model = cls.get_model(model)
+
+ # Format the prompt from the messages
+ prompt = messages[0]['content']
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ payload = {
+ "prompt": prompt,
+ "model": model,
+ "response": response
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
+ response.raise_for_status()
+ text_data = await response.text()
+
+ try:
+ # Parse the JSON response
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+ data = json.loads(json_data)
+
+ # Check if the response contains images
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][0]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
diff --git a/g4f/Provider/nexra/NexraProdiaAI.py b/g4f/Provider/nexra/NexraProdiaAI.py
new file mode 100644
index 00000000..262558fd
--- /dev/null
+++ b/g4f/Provider/nexra/NexraProdiaAI.py
@@ -0,0 +1,147 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraProdiaAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Prodia AI"
+ url = "https://nexra.aryahcr.cc/documentation/prodia/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
+ models = [
+ '3Guofeng3_v34.safetensors [50f420de]',
+ 'absolutereality_V16.safetensors [37db0fc3]',
+ default_model,
+ 'amIReal_V41.safetensors [0a8a2e61]',
+ 'analog-diffusion-1.0.ckpt [9ca13f02]',
+ 'aniverse_v30.safetensors [579e6f85]',
+ 'anythingv3_0-pruned.ckpt [2700c435]',
+ 'anything-v4.5-pruned.ckpt [65745d25]',
+ 'anythingV5_PrtRE.safetensors [893e49b9]',
+ 'AOM3A3_orangemixs.safetensors [9600da17]',
+ 'blazing_drive_v10g.safetensors [ca1c1eab]',
+ 'breakdomain_I2428.safetensors [43cc7d2f]',
+ 'breakdomain_M2150.safetensors [15f7afca]',
+ 'cetusMix_Version35.safetensors [de2f2560]',
+ 'childrensStories_v13D.safetensors [9dfaabcb]',
+ 'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
+ 'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
+ 'Counterfeit_v30.safetensors [9e2a8f19]',
+ 'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
+ 'cyberrealistic_v33.safetensors [82b0d085]',
+ 'dalcefo_v4.safetensors [425952fe]',
+ 'deliberate_v2.safetensors [10ec4b29]',
+ 'deliberate_v3.safetensors [afd9d2d4]',
+ 'dreamlike-anime-1.0.safetensors [4520e090]',
+ 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
+ 'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
+ 'dreamshaper_6BakedVae.safetensors [114c8abb]',
+ 'dreamshaper_7.safetensors [5cf5ae06]',
+ 'dreamshaper_8.safetensors [9d40847d]',
+ 'edgeOfRealism_eorV20.safetensors [3ed5de15]',
+ 'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
+ 'elldreths-vivid-mix.safetensors [342d9d26]',
+ 'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
+ 'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
+ 'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
+ 'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
+ 'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
+ 'juggernaut_aftermath.safetensors [5e20c455]',
+ 'lofi_v4.safetensors [ccc204d6]',
+ 'lyriel_v16.safetensors [68fceea2]',
+ 'majicmixRealistic_v4.safetensors [29d0de58]',
+ 'mechamix_v10.safetensors [ee685731]',
+ 'meinamix_meinaV9.safetensors [2ec66ab0]',
+ 'meinamix_meinaV11.safetensors [b56ce717]',
+ 'neverendingDream_v122.safetensors [f964ceeb]',
+ 'openjourney_V4.ckpt [ca2f377f]',
+ 'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
+ 'portraitplus_V1.0.safetensors [1400e684]',
+ 'protogenx34.safetensors [5896f8d5]',
+ 'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
+ 'Realistic_Vision_V2.0.safetensors [79587710]',
+ 'Realistic_Vision_V4.0.safetensors [29a7afaa]',
+ 'Realistic_Vision_V5.0.safetensors [614d1063]',
+ 'Realistic_Vision_V5.1.safetensors [a0f13c83]',
+ 'redshift_diffusion-V10.safetensors [1400e684]',
+ 'revAnimated_v122.safetensors [3f4fefd9]',
+ 'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
+ 'rundiffusionFX_v10.safetensors [cd4e694d]',
+ 'sdv1_4.ckpt [7460a6fa]',
+ 'v1-5-pruned-emaonly.safetensors [d7049739]',
+ 'v1-5-inpainting.safetensors [21c7ab71]',
+ 'shoninsBeautiful_v10.safetensors [25d8c546]',
+ 'theallys-mix-ii-churned.safetensors [5d9225a4]',
+ 'timeless-1.0.ckpt [7c4971d4]',
+ 'toonyou_beta6.safetensors [980f6b15]',
+ ]
+
+ model_aliases = {
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str, # Select from the list of models
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ steps: str = 25, # Min: 1, Max: 30
+ cfg_scale: str = 7, # Min: 0, Max: 20
+ sampler: str = "DPM++ 2M Karras", # Select from these: "Euler","Euler a","Heun","DPM++ 2M Karras","DPM++ SDE Karras","DDIM"
+ negative_prompt: str = "", # Indicates what the AI should not do
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[0]['content']
+ data = {
+ "prompt": prompt,
+ "model": "prodia",
+ "response": response,
+ "data": {
+ "model": model,
+ "steps": steps,
+ "cfg_scale": cfg_scale,
+ "sampler": sampler,
+ "negative_prompt": negative_prompt
+ }
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ text_data = await response.text()
+
+ if response.status == 200:
+ try:
+ json_start = text_data.find('{')
+ json_data = text_data[json_start:]
+
+ data = json.loads(json_data)
+ if 'images' in data and len(data['images']) > 0:
+ image_url = data['images'][-1]
+ yield ImageResponse(image_url, prompt)
+ else:
+ yield ImageResponse("No images found in the response.", prompt)
+ except json.JSONDecodeError:
+ yield ImageResponse("Failed to parse JSON. Response might not be in JSON format.", prompt)
+ else:
+ yield ImageResponse(f"Request failed with status: {response.status}", prompt)
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
index ae8e9a0e..8bdf5475 100644
--- a/g4f/Provider/nexra/NexraQwen.py
+++ b/g4f/Provider/nexra/NexraQwen.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-import json
from aiohttp import ClientSession
+import json
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -10,8 +10,17 @@ from ..helper import format_prompt
class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
label = "Nexra Qwen"
+ url = "https://nexra.aryahcr.cc/documentation/qwen/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- models = ['qwen']
+ working = True
+ supports_stream = True
+
+ default_model = 'qwen'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -19,34 +28,59 @@ class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ stream: bool = False,
+ markdown: bool = False,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
- "Content-Type": "application/json"
+ "Content-Type": "application/json",
+ "accept": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/chat",
}
async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
data = {
"messages": [
- {'role': 'assistant', 'content': ''},
- {'role': 'user', 'content': format_prompt(messages)}
+ {
+ "role": "user",
+ "content": prompt
+ }
],
- "markdown": False,
- "stream": True,
+ "markdown": markdown,
+ "stream": stream,
"model": model
}
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
-                full_response = ''
-                async for line in response.content:
-                    if line:
-                        messages = line.decode('utf-8').split('\x1e')
-                        for message_str in messages:
-                            try:
-                                message = json.loads(message_str)
-                                if message.get('message'):
-                                    full_response = message['message']
-                                if message.get('finish'):
-                                    yield full_response.strip()
-                                    return
-                            except json.JSONDecodeError:
-                                pass
+
+                complete_message = ""
+
+                # If streaming, buffer the bytes: a JSON record may span chunk
+                # boundaries, so split on the record separator and keep the tail
+                if stream:
+                    buffer = b""
+                    async for chunk in response.content.iter_any():
+                        if chunk:
+                            buffer += chunk
+                            parts = buffer.split(b'\x1e')
+                            buffer = parts.pop()  # keep any incomplete trailing record
+                            for part in parts:
+                                if not part.strip():
+                                    continue
+                                try:
+                                    response_data = json.loads(part)
+                                except json.JSONDecodeError:
+                                    continue
+                                # Each record carries the complete message so far
+                                message_part = response_data.get('message')
+                                if message_part:
+                                    complete_message = message_part
+
+                    # Flush any complete record left in the buffer
+                    if buffer.strip():
+                        try:
+                            response_data = json.loads(buffer)
+                            message_part = response_data.get('message')
+                            if message_part:
+                                complete_message = message_part
+                        except json.JSONDecodeError:
+                            pass
+
+                    # Yield the final complete message
+                    if complete_message:
+                        yield complete_message
+                else:
+                    # Handle the non-streaming response as a single JSON document
+                    text_response = await response.text()
+                    response_data = json.loads(text_response)
+                    message = response_data.get('message')
+                    if message:
+                        yield message
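Usage sketch (illustrative, not part of the patch) for the streaming path
added above:

    import asyncio
    from g4f.Provider.nexra import NexraQwen

    async def main():
        messages = [{"role": "user", "content": "Explain event loops in one paragraph."}]
        async for text in NexraQwen.create_async_generator(
            model="qwen", messages=messages, stream=True
        ):
            print(text)  # the final accumulated message

    asyncio.run(main())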
diff --git a/g4f/Provider/nexra/NexraSD15.py b/g4f/Provider/nexra/NexraSD15.py
new file mode 100644
index 00000000..410947df
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSD15.py
@@ -0,0 +1,72 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+from ...image import ImageResponse
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class NexraSD15(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion 1.5"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'stablediffusion-1.5'
+ models = [default_model]
+
+ model_aliases = {
+ "sd-1.5": "stablediffusion-1.5",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json",
+ }
+ async with ClientSession(headers=headers) as session:
+            prompt = messages[0]['content']
+            data = {
+                "prompt": prompt,
+                "model": model,
+                "response": response
+            }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ text_response = await response.text()
+
+                # Strip any stray underscores around the JSON payload
+                cleaned_response = text_response.strip('_')
+
+ if not cleaned_response.strip():
+ raise ValueError("Received an empty response from the server.")
+
+                try:
+                    json_response = json.loads(cleaned_response)
+                except json.JSONDecodeError:
+                    raise ValueError("Unable to decode JSON from the received text response.")
+
+                images = json_response.get("images") or []
+                if not images:
+                    raise ValueError("No images found in the response.")
+                yield ImageResponse(images=images[0], alt="Generated Image")
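For reference, the alias resolution these new providers share (informal,
derived from the get_model above):

    from g4f.Provider.nexra import NexraSD15

    print(NexraSD15.get_model("sd-1.5"))         # "stablediffusion-1.5"
    print(NexraSD15.get_model("anything-else"))  # falls back to "stablediffusion-1.5"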
diff --git a/g4f/Provider/nexra/NexraSD21.py b/g4f/Provider/nexra/NexraSD21.py
new file mode 100644
index 00000000..fc5c90d9
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSD21.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+from ...image import ImageResponse
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class NexraSD21(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion 2.1"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'stablediffusion-2.1'
+ models = [default_model]
+
+ model_aliases = {
+ "sd-2.1": "stablediffusion-2.1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json",
+ }
+ async with ClientSession(headers=headers) as session:
+            prompt = messages[0]['content']
+            data = {
+                "prompt": prompt,
+                "model": model,
+                "response": response,
+                "data": {
+                    "prompt_negative": "",
+                    "guidance_scale": 9
+                }
+            }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ text_response = await response.text()
+
+                # Strip any stray underscores around the JSON payload
+                cleaned_response = text_response.strip('_')
+
+ if not cleaned_response.strip():
+ raise ValueError("Received an empty response from the server.")
+
+                try:
+                    json_response = json.loads(cleaned_response)
+                except json.JSONDecodeError:
+                    raise ValueError("Unable to decode JSON from the received text response.")
+
+                images = json_response.get("images") or []
+                if not images:
+                    raise ValueError("No images found in the response.")
+                yield ImageResponse(images=images[0], alt="Generated Image")
diff --git a/g4f/Provider/nexra/NexraSDLora.py b/g4f/Provider/nexra/NexraSDLora.py
new file mode 100644
index 00000000..ad986507
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSDLora.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraSDLora(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion Lora"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'sdxl-lora'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+        guidance: float = 0.3, # Min: 0, Max: 5
+        steps: int = 2, # Min: 2, Max: 10
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[0]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "response": response,
+ "data": {
+ "guidance": guidance,
+ "steps": steps
+ }
+ }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                text_data = await response.text()
+
+                try:
+                    # The payload may be prefixed with stray characters; skip to the first brace
+                    json_start = text_data.find('{')
+                    response_data = json.loads(text_data[json_start:])
+                except json.JSONDecodeError:
+                    raise ValueError("Failed to parse JSON. Response might not be in JSON format.")
+
+                if response_data.get('images'):
+                    yield ImageResponse(response_data['images'][-1], prompt)
+                else:
+                    raise ValueError("No images found in the response.")
diff --git a/g4f/Provider/nexra/NexraSDTurbo.py b/g4f/Provider/nexra/NexraSDTurbo.py
new file mode 100644
index 00000000..feb59f0b
--- /dev/null
+++ b/g4f/Provider/nexra/NexraSDTurbo.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+
+
+class NexraSDTurbo(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Nexra Stable Diffusion Turbo"
+ url = "https://nexra.aryahcr.cc/documentation/stable-diffusion/en"
+ api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
+ working = True
+
+ default_model = 'sdxl-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ response: str = "url", # base64 or url
+        strength: float = 0.7, # Min: 0, Max: 1
+        steps: int = 2, # Min: 1, Max: 10
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "Content-Type": "application/json"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = messages[0]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "response": response,
+ "data": {
+ "strength": strength,
+ "steps": steps
+ }
+ }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                text_data = await response.text()
+
+                try:
+                    # The payload may be prefixed with stray characters; skip to the first brace
+                    json_start = text_data.find('{')
+                    response_data = json.loads(text_data[json_start:])
+                except json.JSONDecodeError:
+                    raise ValueError("Failed to parse JSON. Response might not be in JSON format.")
+
+                if response_data.get('images'):
+                    yield ImageResponse(response_data['images'][-1], prompt)
+                else:
+                    raise ValueError("No images found in the response.")
diff --git a/g4f/Provider/nexra/__init__.py b/g4f/Provider/nexra/__init__.py
index 8b137891..d8b9218f 100644
--- a/g4f/Provider/nexra/__init__.py
+++ b/g4f/Provider/nexra/__init__.py
@@ -1 +1,21 @@
-
+from .NexraAnimagineXL import NexraAnimagineXL
+from .NexraBing import NexraBing
+from .NexraBlackbox import NexraBlackbox
+from .NexraChatGPT import NexraChatGPT
+from .NexraChatGPT4o import NexraChatGPT4o
+from .NexraChatGptV2 import NexraChatGptV2
+from .NexraChatGptWeb import NexraChatGptWeb
+from .NexraDallE import NexraDallE
+from .NexraDallE2 import NexraDallE2
+from .NexraDalleMini import NexraDalleMini
+from .NexraEmi import NexraEmi
+from .NexraFluxPro import NexraFluxPro
+from .NexraGeminiPro import NexraGeminiPro
+from .NexraLLaMA31 import NexraLLaMA31
+from .NexraMidjourney import NexraMidjourney
+from .NexraProdiaAI import NexraProdiaAI
+from .NexraQwen import NexraQwen
+from .NexraSD15 import NexraSD15
+from .NexraSD21 import NexraSD21
+from .NexraSDLora import NexraSDLora
+from .NexraSDTurbo import NexraSDTurbo
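With these exports in place, the intended import surface looks like this
(informal sketch, using the default models defined above):

    from g4f.Provider.nexra import NexraQwen, NexraSDTurbo

    print(NexraQwen.default_model)     # "qwen"
    print(NexraSDTurbo.default_model)  # "sdxl-turbo"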