Diffstat (limited to 'g4f/Provider/Nexra.py')
-rw-r--r--  g4f/Provider/Nexra.py | 118
1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
index b2b83837..33e794f6 100644
--- a/g4f/Provider/Nexra.py
+++ b/g4f/Provider/Nexra.py
@@ -1,32 +1,50 @@
 from __future__ import annotations
 
-import json
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
-from ..image import ImageResponse
+from .nexra.NexraBing import NexraBing
+from .nexra.NexraChatGPT import NexraChatGPT
+from .nexra.NexraChatGPT4o import NexraChatGPT4o
+from .nexra.NexraChatGPTWeb import NexraChatGPTWeb
+from .nexra.NexraGeminiPro import NexraGeminiPro
+from .nexra.NexraImageURL import NexraImageURL
+from .nexra.NexraLlama import NexraLlama
+from .nexra.NexraQwen import NexraQwen
 
 class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://nexra.aryahcr.cc"
-    chat_api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
-    image_api_endpoint = "https://nexra.aryahcr.cc/api/image/complements"
     working = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
+    supports_stream = True
     supports_system_message = True
     supports_message_history = True
-
     default_model = 'gpt-3.5-turbo'
-    text_models = [
-        'gpt-4', 'gpt-4-0613', 'gpt-4-32k', 'gpt-4-0314', 'gpt-4-32k-0314',
-        'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0301',
-        'gpt-3', 'text-davinci-003', 'text-davinci-002', 'code-davinci-002',
-        'text-curie-001', 'text-babbage-001', 'text-ada-001',
-        'davinci', 'curie', 'babbage', 'ada', 'babbage-002', 'davinci-002',
-    ]
-    image_models = ['dalle', 'dalle2', 'dalle-mini', 'emi']
-    models = [*text_models, *image_models]
+    image_model = 'sdxl-turbo'
+
+    models = (
+        *NexraBing.models,
+        *NexraChatGPT.models,
+        *NexraChatGPT4o.models,
+        *NexraChatGPTWeb.models,
+        *NexraGeminiPro.models,
+        *NexraImageURL.models,
+        *NexraLlama.models,
+        *NexraQwen.models,
+    )
+
+    model_to_provider = {
+        **{model: NexraChatGPT for model in NexraChatGPT.models},
+        **{model: NexraChatGPT4o for model in NexraChatGPT4o.models},
+        **{model: NexraChatGPTWeb for model in NexraChatGPTWeb.models},
+        **{model: NexraGeminiPro for model in NexraGeminiPro.models},
+        **{model: NexraImageURL for model in NexraImageURL.models},
+        **{model: NexraLlama for model in NexraLlama.models},
+        **{model: NexraQwen for model in NexraQwen.models},
+        **{model: NexraBing for model in NexraBing.models},
+    }
 
     model_aliases = {
         "gpt-4": "gpt-4-0613",
@@ -53,12 +70,20 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
         "gpt-3": "babbage-002",
         "gpt-3": "davinci-002",
 
+        "gpt-4": "gptweb",
+
+        "gpt-4": "Bing (Balanced)",
+        "gpt-4": "Bing (Creative)",
+        "gpt-4": "Bing (Precise)",
+
         "dalle-2": "dalle2",
+        "sdxl": "sdxl-turbo",
     }
-
+
+
     @classmethod
     def get_model(cls, model: str) -> str:
-        if model in cls.text_models or model in cls.image_models:
+        if model in cls.models:
             return model
         elif model in cls.model_aliases:
             return cls.model_aliases[model]
@@ -66,6 +91,14 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
             return cls.default_model
 
     @classmethod
+    def get_api_endpoint(cls, model: str) -> str:
+        provider_class = cls.model_to_provider.get(model)
+
+        if provider_class:
+            return provider_class.api_endpoint
+        raise ValueError(f"API endpoint for model {model} not found.")
+
+    @classmethod
     async def create_async_generator(
         cls,
         model: str,
@@ -74,43 +107,12 @@ class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-
-        headers = {
-            "Content-Type": "application/json",
-        }
-
-        async with ClientSession(headers=headers) as session:
-            if model in cls.image_models:
-                # Image generation
-                prompt = messages[-1]['content'] if messages else ""
-                data = {
-                    "prompt": prompt,
-                    "model": model,
-                    "response": "url"
-                }
-                async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    result = await response.text()
-                    result_json = json.loads(result.strip('_'))
-                    image_url = result_json['images'][0] if result_json['images'] else None
-
-                    if image_url:
-                        yield ImageResponse(images=image_url, alt=prompt)
-            else:
-                # Text completion
-                data = {
-                    "messages": messages,
-                    "prompt": format_prompt(messages),
-                    "model": model,
-                    "markdown": False
-                }
-                async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy) as response:
-                    response.raise_for_status()
-                    result = await response.text()
-
-                    try:
-                        json_response = json.loads(result)
-                        gpt_response = json_response.get('gpt', '')
-                        yield gpt_response
-                    except json.JSONDecodeError:
-                        yield result
+        api_endpoint = cls.get_api_endpoint(model)
+
+        provider_class = cls.model_to_provider.get(model)
+
+        if provider_class:
+            async for response in provider_class.create_async_generator(model, messages, proxy, **kwargs):
+                yield response
+        else:
+            raise ValueError(f"Provider for model {model} not found.")
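
One detail of the new model_aliases table worth knowing: a Python dict literal keeps only the last occurrence of a duplicated key, so the repeated "gpt-4" and "gpt-3" entries collapse at import time. A quick demonstration of that language behavior, using the keys from the hunk above:

```python
# Duplicate keys in a dict literal are silently collapsed; only the last
# value survives. Applied to the table above, "gpt-4" resolves to the
# final entry, "Bing (Precise)".
aliases = {
    "gpt-4": "gptweb",
    "gpt-4": "Bing (Balanced)",
    "gpt-4": "Bing (Creative)",
    "gpt-4": "Bing (Precise)",
}
print(aliases)  # {'gpt-4': 'Bing (Precise)'}
```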
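
Taken together, the class is now a thin dispatcher: get_model normalizes the requested name, model_to_provider maps it to a sub-provider, and create_async_generator re-yields that provider's stream. A minimal usage sketch, assuming the nexra.* sub-providers imported above are present, that 'gpt-3.5-turbo' appears in one of their models lists, and that each sub-provider defines an api_endpoint attribute; it is illustrative only, not part of the commit:

```python
import asyncio

from g4f.Provider.Nexra import Nexra

async def main() -> None:
    # Alias resolution: known models pass through, aliased names are mapped
    # (e.g. "sdxl" -> "sdxl-turbo"), and unknown names fall back to
    # default_model ('gpt-3.5-turbo').
    model = Nexra.get_model("sdxl")

    # Endpoint lookup delegates to the owning sub-provider; this assumes
    # 'sdxl-turbo' is listed in NexraImageURL.models.
    print(Nexra.get_api_endpoint(model))

    # Streaming: Nexra no longer posts to the API itself; it looks the
    # model up in model_to_provider and re-yields the sub-provider's output.
    messages = [{"role": "user", "content": "Hello!"}]
    async for chunk in Nexra.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```

Note that create_async_generator calls get_api_endpoint before dispatching, which acts as an early validation step: a model no sub-provider claims raises ValueError before any network work starts.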