Diffstat (limited to 'g4f/Provider/nexra/NexraBing.py')
-rw-r--r--    g4f/Provider/nexra/NexraBing.py    141
1 file changed, 69 insertions(+), 72 deletions(-)
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
index 716e9254..28f0b117 100644
--- a/g4f/Provider/nexra/NexraBing.py
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -1,96 +1,93 @@
from __future__ import annotations
-from aiohttp import ClientSession
-from aiohttp.client_exceptions import ContentTypeError
-
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import format_prompt
import json
+import requests
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
+from ..helper import format_prompt
-class NexraBing(AsyncGeneratorProvider, ProviderModelMixin):
+class NexraBing(AbstractProvider, ProviderModelMixin):
label = "Nexra Bing"
url = "https://nexra.aryahcr.cc/documentation/bing/en"
api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
- working = False
- supports_gpt_4 = False
- supports_stream = False
+ working = True
+ supports_stream = True
- default_model = 'Bing (Balanced)'
- models = ['Bing (Balanced)', 'Bing (Creative)', 'Bing (Precise)']
+ default_model = 'Balanced'
+ models = [default_model, 'Creative', 'Precise']
model_aliases = {
- "gpt-4": "Bing (Balanced)",
- "gpt-4": "Bing (Creative)",
- "gpt-4": "Bing (Precise)",
+ "gpt-4": "Balanced",
+ "gpt-4": "Creative",
+ "gpt-4": "Precise",
}
@classmethod
- def get_model_and_style(cls, model: str) -> tuple[str, str]:
- # Default to the default model if not found
- model = cls.model_aliases.get(model, model)
- if model not in cls.models:
- model = cls.default_model
-
- # Extract the base model and conversation style
- base_model, conversation_style = model.split(' (')
- conversation_style = conversation_style.rstrip(')')
- return base_model, conversation_style
-
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
@classmethod
- async def create_async_generator(
+ def create_completion(
cls,
model: str,
messages: Messages,
- proxy: str = None,
stream: bool = False,
+ proxy: str = None,
markdown: bool = False,
**kwargs
- ) -> AsyncResult:
- base_model, conversation_style = cls.get_model_and_style(model)
-
+ ) -> CreateResult:
+ model = cls.get_model(model)
+
headers = {
- "Content-Type": "application/json",
- "origin": cls.url,
- "referer": f"{cls.url}/chat",
+ 'Content-Type': 'application/json'
+ }
+
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ],
+ "conversation_style": model,
+ "markdown": markdown,
+ "stream": stream,
+ "model": "Bing"
}
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "messages": [
- {
- "role": "user",
- "content": prompt
- }
- ],
- "conversation_style": conversation_style,
- "markdown": markdown,
- "stream": stream,
- "model": base_model
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- try:
- # Read the entire response text
- text_response = await response.text()
- # Split the response on the separator character
- segments = text_response.split('\x1e')
-
- complete_message = ""
- for segment in segments:
- if not segment.strip():
- continue
- try:
- response_data = json.loads(segment)
- if response_data.get('message'):
- complete_message = response_data['message']
- if response_data.get('finish'):
- break
- except json.JSONDecodeError:
- raise Exception(f"Failed to parse segment: {segment}")
+
+ response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=True)
+
+ return cls.process_response(response)
+
+ @classmethod
+ def process_response(cls, response):
+ if response.status_code != 200:
+ yield f"Error: {response.status_code}"
+ return
+
+ full_message = ""
+ for chunk in response.iter_content(chunk_size=None):
+ if chunk:
+ messages = chunk.decode('utf-8').split('\x1e')
+ for message in messages:
+ try:
+ json_data = json.loads(message)
+ if json_data.get('finish', False):
+ return
+ current_message = json_data.get('message', '')
+ if current_message:
+ new_content = current_message[len(full_message):]
+ if new_content:
+ yield new_content
+ full_message = current_message
+ except json.JSONDecodeError:
+ continue
- # Yield the complete message
- yield complete_message
- except ContentTypeError:
- raise Exception("Failed to parse response content type.")
+ if not full_message:
+ yield "No message received"
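Below the diff, a minimal caller-side sketch of the rewritten provider. It assumes the package layout shown in the file path and that create_completion yields text chunks as the new code suggests; the import path and call pattern are illustrative assumptions, not part of this commit.

# Hypothetical usage sketch (not part of the commit).
# Assumes g4f is importable and NexraBing.create_completion yields text chunks.
from g4f.Provider.nexra.NexraBing import NexraBing

messages = [{"role": "user", "content": "Hello, how are you?"}]

# "gpt-4" resolves through model_aliases; any unknown name falls back
# to default_model ("Balanced") inside get_model().
for chunk in NexraBing.create_completion(
    model="gpt-4",
    messages=messages,
    stream=True,
    markdown=False,
):
    print(chunk, end="", flush=True)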
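For reference, the wire format the new code relies on: the endpoint answers with JSON records separated by the ASCII record separator (\x1e), each carrying the full message accumulated so far plus a finish flag. A standalone sketch of that framing follows, assuming the endpoint behaves exactly as process_response in the diff implies; the field names and the finish flag are taken from the code above, not from separate API documentation.

# Standalone sketch of the \x1e-framed streaming response (assumption:
# the endpoint behaves as process_response in the diff implies).
import json
import requests

API_ENDPOINT = "https://nexra.aryahcr.cc/api/chat/complements"

def stream_deltas(payload: dict):
    # Yield only the newly appended text of each record.
    seen = ""
    with requests.post(API_ENDPOINT, json=payload, stream=True) as resp:
        resp.raise_for_status()
        for raw in resp.iter_content(chunk_size=None):
            # A chunk may contain several \x1e-delimited JSON records.
            for record in raw.decode("utf-8").split("\x1e"):
                if not record.strip():
                    continue
                try:
                    obj = json.loads(record)
                except json.JSONDecodeError:
                    continue  # partial record split across chunks
                if obj.get("finish"):
                    return
                message = obj.get("message") or ""
                yield message[len(seen):]
                seen = message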