author     Tekky <98614666+xtekky@users.noreply.github.com>    2024-10-22 23:32:27 +0200
committer  GitHub <noreply@github.com>                         2024-10-22 23:32:27 +0200
commit     a63c18de796bd4f3e818ff170b6ff595304f95e0 (patch)
tree       844dbb9a8d3526a8b60564b78f7a19a4e0f605d9 /g4f/Provider/nexra/NexraQwen.py
parent     Merge pull request #2282 from Karasiq/patch-1 (diff)
parent     Updated docs/providers-and-models.md g4f/models.py g4f/Provider/Upstage.py (diff)
Diffstat (limited to 'g4f/Provider/nexra/NexraQwen.py')
-rw-r--r--  g4f/Provider/nexra/NexraQwen.py  114
1 file changed, 57 insertions, 57 deletions
diff --git a/g4f/Provider/nexra/NexraQwen.py b/g4f/Provider/nexra/NexraQwen.py
index 8bdf5475..7f944e44 100644
--- a/g4f/Provider/nexra/NexraQwen.py
+++ b/g4f/Provider/nexra/NexraQwen.py
@@ -1,14 +1,13 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession
 import json
+import requests
 
-from ...typing import AsyncResult, Messages
-from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import CreateResult, Messages
+from ..base_provider import ProviderModelMixin, AbstractProvider
 from ..helper import format_prompt
 
-
-class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
+class NexraQwen(AbstractProvider, ProviderModelMixin):
     label = "Nexra Qwen"
     url = "https://nexra.aryahcr.cc/documentation/qwen/en"
     api_endpoint = "https://nexra.aryahcr.cc/api/chat/complements"
@@ -21,66 +20,67 @@ class NexraQwen(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     def get_model(cls, model: str) -> str:
         return cls.default_model
-    
+
     @classmethod
-    async def create_async_generator(
+    def create_completion(
         cls,
         model: str,
         messages: Messages,
+        stream: bool,
         proxy: str = None,
-        stream: bool = False,
         markdown: bool = False,
         **kwargs
-    ) -> AsyncResult:
+    ) -> CreateResult:
         model = cls.get_model(model)
-        
+
         headers = {
-            "Content-Type": "application/json",
-            "accept": "application/json",
-            "origin": cls.url,
-            "referer": f"{cls.url}/chat",
+            'Content-Type': 'application/json'
+        }
+        
+        data = {
+            "messages": [
+                {
+                    "role": "user",
+                    "content": format_prompt(messages)
+                }
+            ],
+            "stream": stream,
+            "markdown": markdown,
+            "model": model
         }
-        async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-            data = {
-                "messages": [
-                    {
-                        "role": "user",
-                        "content": prompt
-                    }
-                ],
-                "markdown": markdown,
-                "stream": stream,
-                "model": model
-            }
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-
-                complete_message = ""
-
-                # If streaming, process each chunk separately
-                if stream:
-                    async for chunk in response.content.iter_any():
-                        if chunk:
-                            try:
-                                # Decode the chunk and split by the delimiter
-                                parts = chunk.decode('utf-8').split('\x1e')
-                                for part in parts:
-                                    if part.strip():  # Ensure the part is not empty
-                                        response_data = json.loads(part)
-                                        message_part = response_data.get('message')
-                                        if message_part:
-                                            complete_message = message_part
-                            except json.JSONDecodeError:
-                                continue
+
+        response = requests.post(cls.api_endpoint, headers=headers, json=data, stream=stream)
+
+        if stream:
+            return cls.process_streaming_response(response)
+        else:
+            return cls.process_non_streaming_response(response)
 
-                    # Yield the final complete message
-                    if complete_message:
-                        yield complete_message
-                else:
-                    # Handle non-streaming response
-                    text_response = await response.text()
-                    response_data = json.loads(text_response)
-                    message = response_data.get('message')
-                    if message:
-                        yield message
+    @classmethod
+    def process_non_streaming_response(cls, response):
+        if response.status_code == 200:
+            try:
+                content = response.text.lstrip('')
+                data = json.loads(content)
+                return data.get('message', '')
+            except json.JSONDecodeError:
+                return "Error: Unable to decode JSON response"
+        else:
+            return f"Error: {response.status_code}"
+
+    @classmethod
+    def process_streaming_response(cls, response):
+        full_message = ""
+        for line in response.iter_lines(decode_unicode=True):
+            if line:
+                try:
+                    line = line.lstrip('')
+                    data = json.loads(line)
+                    if data.get('finish'):
+                        break
+                    message = data.get('message', '')
+                    if message is not None and message != full_message:
+                        yield message[len(full_message):]
+                        full_message = message
+                except json.JSONDecodeError:
+                    pass
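
For orientation, a minimal usage sketch of the rewritten synchronous provider follows. It is not part of the commit above: the import path and the role/content message shape are assumptions based on the file layout shown in the diff and may need adjusting to the surrounding codebase.

# Hypothetical usage sketch (not part of this commit); assumes the module
# g4f/Provider/nexra/NexraQwen.py is importable as shown and that messages
# use the plain role/content dict shape consumed by format_prompt().
from g4f.Provider.nexra.NexraQwen import NexraQwen

messages = [{"role": "user", "content": "Say hello in one short sentence."}]

# stream=True returns the generator built by process_streaming_response();
# each yielded value is only the text appended since the previous chunk.
for chunk in NexraQwen.create_completion(model="qwen", messages=messages, stream=True):
    print(chunk, end="", flush=True)
print()

# stream=False goes through process_non_streaming_response() and returns the
# complete message (or an error string) as a single value.
print(NexraQwen.create_completion(model="qwen", messages=messages, stream=False))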