path: root/g4f/Provider/Bixin123.py
author     kqlio67 <kqlio67@users.noreply.github.com>  2024-09-06 18:32:18 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>  2024-09-06 18:32:18 +0200
commit     0fb46d45927c1c7781f232a09244b5370e9a0ba5 (patch)
tree       a6307f70d26ce3444690be73657bfc80b45fd8bd /g4f/Provider/Bixin123.py
parent     refactor(provider): use default model for code improvement prompt (diff)
Diffstat (limited to 'g4f/Provider/Bixin123.py')
-rw-r--r--  g4f/Provider/Bixin123.py  93
1 file changed, 93 insertions, 0 deletions
diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py
new file mode 100644
index 00000000..694a2eff
--- /dev/null
+++ b/g4f/Provider/Bixin123.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from .helper import format_prompt
+
+class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.bixin123.com"
+ api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+
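+    # Model names the upstream API accepts; get_model() falls back to default_model for unknown names.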
+    default_model = "gpt-3.5-turbo-0125"
+    models = ["gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", "gpt-4-turbo", "qwen-turbo"]
+
+    model_aliases = {
+        # Generic aliases resolved to concrete upstream model names; duplicate dict keys would silently overwrite each other, so each alias maps to exactly one model.
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+    }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+        proxy: str | None = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
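+        # Browser-like headers copied from the web client; "fingerprint" appears to be a hard-coded client token.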
+ headers = {
+ "accept": "application/json, text/plain, */*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "fingerprint": "988148794",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/chat",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ "x-website-domain": "chat.bixin123.com",
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": prompt,
+ "options": {
+ "usingNetwork": False,
+ "file": ""
+ }
+ }
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
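+            # The endpoint returns newline-delimited JSON; scan from the end for the last parseable line, which carries the full reply text.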
+ lines = response_text.strip().split("\n")
+ last_json = None
+ for line in reversed(lines):
+ try:
+ last_json = json.loads(line)
+ break
+ except json.JSONDecodeError:
+ pass
+
+ if last_json:
+ text = last_json.get("text", "")
+ yield text
+ else:
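+                # No JSON line could be parsed; yield an empty string instead of raising.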
+ yield ""