Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Airforce.py      491
-rw-r--r--  g4f/Provider/Allyfy.py          6
-rw-r--r--  g4f/Provider/Bixin123.py       94
-rw-r--r--  g4f/Provider/Blackbox.py       28
-rw-r--r--  g4f/Provider/ChatGpt.py        49
-rw-r--r--  g4f/Provider/HuggingChat.py     2
-rw-r--r--  g4f/Provider/Liaobots.py        9
-rw-r--r--  g4f/Provider/LiteIcoding.py    21
-rw-r--r--  g4f/Provider/MagickPen.py       7
-rw-r--r--  g4f/Provider/__init__.py        1
10 files changed, 406 insertions(+), 302 deletions(-)
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 51f8ba55..e2b4be21 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -1,76 +1,199 @@
from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
+import random
+from urllib.parse import quote
import json
+from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse
-from .helper import format_prompt
-from ..errors import ResponseStatusError
+
+def split_long_message(message: str, max_length: int = 4000) -> list[str]:
+ return [message[i:i+max_length] for i in range(0, len(message), max_length)]
class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://api.airforce"
- text_api_endpoint = "https://api.airforce/chat/completions"
image_api_endpoint = "https://api.airforce/imagine2"
+ text_api_endpoint = "https://api.airforce/chat/completions"
working = True
+
+ default_model = 'llama-3-70b-chat'
+
supports_gpt_35_turbo = True
supports_gpt_4 = True
supports_stream = True
supports_system_message = True
supports_message_history = True
- default_model = 'llama-3-70b-chat'
+
text_models = [
- # Open source models
- 'llama-2-13b-chat',
- 'llama-3-70b-chat',
- 'llama-3-70b-chat-turbo',
- 'llama-3-70b-chat-lite',
- 'llama-3-8b-chat',
- 'llama-3-8b-chat-turbo',
- 'llama-3-8b-chat-lite',
- 'llama-3.1-405b-turbo',
- 'llama-3.1-70b-turbo',
- 'llama-3.1-8b-turbo',
- 'LlamaGuard-2-8b',
- 'Llama-Guard-7b',
- 'Meta-Llama-Guard-3-8B',
- 'Mixtral-8x7B-Instruct-v0.1',
- 'Mixtral-8x22B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.1',
- 'Mistral-7B-Instruct-v0.2',
- 'Mistral-7B-Instruct-v0.3',
- 'Qwen1.5-72B-Chat',
- 'Qwen1.5-110B-Chat',
- 'Qwen2-72B-Instruct',
- 'gemma-2b-it',
- 'gemma-2-9b-it',
- 'gemma-2-27b-it',
- 'dbrx-instruct',
- 'deepseek-llm-67b-chat',
- 'Nous-Hermes-2-Mixtral-8x7B-DPO',
- 'Nous-Hermes-2-Yi-34B',
- 'WizardLM-2-8x22B',
- 'SOLAR-10.7B-Instruct-v1.0',
- 'StripedHyena-Nous-7B',
- 'sparkdesk',
-
- # Other models
- 'chatgpt-4o-latest',
- 'gpt-4',
- 'gpt-4-turbo',
- 'gpt-4o-mini-2024-07-18',
- 'gpt-4o-mini',
- 'gpt-4o',
- 'gpt-3.5-turbo',
- 'gpt-3.5-turbo-0125',
- 'gpt-3.5-turbo-1106',
- 'gpt-3.5-turbo-16k',
- 'gpt-3.5-turbo-0613',
- 'gpt-3.5-turbo-16k-0613',
- 'gemini-1.5-flash',
- 'gemini-1.5-pro',
+ # anthropic
+ 'claude-3-haiku-20240307',
+ 'claude-3-sonnet-20240229',
+ 'claude-3-5-sonnet-20240620',
+ 'claude-3-opus-20240229',
+
+ # openai
+ 'chatgpt-4o-latest',
+ 'gpt-4',
+ #'gpt-4-0613',
+ 'gpt-4-turbo',
+ 'gpt-4o-mini-2024-07-18',
+ 'gpt-4o-mini',
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-0125',
+ 'gpt-3.5-turbo-1106',
+ #'gpt-3.5-turbo-16k', # No response from the API.
+ #'gpt-3.5-turbo-0613', # No response from the API.
+ #'gpt-3.5-turbo-16k-0613', # No response from the API.
+ 'gpt-4o',
+ #'o1-mini', # No response from the API.
+
+ # meta-llama
+ 'llama-3-70b-chat',
+ 'llama-3-70b-chat-turbo',
+ 'llama-3-8b-chat',
+ 'llama-3-8b-chat-turbo',
+ 'llama-3-70b-chat-lite',
+ 'llama-3-8b-chat-lite',
+ #'llama-2-70b-chat', # Failed to load response after multiple retries.
+ 'llama-2-13b-chat',
+ #'llama-2-7b-chat', # Failed to load response after multiple retries.
+ 'llama-3.1-405b-turbo',
+ 'llama-3.1-70b-turbo',
+ 'llama-3.1-8b-turbo',
+ 'LlamaGuard-2-8b',
+ 'Llama-Guard-7b',
+ 'Llama-3.2-90B-Vision-Instruct-Turbo',
+
+ # codellama
+ #'CodeLlama-7b-Python-hf', # Failed to load response after multiple retries.
+ #'CodeLlama-7b-Python',
+ #'CodeLlama-13b-Python-hf', # Failed to load response after multiple retries.
+ #'CodeLlama-34b-Python-hf', # Failed to load response after multiple retries.
+ #'CodeLlama-70b-Python-hf', # Failed to load response after multiple retries.
+
+ # 01-ai
+ #'Yi-34B-Chat', # Failed to load response after multiple retries.
+ #'Yi-34B', # Failed to load response after multiple retries.
+ #'Yi-6B', # Failed to load response after multiple retries.
+
+ # mistral-ai
+ #'Mixtral-8x7B-v0.1',
+ #'Mixtral-8x22B', # Failed to load response after multiple retries.
+ 'Mixtral-8x7B-Instruct-v0.1',
+ 'Mixtral-8x22B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.1',
+ 'Mistral-7B-Instruct-v0.2',
+ 'Mistral-7B-Instruct-v0.3',
+
+ # openchat
+ #'openchat-3.5', # Failed to load response after multiple retries.
+
+ # wizardlm
+ #'WizardLM-13B-V1.2', # Failed to load response after multiple retries.
+ #'WizardCoder-Python-34B-V1.0', # Failed to load response after multiple retries.
+
+ # qwen
+ #'Qwen1.5-0.5B-Chat', # Failed to load response after multiple retries.
+ #'Qwen1.5-1.8B-Chat', # Failed to load response after multiple retries.
+ #'Qwen1.5-4B-Chat', # Failed to load response after multiple retries.
+ 'Qwen1.5-7B-Chat',
+ 'Qwen1.5-14B-Chat',
+ 'Qwen1.5-72B-Chat',
+ 'Qwen1.5-110B-Chat',
+ 'Qwen2-72B-Instruct',
+
+ # google
+ 'gemma-2b-it',
+ #'gemma-7b-it', # Failed to load response after multiple retries.
+ #'gemma-2b', # Failed to load response after multiple retries.
+ #'gemma-7b', # Failed to load response after multiple retries.
+ 'gemma-2-9b-it',
+ 'gemma-2-27b-it',
+
+ # gemini
+ 'gemini-1.5-flash',
+ 'gemini-1.5-pro',
+
+ # databricks
+ 'dbrx-instruct',
+
+ # lmsys
+ #'vicuna-7b-v1.5', # Failed to load response after multiple retries.
+ #'vicuna-13b-v1.5', # Failed to load response after multiple retries.
+
+ # cognitivecomputations
+ #'dolphin-2.5-mixtral-8x7b', # Failed to load response after multiple retries.
+
+ # deepseek-ai
+ #'deepseek-coder-33b-instruct', # No response from the API.
+ #'deepseek-coder-67b-instruct', # Failed to load response after multiple retries.
+ 'deepseek-llm-67b-chat',
+
+ # NousResearch
+ #'Nous-Capybara-7B-V1p9', # Failed to load response after multiple retries.
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO',
+ #'Nous-Hermes-2-Mixtral-8x7B-SFT', # Failed to load response after multiple retries.
+ #'Nous-Hermes-llama-2-7b', # Failed to load response after multiple retries.
+ #'Nous-Hermes-Llama2-13b', # Failed to load response after multiple retries.
+ 'Nous-Hermes-2-Yi-34B',
+
+ # Open-Orca
+ #'Mistral-7B-OpenOrca', # Failed to load response after multiple retries.
+
+ # togethercomputer
+ #'alpaca-7b', # Failed to load response after multiple retries.
+
+ # teknium
+ #'OpenHermes-2-Mistral-7B', # Failed to load response after multiple retries.
+ #'OpenHermes-2.5-Mistral-7B', # Failed to load response after multiple retries.
+
+ # microsoft
+ 'WizardLM-2-8x22B',
+
+ # Nexusflow
+ #'NexusRaven-V2-13B', # Failed to load response after multiple retries.
+
+ # Phind
+ #'Phind-CodeLlama-34B-v2', # Failed to load response after multiple retries.
+
+ # Snowflake
+ #'snowflake-arctic-instruct', # No response from the API.
+
+ # upstage
+ 'SOLAR-10.7B-Instruct-v1.0',
+
+ # togethercomputer
+ #'StripedHyena-Hessian-7B', # Failed to load response after multiple retries.
+ #'StripedHyena-Nous-7B', # Failed to load response after multiple retries.
+ #'Llama-2-7B-32K-Instruct', # Failed to load response after multiple retries.
+ #'CodeLlama-13b-Instruct', # No response from the API.
+ #'evo-1-131k-base', # Failed to load response after multiple retries.
+ #'OLMo-7B-Instruct', # Failed to load response after multiple retries.
+
+ # garage-bAInd
+ #'Platypus2-70B-instruct', # Failed to load response after multiple retries.
+
+ # snorkelai
+ #'Snorkel-Mistral-PairRM-DPO', # Failed to load response after multiple retries.
+
+ # Undi95
+ #'ReMM-SLERP-L2-13B', # Failed to load response after multiple retries.
+
+ # Gryphe
+ 'MythoMax-L2-13b',
+
+ # Austism
+ #'chronos-hermes-13b', # Failed to load response after multiple retries.
+
+ # Undi95
+ #'Toppy-M-7B', # Failed to load response after multiple retries.
+
+ # iFlytek
+ #'sparkdesk', # Failed to load response after multiple retries.
+
+ # pawan
+ 'cosmosrp',
+
]
-
image_models = [
'flux',
'flux-realism',
@@ -85,158 +208,206 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
models = [
*text_models,
- *image_models
+ *image_models,
]
model_aliases = {
- # Open source models
- "llama-2-13b": "llama-2-13b-chat",
+ # anthropic
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3-5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3-opus": "claude-3-opus-20240229",
+
+ # openai
+ "gpt-4o": "chatgpt-4o-latest",
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
+ "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
+
+ # meta-llama
"llama-3-70b": "llama-3-70b-chat",
"llama-3-70b": "llama-3-70b-chat-turbo",
- "llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat",
"llama-3-8b": "llama-3-8b-chat-turbo",
+ "llama-3-70b": "llama-3-70b-chat-lite",
"llama-3-8b": "llama-3-8b-chat-lite",
+ "llama-2-13b": "llama-2-13b-chat",
"llama-3.1-405b": "llama-3.1-405b-turbo",
"llama-3.1-70b": "llama-3.1-70b-turbo",
"llama-3.1-8b": "llama-3.1-8b-turbo",
+ "llamaguard-2-8b": "LlamaGuard-2-8b",
+ "llamaguard-7b": "Llama-Guard-7b",
+ "llama-3.2-90b": "Llama-3.2-90B-Vision-Instruct-Turbo",
+
+ # mistral-ai
"mixtral-8x7b": "Mixtral-8x7B-Instruct-v0.1",
"mixtral-8x22b": "Mixtral-8x22B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.1",
"mistral-7b": "Mistral-7B-Instruct-v0.2",
"mistral-7b": "Mistral-7B-Instruct-v0.3",
- "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+
+ # qwen
+ "qwen-1.5-7b": "Qwen1.5-7B-Chat",
+ "qwen-1.5-14b": "Qwen1.5-14B-Chat",
"qwen-1.5-72b": "Qwen1.5-72B-Chat",
"qwen-1.5-110b": "Qwen1.5-110B-Chat",
"qwen-2-72b": "Qwen2-72B-Instruct",
+
+ # google
"gemma-2b": "gemma-2b-it",
- "gemma-2b-9b": "gemma-2-9b-it",
- "gemma-2b-27b": "gemma-2-27b-it",
+ "gemma-2-9b": "gemma-2-9b-it",
+ "gemma-2-27b": "gemma-2-27b-it",
+
+ # gemini
+ "gemini-flash": "gemini-1.5-flash",
+ "gemini-pro": "gemini-1.5-pro",
+
+ # deepseek-ai
"deepseek": "deepseek-llm-67b-chat",
+
+ # NousResearch
+ "mixtral-8x7b-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
"yi-34b": "Nous-Hermes-2-Yi-34B",
+
+ # microsoft
"wizardlm-2-8x22b": "WizardLM-2-8x22B",
- "solar-10-7b": "SOLAR-10.7B-Instruct-v1.0",
- "sh-n-7b": "StripedHyena-Nous-7B",
- "sparkdesk-v1.1": "sparkdesk",
- # Other models
- "gpt-4o": "chatgpt-4o-latest",
- "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo": "gpt-3.5-turbo-1106",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
- "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- "gemini-flash": "gemini-1.5-flash",
- "gemini-pro": "gemini-1.5-pro",
+ # upstage
+ "solar-10.7b": "SOLAR-10.7B-Instruct-v1.0",
- # Image models
- "dalle-3": "dall-e-3",
+ # Gryphe
+ "mythomax-l2-13b": "MythoMax-L2-13b",
}
@classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases.get(model, cls.default_model)
+ else:
+ return cls.default_model
+
+ @classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ stream: bool = False,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
-
+
+ # If the model is an image model, use the image API
+ if model in cls.image_models:
+ async for result in cls._generate_image(model, messages, proxy, seed, size):
+ yield result
+ # If the model is a text model, use the text API
+ elif model in cls.text_models:
+ async for result in cls._generate_text(model, messages, proxy, stream):
+ yield result
+
+ @classmethod
+ async def _generate_image(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ seed: int = None,
+ size: str = "1:1",
+ **kwargs
+ ) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
- "content-type": "application/json",
- "origin": "https://api.airforce",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36",
- "authorization": "Bearer null",
"cache-control": "no-cache",
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": "https://llmplayground.net/",
- "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "cross-site",
+ "origin": "https://llmplayground.net",
+ "user-agent": "Mozilla/5.0"
}
- if model in cls.image_models:
- async for item in cls.generate_image(model, messages, headers, proxy, **kwargs):
- yield item
- else:
- async for item in cls.generate_text(model, messages, headers, proxy, **kwargs):
- yield item
+ if seed is None:
+ seed = random.randint(0, 100000)
- @classmethod
- async def generate_text(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
- async with ClientSession() as session:
- data = {
- "messages": [{"role": "user", "content": message['content']} for message in messages],
+ # The most recent message carries the image prompt
+ prompt = messages[-1]['content']
+
+ async with ClientSession(headers=headers) as session:
+ params = {
"model": model,
- "max_tokens": kwargs.get('max_tokens', 4096),
- "temperature": kwargs.get('temperature', 1),
- "top_p": kwargs.get('top_p', 1),
- "stream": True
+ "prompt": prompt,
+ "size": size,
+ "seed": seed
}
+ async with session.get(f"{cls.image_api_endpoint}", params=params, proxy=proxy) as response:
+ response.raise_for_status()
+ content_type = response.headers.get('Content-Type', '').lower()
- try:
- async with session.post(cls.text_api_endpoint, json=data, headers=headers, proxy=proxy) as response:
- response.raise_for_status()
- async for line in response.content:
- if line:
- line = line.decode('utf-8').strip()
- if line.startswith("data: "):
- if line == "data: [DONE]":
- break
- try:
- data = json.loads(line[6:])
- if 'choices' in data and len(data['choices']) > 0:
- delta = data['choices'][0].get('delta', {})
- if 'content' in delta:
- content = delta['content']
- if "One message exceeds the 1000chars per message limit" in content:
- raise ResponseStatusError(
- "Message too long",
- 400,
- "Please try a shorter message."
- )
- yield content
- except json.JSONDecodeError:
- continue
- except ResponseStatusError as e:
- raise e
- except Exception as e:
- raise ResponseStatusError(str(e), 500, "An unexpected error occurred")
+ if 'application/json' in content_type:
+ async for chunk in response.content.iter_chunked(1024):
+ if chunk:
+ yield chunk.decode('utf-8')
+ elif 'image' in content_type:
+ image_data = b""
+ async for chunk in response.content.iter_chunked(1024):
+ if chunk:
+ image_data += chunk
+ image_url = f"{cls.image_api_endpoint}?model={model}&prompt={quote(prompt)}&size={size}&seed={seed}"  # quote() URL-encodes the prompt
+ alt_text = f"Generated image for prompt: {prompt}"
+ yield ImageResponse(images=image_url, alt=alt_text)
@classmethod
- async def generate_image(cls, model: str, messages: Messages, headers: dict, proxy: str, **kwargs) -> AsyncResult:
- prompt = messages[-1]['content'] if messages else ""
- params = {
- "prompt": prompt,
- "size": kwargs.get("size", "1:1"),
- "seed": kwargs.get("seed"),
- "model": model
+ async def _generate_text(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer missing api key",
+ "content-type": "application/json",
+ "user-agent": "Mozilla/5.0"
}
- params = {k: v for k, v in params.items() if v is not None}
- try:
- async with ClientSession(headers=headers) as session:
- async with session.get(cls.image_api_endpoint, params=params, proxy=proxy) as response:
+ async with ClientSession(headers=headers) as session:
+ formatted_prompt = cls._format_messages(messages)
+ prompt_parts = split_long_message(formatted_prompt)
+ full_response = ""
+
+ for part in prompt_parts:
+ data = {
+ "messages": [{"role": "user", "content": part}],
+ "model": model,
+ "max_tokens": 4096,
+ "temperature": 1,
+ "top_p": 1,
+ "stream": stream
+ }
+ async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
- content = await response.read()
-
- if response.content_type.startswith('image/'):
- image_url = str(response.url)
- yield ImageResponse(image_url, prompt)
+ part_response = ""
+ if stream:
+ async for line in response.content:
+ if line:
+ line = line.decode('utf-8').strip()
+ if line.startswith("data: ") and line != "data: [DONE]":
+ json_data = json.loads(line[6:])
+ content = json_data['choices'][0]['delta'].get('content', '')
+ part_response += content
else:
- try:
- text = content.decode('utf-8', errors='ignore')
- raise ResponseStatusError("Image generation failed", response.status, text)
- except Exception as decode_error:
- raise ResponseStatusError("Decoding error", 500, str(decode_error))
- except ClientResponseError as e:
- raise ResponseStatusError(f"HTTP {e.status}", e.status, e.message)
- except Exception as e:
- raise ResponseStatusError("Unexpected error", 500, str(e))
+ json_data = await response.json()
+ content = json_data['choices'][0]['message']['content']
+ part_response = content
+
+ full_response += part_response
+ yield full_response
+
+ @classmethod
+ def _format_messages(cls, messages: Messages) -> str:
+ """Formats messages for text generation."""
+ return " ".join([msg['content'] for msg in messages])
diff --git a/g4f/Provider/Allyfy.py b/g4f/Provider/Allyfy.py
index 8733b1ec..eb202a4f 100644
--- a/g4f/Provider/Allyfy.py
+++ b/g4f/Provider/Allyfy.py
@@ -9,8 +9,8 @@ from .helper import format_prompt
class Allyfy(AsyncGeneratorProvider):
- url = "https://chatbot.allyfy.chat"
- api_endpoint = "/api/v1/message/stream/super/chat"
+ url = "https://allyfy.chat"
+ api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
working = True
supports_gpt_35_turbo = True
@@ -53,7 +53,7 @@ class Allyfy(AsyncGeneratorProvider):
"packageName": "com.cch.allyfy.webh",
}
}
- async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
full_response = []
async for line in response.content:
diff --git a/g4f/Provider/Bixin123.py b/g4f/Provider/Bixin123.py
deleted file mode 100644
index 081064f9..00000000
--- a/g4f/Provider/Bixin123.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import json
-import random
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..typing import AsyncResult, Messages
-from .helper import format_prompt
-
-class Bixin123(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://chat.bixin123.com"
- api_endpoint = "https://chat.bixin123.com/api/chatgpt/chat-process"
- working = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
-
- default_model = 'gpt-3.5-turbo-0125'
- models = ['gpt-3.5-turbo', 'gpt-3.5-turbo-0125', 'gpt-3.5-turbo-16k-0613', 'gpt-4-turbo', 'qwen-turbo']
-
- model_aliases = {
- "gpt-3.5-turbo": "gpt-3.5-turbo-0125",
- "gpt-3.5-turbo": "gpt-3.5-turbo-16k-0613",
- }
-
- @classmethod
- def get_model(cls, model: str) -> str:
- if model in cls.models:
- return model
- elif model in cls.model_aliases:
- return cls.model_aliases[model]
- else:
- return cls.default_model
-
- @classmethod
- def generate_fingerprint(cls) -> str:
- return str(random.randint(100000000, 999999999))
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- headers = {
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.9",
- "cache-control": "no-cache",
- "content-type": "application/json",
- "fingerprint": cls.generate_fingerprint(),
- "origin": cls.url,
- "pragma": "no-cache",
- "priority": "u=1, i",
- "referer": f"{cls.url}/chat",
- "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
- "sec-ch-ua-mobile": "?0",
- "sec-ch-ua-platform": '"Linux"',
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
- "x-website-domain": "chat.bixin123.com",
- }
-
- async with ClientSession(headers=headers) as session:
- prompt = format_prompt(messages)
- data = {
- "prompt": prompt,
- "options": {
- "usingNetwork": False,
- "file": ""
- }
- }
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
- response.raise_for_status()
- response_text = await response.text()
-
- lines = response_text.strip().split("\n")
- last_json = None
- for line in reversed(lines):
- try:
- last_json = json.loads(line)
- break
- except json.JSONDecodeError:
- pass
-
- if last_json:
- text = last_json.get("text", "")
- yield text
- else:
- yield ""
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 3e183076..b074d28f 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -25,18 +25,10 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'llama-3.1-70b',
'llama-3.1-405b',
'ImageGenerationLV45LJp',
- 'GPT-4o',
- 'Gemini-PRO',
- 'Claude-Sonnet-3.5',
+ 'gpt-4o',
+ 'gemini-pro',
+ 'claude-sonnet-3.5',
]
-
- model_aliases = {
- "gemini-flash": "gemini-1.5-flash",
- "flux": "ImageGenerationLV45LJp",
- "gpt-4o": "GPT-4o",
- "gemini-pro": "Gemini-PRO",
- "claude-3.5-sonnet": "Claude-Sonnet-3.5",
- }
agentMode = {
'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
@@ -51,9 +43,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}
userSelectedModel = {
- "GPT-4o": "GPT-4o",
- "Gemini-PRO": "Gemini-PRO",
- 'Claude-Sonnet-3.5': "Claude-Sonnet-3.5",
+ "gpt-4o": "gpt-4o",
+ "gemini-pro": "gemini-pro",
+ 'claude-sonnet-3.5': "claude-sonnet-3.5",
+ }
+
+ model_aliases = {
+ "gemini-flash": "gemini-1.5-flash",
+ "flux": "ImageGenerationLV45LJp",
}
@classmethod
@@ -119,8 +116,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"agentMode": {},
"trendingAgentMode": {},
"userSelectedModel": None,
+ "userSystemPrompt": None,
"isMicMode": False,
- "maxTokens": 99999999,
+ "maxTokens": 1024,
"playgroundTopP": 0.9,
"playgroundTemperature": 0.5,
"isChromeExt": False,
diff --git a/g4f/Provider/ChatGpt.py b/g4f/Provider/ChatGpt.py
index fc34fc2b..b5a78b9a 100644
--- a/g4f/Provider/ChatGpt.py
+++ b/g4f/Provider/ChatGpt.py
@@ -134,11 +134,21 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
}
response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
- headers=headers, json={'p': pow_req}).json()
+ headers=headers, json={'p': pow_req})
- turnstile = response.get('turnstile', {})
+ if response.status_code != 200:
+ print(f"Request failed with status: {response.status_code}")
+ print(f"Response content: {response.content}")
+ return
+
+ response_data = response.json()
+ if "detail" in response_data and "Unusual activity" in response_data["detail"]:
+ print(f"Blocked due to unusual activity: {response_data['detail']}")
+ return
+
+ turnstile = response_data.get('turnstile', {})
turnstile_required = turnstile.get('required')
- pow_conf = response.get('proofofwork', {})
+ pow_conf = response_data.get('proofofwork', {})
if turnstile_required:
turnstile_dx = turnstile.get('dx')
@@ -146,7 +156,7 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
headers = headers | {
'openai-sentinel-turnstile-token' : turnstile_token,
- 'openai-sentinel-chat-requirements-token': response.get('token'),
+ 'openai-sentinel-chat-requirements-token': response_data.get('token'),
'openai-sentinel-proof-token' : get_answer_token(
pow_conf.get('seed'), pow_conf.get('difficulty'), config
)
@@ -187,20 +197,29 @@ class ChatGpt(AbstractProvider, ProviderModelMixin):
'screen_width': random.randint(1200, 2000),
},
}
+
+ time.sleep(2)
response = session.post('https://chatgpt.com/backend-anon/conversation',
headers=headers, json=json_data, stream=True)
-
+
replace = ''
for line in response.iter_lines():
if line:
- if 'DONE' in line.decode():
- break
-
- data = json.loads(line.decode()[6:])
- if data.get('message').get('author').get('role') == 'assistant':
- tokens = (data.get('message').get('content').get('parts')[0])
-
- yield tokens.replace(replace, '')
-
- replace = tokens
+ decoded_line = line.decode()
+ print(f"Received line: {decoded_line}")
+ if decoded_line.startswith('data: '):
+ json_string = decoded_line[6:]
+ if json_string.strip():
+ try:
+ data = json.loads(json_string)
+ except json.JSONDecodeError as e:
+ print(f"Error decoding JSON: {e}, content: {json_string}")
+ continue
+
+ if data.get('message', {}).get('author', {}).get('role') == 'assistant':
+ tokens = data.get('message', {}).get('content', {}).get('parts', [''])[0]
+
+ yield tokens.replace(replace, '')
+
+ replace = tokens
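
The rewritten streaming loop is a standard SSE "data:" parser. A self-contained sketch of the same pattern (the sample payloads are invented for illustration, not captured from chatgpt.com):

    import json

    sse_lines = [
        b'data: {"message": {"author": {"role": "assistant"}, "content": {"parts": ["Hel"]}}}',
        b'data: {"message": {"author": {"role": "assistant"}, "content": {"parts": ["Hello"]}}}',
        b'data: [DONE]',
    ]
    replace = ''
    for line in sse_lines:
        decoded = line.decode()
        if not decoded.startswith('data: '):
            continue
        try:
            data = json.loads(decoded[6:])
        except json.JSONDecodeError:               # the [DONE] sentinel is not JSON
            continue
        if data.get('message', {}).get('author', {}).get('role') == 'assistant':
            tokens = data['message']['content']['parts'][0]
            print(tokens.replace(replace, ''), end='')  # emit only the new suffix
            replace = tokens
    # prints: Hello
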
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index e6f70bed..30e97d7d 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -17,6 +17,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
'meta-llama/Meta-Llama-3.1-70B-Instruct',
'CohereForAI/c4ai-command-r-plus-08-2024',
'Qwen/Qwen2.5-72B-Instruct',
+ 'meta-llama/Llama-3.2-11B-Vision-Instruct',
'NousResearch/Hermes-3-Llama-3.1-8B',
'mistralai/Mistral-Nemo-Instruct-2407',
'microsoft/Phi-3.5-mini-instruct',
@@ -26,6 +27,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
"command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
"qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
+ "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
"hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B",
"mistral-nemo": "mistralai/Mistral-Nemo-Instruct-2407",
"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index b292020e..00c54600 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -9,6 +9,15 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
+ "gpt-3.5-turbo": {
+ "id": "gpt-3.5-turbo",
+ "name": "GPT-3.5-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 48000,
+ "tokenLimit": 14000,
+ "context": "16K",
+ },
"gpt-4o-mini-free": {
"id": "gpt-4o-mini-free",
"name": "GPT-4o-Mini-Free",
diff --git a/g4f/Provider/LiteIcoding.py b/g4f/Provider/LiteIcoding.py
index 1b568e80..bf8f9ba8 100644
--- a/g4f/Provider/LiteIcoding.py
+++ b/g4f/Provider/LiteIcoding.py
@@ -1,12 +1,11 @@
from __future__ import annotations
-
-from aiohttp import ClientSession, ClientResponseError
+import base64
import re
+from aiohttp import ClientSession, ClientResponseError
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-
class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://lite.icoding.ink"
api_endpoint = "/api/v1/gpt/message"
@@ -27,18 +26,20 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
}
bearer_tokens = [
- "aa3020ee873e40cb8b3f515a0708ebc4",
- "5d69cd271b144226ac1199b3c849a566",
- "62977f48a95844f8853a953679401850",
- "d815b091959e42dd8b7871dfaf879485"
+ "NWQ2OWNkMjcxYjE0NDIyNmFjMTE5OWIzYzg0OWE1NjY=",
+ "ZDgxNWIwOTU5NTk0ZTRkZDhiNzg3MWRmYWY4Nzk0ODU="
]
current_token_index = 0
@classmethod
+ def decode_token(cls, encoded_token: str) -> str:
+ return base64.b64decode(encoded_token).decode('utf-8')
+
+ @classmethod
def get_next_bearer_token(cls):
- token = cls.bearer_tokens[cls.current_token_index]
+ encoded_token = cls.bearer_tokens[cls.current_token_index]
cls.current_token_index = (cls.current_token_index + 1) % len(cls.bearer_tokens)
- return token
+ return cls.decode_token(encoded_token)
@classmethod
async def create_async_generator(
@@ -95,9 +96,11 @@ class LiteIcoding(AsyncGeneratorProvider, ProviderModelMixin):
response.raise_for_status()
buffer = ""
full_response = ""
+
def decode_content(data):
bytes_array = bytes([int(b, 16) ^ 255 for b in data.split()])
return bytes_array.decode('utf-8')
+
async for chunk in response.content.iter_any():
if chunk:
buffer += chunk.decode()
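
The rotated bearer tokens are now stored base64-encoded and decoded on use; decoding the first entry reproduces the second token of the removed plaintext list:

    import base64

    encoded = "NWQ2OWNkMjcxYjE0NDIyNmFjMTE5OWIzYzg0OWE1NjY="
    print(base64.b64decode(encoded).decode('utf-8'))   # 5d69cd271b144226ac1199b3c849a566
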
diff --git a/g4f/Provider/MagickPen.py b/g4f/Provider/MagickPen.py
index b6a47417..c15a59f5 100644
--- a/g4f/Provider/MagickPen.py
+++ b/g4f/Provider/MagickPen.py
@@ -24,21 +24,18 @@ class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod
async def fetch_api_credentials(cls) -> tuple:
- url = "https://magickpen.com/_nuxt/9e47cd7579e60a9d1f13.js"
+ url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
async with ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
- # Extract the necessary values from the file
pattern = r'"X-API-Secret":"(\w+)"'
match = re.search(pattern, text)
X_API_SECRET = match.group(1) if match else None
- # Generate timestamp and nonce
- timestamp = str(int(time.time() * 1000)) # in milliseconds
+ timestamp = str(int(time.time() * 1000))
nonce = str(random.random())
- # Generate the signature
s = ["TGDBU9zCgM", timestamp, nonce]
s.sort()
signature_string = ''.join(s)
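
For context, the signature these lines assemble sorts a fixed key with a millisecond timestamp and a random nonce before joining; the digest step sits outside this hunk, so the MD5 call below is an assumption for illustration:

    import hashlib
    import random
    import time

    timestamp = str(int(time.time() * 1000))    # milliseconds, as in the diff
    nonce = str(random.random())
    s = ["TGDBU9zCgM", timestamp, nonce]
    s.sort()                                    # lexicographic sort before joining
    signature_string = ''.join(s)
    signature = hashlib.md5(signature_string.encode()).hexdigest()  # assumed digest
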
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 82cb9ff2..c2b21481 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -19,7 +19,6 @@ from .Aura import Aura
from .Bing import Bing
from .BingCreateImages import BingCreateImages
from .Binjie import Binjie
-from .Bixin123 import Bixin123
from .Blackbox import Blackbox
from .ChatGot import ChatGot
from .ChatGpt import ChatGpt