summary refs log tree commit diff stats
path: root/g4f/Provider/Liaobots.py
diff options
context:
space:
mode:
Diffstat (limited to 'g4f/Provider/Liaobots.py')
-rw-r--r--  g4f/Provider/Liaobots.py  235
1 file changed, 184 insertions, 51 deletions
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 75ecf300..fc50bdee 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -1,7 +1,6 @@
from __future__ import annotations
import uuid
-
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
@@ -10,53 +9,110 @@ from .helper import get_connector
from ..requests import raise_for_status
models = {
- "gpt-4o": {
- "context": "8K",
- "id": "gpt-4o-free",
+ "gpt-4o-mini-free": {
+ "id": "gpt-4o-mini-free",
+ "name": "GPT-4o-Mini-Free",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
"maxLength": 31200,
+ "tokenLimit": 7800,
+ "context": "8K",
+ },
+ "gpt-4o-mini": {
+ "id": "gpt-4o-mini",
+ "name": "GPT-4o-Mini",
"model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
+ },
+ "gpt-4o-free": {
+ "id": "gpt-4o-free",
"name": "GPT-4o-free",
+ "model": "ChatGPT",
"provider": "OpenAI",
+ "maxLength": 31200,
"tokenLimit": 7800,
+ "context": "8K",
},
- "gpt-3.5-turbo": {
- "id": "gpt-3.5-turbo",
- "name": "GPT-3.5-Turbo",
- "maxLength": 48000,
- "tokenLimit": 14000,
- "context": "16K",
+ "gpt-4o-2024-08-06": {
+ "id": "gpt-4o-2024-08-06",
+ "name": "GPT-4o",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
+ "maxLength": 260000,
+ "tokenLimit": 126000,
+ "context": "128K",
},
- "gpt-4-turbo": {
- "id": "gpt-4-turbo-preview",
+ "gpt-4-turbo-2024-04-09": {
+ "id": "gpt-4-turbo-2024-04-09",
"name": "GPT-4-Turbo",
+ "model": "ChatGPT",
+ "provider": "OpenAI",
"maxLength": 260000,
"tokenLimit": 126000,
"context": "128K",
},
- "gpt-4": {
- "id": "gpt-4-plus",
- "name": "GPT-4-Plus",
- "maxLength": 130000,
- "tokenLimit": 31000,
- "context": "32K",
- },
- "gpt-4-0613": {
- "id": "gpt-4-0613",
- "name": "GPT-4-0613",
- "maxLength": 60000,
- "tokenLimit": 15000,
- "context": "16K",
- },
- "gemini-pro": {
- "id": "gemini-pro",
- "name": "Gemini-Pro",
- "maxLength": 120000,
- "tokenLimit": 30000,
- "context": "32K",
+ "grok-beta": {
+ "id": "grok-beta",
+ "name": "Grok-Beta",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
+ },
+ "grok-2": {
+ "id": "grok-2",
+ "name": "Grok-2",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
+ },
+ "grok-2-mini": {
+ "id": "grok-2-mini",
+ "name": "Grok-2-mini",
+ "model": "Grok",
+ "provider": "x.ai",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "100K",
},
"claude-3-opus-20240229": {
"id": "claude-3-opus-20240229",
"name": "Claude-3-Opus",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-opus-20240229-aws": {
+ "id": "claude-3-opus-20240229-aws",
+ "name": "Claude-3-Opus-Aws",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-5-sonnet-20240620": {
+ "id": "claude-3-5-sonnet-20240620",
+ "name": "Claude-3.5-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-5-sonnet-20241022": {
+ "id": "claude-3-5-sonnet-20241022",
+ "name": "Claude-3.5-Sonnet-V2",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -64,6 +120,17 @@ models = {
"claude-3-sonnet-20240229": {
"id": "claude-3-sonnet-20240229",
"name": "Claude-3-Sonnet",
+ "model": "Claude",
+ "provider": "Anthropic",
+ "maxLength": 800000,
+ "tokenLimit": 200000,
+ "context": "200K",
+ },
+ "claude-3-haiku-20240307": {
+ "id": "claude-3-haiku-20240307",
+ "name": "Claude-3-Haiku",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
@@ -71,24 +138,30 @@ models = {
"claude-2.1": {
"id": "claude-2.1",
"name": "Claude-2.1-200k",
+ "model": "Claude",
+ "provider": "Anthropic",
"maxLength": 800000,
"tokenLimit": 200000,
"context": "200K",
},
- "claude-2.0": {
- "id": "claude-2.0",
- "name": "Claude-2.0-100k",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
+ "gemini-1.5-flash-002": {
+ "id": "gemini-1.5-flash-002",
+ "name": "Gemini-1.5-Flash-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
+ },
+ "gemini-1.5-pro-002": {
+ "id": "gemini-1.5-pro-002",
+ "name": "Gemini-1.5-Pro-1M",
+ "model": "Gemini",
+ "provider": "Google",
+ "maxLength": 4000000,
+ "tokenLimit": 1000000,
+ "context": "1024K",
},
- "claude-instant-1": {
- "id": "claude-instant-1",
- "name": "Claude-instant-1",
- "maxLength": 400000,
- "tokenLimit": 100000,
- "context": "100K",
- }
}
@@ -97,17 +170,51 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
working = True
supports_message_history = True
supports_system_message = True
- supports_gpt_35_turbo = True
- supports_gpt_4 = True
- default_model = "gpt-3.5-turbo"
- models = list(models)
+ default_model = "gpt-4o-2024-08-06"
+ models = list(models.keys())
+
model_aliases = {
- "claude-v2": "claude-2"
+ "gpt-4o-mini": "gpt-4o-mini-free",
+ "gpt-4o": "gpt-4o-free",
+ "gpt-4o": "gpt-4o-2024-08-06",
+
+ "gpt-4-turbo": "gpt-4-turbo-2024-04-09",
+ "gpt-4": "gpt-4o-mini-free",
+
+ "claude-3-opus": "claude-3-opus-20240229",
+ "claude-3-opus": "claude-3-opus-20240229-aws",
+ "claude-3-sonnet": "claude-3-sonnet-20240229",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
+ "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
+ "claude-3-haiku": "claude-3-haiku-20240307",
+ "claude-2.1": "claude-2.1",
+
+ "gemini-flash": "gemini-1.5-flash-002",
+ "gemini-pro": "gemini-1.5-pro-002",
}
+
_auth_code = ""
_cookie_jar = None
@classmethod
+ def get_model(cls, model: str) -> str:
+ """
+ Retrieve the internal model identifier based on the provided model name or alias.
+ """
+ if model in cls.model_aliases:
+ model = cls.model_aliases[model]
+ if model not in models:
+ raise ValueError(f"Model '{model}' is not supported.")
+ return model
+
+ @classmethod
+ def is_supported(cls, model: str) -> bool:
+ """
+ Check if the given model is supported.
+ """
+ return model in models or model in cls.model_aliases
+
+ @classmethod
async def create_async_generator(
cls,
model: str,
@@ -117,6 +224,8 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
+ model = cls.get_model(model)
+
headers = {
"authority": "liaobots.com",
"content-type": "application/json",
@@ -131,7 +240,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
) as session:
data = {
"conversationId": str(uuid.uuid4()),
- "model": models[cls.get_model(model)],
+ "model": models[model],
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "You are a helpful assistant."),
@@ -189,3 +298,27 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
raise RuntimeError("Invalid session")
if chunk:
yield chunk.decode(errors="ignore")
+
+ @classmethod
+ async def initialize_auth_code(cls, session: ClientSession) -> None:
+ """
+ Initialize the auth code by making the necessary login requests.
+ """
+ async with session.post(
+ "https://liaobots.work/api/user",
+ json={"authcode": "pTIQr4FTnVRfr"},
+ verify_ssl=False
+ ) as response:
+ await raise_for_status(response)
+ cls._auth_code = (await response.json(content_type=None))["authCode"]
+ if not cls._auth_code:
+ raise RuntimeError("Empty auth code")
+ cls._cookie_jar = session.cookie_jar
+
+ @classmethod
+ async def ensure_auth_code(cls, session: ClientSession) -> None:
+ """
+ Ensure the auth code is initialized, and if not, perform the initialization.
+ """
+ if not cls._auth_code:
+ await cls.initialize_auth_code(session)