Diffstat (limited to '')
-rw-r--r--  g4f/Provider/EasyChat.py  |  2
-rw-r--r--  g4f/Provider/Equing.py    |  6
-rw-r--r--  g4f/Provider/GetGpt.py    |  2
-rw-r--r--  g4f/Provider/Liaobots.py  |  2
-rw-r--r--  g4f/Provider/Wuguokai.py  | 11
-rw-r--r--  g4f/__init__.py           | 13
-rw-r--r--  g4f/models.py             | 21
7 files changed, 33 insertions, 24 deletions
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 946d4a4d..dae5196d 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -13,7 +13,7 @@ class EasyChat(BaseProvider):
url: str = "https://free.easychat.work"
supports_stream = True
supports_gpt_35_turbo = True
- working = True
+ working = False
@staticmethod
def create_completion(
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index 0ebb93a5..261c53c0 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -6,12 +6,12 @@ from abc import ABC, abstractmethod
import requests
from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
-class Equing(ABC):
+class Equing(BaseProvider):
url: str = 'https://next.eqing.tech/'
- working = True
- needs_auth = False
+ working = False
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
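
This hunk retires Equing's private ABC base in favour of the shared BaseProvider, so the class now advertises the same attributes and entry point as every other provider in the package. The real g4f/Provider/base_provider.py is not part of this diff; what follows is a minimal sketch of the interface, inferred only from the attributes and the create_completion staticmethod visible in these hunks:

from abc import ABC, abstractmethod
from typing import Any, Generator

CreateResult = Generator[str, None, None]  # assumed alias for ..typing.CreateResult

class BaseProvider(ABC):
    # Class-level capability flags, as read by g4f.ChatCompletion.create.
    url: str = ''
    working: bool = False
    needs_auth: bool = False
    supports_stream: bool = False
    supports_gpt_35_turbo: bool = False
    supports_gpt_4: bool = False

    @staticmethod
    @abstractmethod
    def create_completion(model: str, messages: list[dict[str, str]],
                          stream: bool, **kwargs: Any) -> CreateResult:
        ...

Dropping needs_auth = False from Equing itself is then harmless: the flag simply falls back to whatever default the base class defines.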
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
index 74e772b0..b96efaac 100644
--- a/g4f/Provider/GetGpt.py
+++ b/g4f/Provider/GetGpt.py
@@ -14,7 +14,7 @@ from .base_provider import BaseProvider
class GetGpt(BaseProvider):
url = 'https://chat.getgpt.world/'
supports_stream = True
- working = True
+ working = False
supports_gpt_35_turbo = True
@staticmethod
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 2cc5ff99..33224d2e 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -31,7 +31,7 @@ models = {
class Liaobots(AsyncGeneratorProvider):
url = "https://liaobots.com"
- working = True
+ working = False
supports_gpt_35_turbo = True
supports_gpt_4 = True
_auth_code = None
diff --git a/g4f/Provider/Wuguokai.py b/g4f/Provider/Wuguokai.py
index a9614626..0a46f6ee 100644
--- a/g4f/Provider/Wuguokai.py
+++ b/g4f/Provider/Wuguokai.py
@@ -5,13 +5,13 @@ import random
import requests
from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import BaseProvider, format_prompt
class Wuguokai(BaseProvider):
url = 'https://chat.wuguokai.xyz'
supports_gpt_35_turbo = True
- working = True
+ working = False
@staticmethod
def create_completion(
@@ -20,11 +20,6 @@ class Wuguokai(BaseProvider):
stream: bool,
**kwargs: Any,
) -> CreateResult:
- base = ''
- for message in messages:
- base += '%s: %s\n' % (message['role'], message['content'])
- base += 'assistant:'
-
headers = {
'authority': 'ai-api.wuguokai.xyz',
'accept': 'application/json, text/plain, */*',
@@ -41,7 +36,7 @@ class Wuguokai(BaseProvider):
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
}
data ={
- "prompt": base,
+ "prompt": format_prompt(messages),
"options": {},
"userId": f"#/chat/{random.randint(1,99999999)}",
"usingContext": True
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 065acee6..90b05c85 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -2,6 +2,7 @@ from __future__ import annotations
from . import models
from .Provider import BaseProvider
from .typing import Any, CreateResult, Union
+import random
logging = False
@@ -13,14 +14,22 @@ class ChatCompletion:
provider : Union[type[BaseProvider], None] = None,
stream : bool = False,
auth : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
-
+
if isinstance(model, str):
try:
model = models.ModelUtils.convert[model]
except KeyError:
raise Exception(f'The model: {model} does not exist')
- provider = model.best_provider if provider == None else provider
+
+ if not provider:
+ if isinstance(model.best_provider, tuple):
+ provider = random.choice(model.best_provider)
+ else:
+ provider = model.best_provider
+
+ if not provider:
+ raise Exception(f'No provider found')
if not provider.working:
raise Exception(f'{provider.__name__} is not working')
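
The provider lookup in ChatCompletion.create now understands tuples: when a model carries several candidate providers, one is drawn at random on every call, and a model with no provider at all fails with an explicit 'No provider found' instead of an AttributeError further down. Note that random.choice does not filter on working, so a call can still hit the 'is not working' check right after it. A usage sketch (model and message values are illustrative):

import g4f

# gpt-3.5-turbo now maps to a tuple of providers (see models.py below);
# each call without an explicit provider= draws one of them at random.
response = g4f.ChatCompletion.create(
    model='gpt-3.5-turbo',
    messages=[{'role': 'user', 'content': 'Hello'}],
)
print(response)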
diff --git a/g4f/models.py b/g4f/models.py
index 0c5eb961..e095ce7e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,31 +1,37 @@
from __future__ import annotations
from dataclasses import dataclass
-from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing
+from .Provider import BaseProvider, Bard, H2o, Vercel
+from .Provider import Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin, CodeLinkAva
+from .Provider import DeepAi, Vercel, Vitalentum, Ylokh, You, Yqcloud
+from .typing import Union
@dataclass
class Model:
name: str
base_provider: str
- best_provider: type[BaseProvider]
+ best_provider: Union[type[BaseProvider], tuple[type[BaseProvider]]] = None
# Config for HuggingChat, OpenAssistant
# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
default = Model(
name="",
- base_provider="huggingface",
- best_provider=H2o,
+ base_provider="huggingface"
)
# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
- best_provider = GetGpt)
+ best_provider = (
+ Vercel, Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin,
+ CodeLinkAva, DeepAi, Vitalentum, Ylokh, You, Yqcloud
+ )
+)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
- best_provider = Liaobots)
+)
# Bard
palm = Model(
@@ -117,8 +123,7 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name = 'openai:gpt-3.5-turbo-16k-0613',
- base_provider = 'openai',
- best_provider = Equing)
+ base_provider = 'openai')
gpt_4_0613 = Model(
name = 'openai:gpt-4-0613',
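
Because best_provider now defaults to None, models such as gpt_4 and gpt_35_turbo_16k_0613 above can be declared without any provider at all; resolving them goes through the new 'No provider found' branch in __init__.py unless a provider is passed explicitly. A sketch of that failure path, assuming no provider= argument is given:

import g4f

# gpt-4's best_provider was removed in this commit, so the default
# lookup now raises rather than returning a broken provider.
try:
    g4f.ChatCompletion.create(
        model='gpt-4',
        messages=[{'role': 'user', 'content': 'Hello'}],
    )
except Exception as exc:
    print(exc)  # No provider found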