 g4f/Provider/AItianhuSpace.py          |  4
 g4f/Provider/DeepInfra.py              | 25
 g4f/Provider/GeekGpt.py                |  4
 g4f/Provider/MyShell.py                |  4
 g4f/Provider/PerplexityAi.py           |  4
 g4f/Provider/Pi.py                     |  8
 g4f/Provider/TalkAi.py                 |  4
 g4f/Provider/Vercel.py                 |  4
 g4f/Provider/__init__.py               | 20
 g4f/Provider/base_provider.py          | 48
 g4f/Provider/deprecated/AiService.py   |  4
 g4f/Provider/deprecated/Aivvm.py       |  9
 g4f/Provider/deprecated/DfeHub.py      |  4
 g4f/Provider/deprecated/EasyChat.py    |  5
 g4f/Provider/deprecated/Equing.py      |  4
 g4f/Provider/deprecated/FastGpt.py     |  7
 g4f/Provider/deprecated/Forefront.py   |  4
 g4f/Provider/deprecated/GetGpt.py      |  4
 g4f/Provider/deprecated/Lockchat.py    |  4
 g4f/Provider/deprecated/V50.py         |  4
 g4f/Provider/deprecated/VoiGpt.py      |  8
 g4f/Provider/deprecated/Wuguokai.py    |  4
 g4f/Provider/helper.py                 |  6
 g4f/Provider/needs_auth/Bard.py        |  4
 g4f/Provider/needs_auth/HuggingChat.py |  8
 g4f/Provider/needs_auth/Poe.py         |  4
 g4f/Provider/needs_auth/Raycast.py     |  4
 g4f/Provider/needs_auth/Theb.py        |  4
 g4f/Provider/needs_auth/ThebApi.py     |  4
 g4f/Provider/retry_provider.py         | 26
 g4f/Provider/selenium/Phind.py         |  4
 g4f/Provider/unfinished/AiChatting.py  |  4
 g4f/__init__.py                        | 87
 g4f/api/__init__.py                    | 50
 g4f/base_provider.py                   | 54
 g4f/debug.py                           | 48
 g4f/errors.py                          |  2
 g4f/gui/client/css/style.css           |  7
 g4f/gui/client/js/chat.v1.js           | 63
 g4f/gui/server/backend.py              | 32
 g4f/models.py                          | 27
 g4f/typing.py                          |  2
 g4f/version.py                         | 47
 43 files changed, 393 insertions(+), 280 deletions(-)
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index 8d9feb2b..11725db7 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -4,12 +4,12 @@ import time
import random
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from .helper import format_prompt, get_random_string
from ..webdriver import WebDriver, WebDriverSession
from .. import debug
-class AItianhuSpace(BaseProvider):
+class AItianhuSpace(AbstractProvider):
url = "https://chat3.aiyunos.top/"
working = True
supports_stream = True
diff --git a/g4f/Provider/DeepInfra.py b/g4f/Provider/DeepInfra.py
index 754439c1..1639bbd2 100644
--- a/g4f/Provider/DeepInfra.py
+++ b/g4f/Provider/DeepInfra.py
@@ -2,9 +2,9 @@ from __future__ import annotations
import requests, json
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
-class DeepInfra(BaseProvider):
+class DeepInfra(AbstractProvider):
url: str = "https://deepinfra.com"
working: bool = True
supports_stream: bool = True
@@ -14,8 +14,10 @@ class DeepInfra(BaseProvider):
def create_completion(model: str,
messages: Messages,
stream: bool,
+ auth: str = None,
**kwargs) -> CreateResult:
-
+ if not model:
+ model = 'meta-llama/Llama-2-70b-chat-hf'
headers = {
'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'Cache-Control': 'no-cache',
@@ -34,9 +36,11 @@ class DeepInfra(BaseProvider):
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
}
+ if auth:
+ headers['Authorization'] = f"bearer {auth}"
json_data = json.dumps({
- 'model' : 'meta-llama/Llama-2-70b-chat-hf',
+ 'model' : model,
'messages': messages,
'stream' : True}, separators=(',', ':'))
@@ -45,18 +49,17 @@ class DeepInfra(BaseProvider):
response.raise_for_status()
first = True
-
- for line in response.iter_content(chunk_size=1024):
+    for line in response.iter_lines():
if line.startswith(b"data: [DONE]"):
break
-
elif line.startswith(b"data: "):
- chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
-
+ try:
+ chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+ except Exception:
+ raise RuntimeError(f"Response: {line}")
if chunk:
if first:
chunk = chunk.lstrip()
if chunk:
first = False
-
-    yield (chunk)
\ No newline at end of file
+    yield chunk
\ No newline at end of file
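
The DeepInfra change above adds an optional auth bearer token and a model fallback. A minimal sketch of calling the provider directly with the new parameter (the token value is a placeholder):

    from g4f.Provider import DeepInfra

    # An empty model now falls back to 'meta-llama/Llama-2-70b-chat-hf'
    # inside the provider; 'auth' is sent as an Authorization header.
    for chunk in DeepInfra.create_completion(
        model="",
        messages=[{"role": "user", "content": "Hello"}],
        stream=True,
        auth="<your-deepinfra-token>",  # placeholder
    ):
        print(chunk, end="")
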
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
index 9ed9c09b..f53ec9de 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/GeekGpt.py
@@ -1,12 +1,12 @@
from __future__ import annotations
import requests, json
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from ..typing import CreateResult, Messages
from json import dumps
-class GeekGpt(BaseProvider):
+class GeekGpt(AbstractProvider):
url = 'https://chat.geekgpt.org'
working = True
supports_message_history = True
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index b0a01016..145cc0bf 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -3,11 +3,11 @@ from __future__ import annotations
import time, json
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from .helper import format_prompt
from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare
-class MyShell(BaseProvider):
+class MyShell(AbstractProvider):
url = "https://app.myshell.ai/chat"
working = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index ad629aa8..023968dc 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -7,11 +7,11 @@ from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from .helper import format_prompt
from ..webdriver import WebDriver, WebDriverSession
-class PerplexityAi(BaseProvider):
+class PerplexityAi(AbstractProvider):
url = "https://www.perplexity.ai"
working = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/Pi.py b/g4f/Provider/Pi.py
index 529a4a52..2f7dc436 100644
--- a/g4f/Provider/Pi.py
+++ b/g4f/Provider/Pi.py
@@ -1,12 +1,12 @@
from __future__ import annotations
-from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, format_prompt
-
import json
+
+from ..typing import CreateResult, Messages
+from .base_provider import AbstractProvider, format_prompt
from ..requests import Session, get_session_from_browser
-class Pi(BaseProvider):
+class Pi(AbstractProvider):
url = "https://pi.ai/talk"
working = True
supports_stream = True
diff --git a/g4f/Provider/TalkAi.py b/g4f/Provider/TalkAi.py
index 85f56dda..d4efd269 100644
--- a/g4f/Provider/TalkAi.py
+++ b/g4f/Provider/TalkAi.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import time, json, time
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from ..webdriver import WebDriver, WebDriverSession
-class TalkAi(BaseProvider):
+class TalkAi(AbstractProvider):
url = "https://talkai.info"
working = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 3e210925..466ea3de 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import json, base64, requests, execjs, random, uuid
from ..typing import Messages, TypedDict, CreateResult, Any
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
from ..debug import logging
-class Vercel(BaseProvider):
+class Vercel(AbstractProvider):
url = 'https://sdk.vercel.ai'
working = False
supports_message_history = True
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 4670d331..2ff8b837 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,11 +1,13 @@
from __future__ import annotations
-from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
-from .retry_provider import RetryProvider
-from .deprecated import *
-from .needs_auth import *
-from .unfinished import *
-from .selenium import *
+from ..base_provider import BaseProvider, ProviderType
+from .retry_provider import RetryProvider
+from .base_provider import AsyncProvider, AsyncGeneratorProvider
+from .deprecated import *
+from .needs_auth import *
+from .unfinished import *
+from .selenium import *
+
from .Aura import Aura
from .AiAsk import AiAsk
from .Aichat import Aichat
@@ -59,7 +61,7 @@ __modules__: list = [
getattr(sys.modules[__name__], provider) for provider in dir()
if not provider.startswith("__")
]
-__providers__: list[type[BaseProvider]] = [
+__providers__: list[ProviderType] = [
provider for provider in __modules__
if isinstance(provider, type)
and issubclass(provider, BaseProvider)
@@ -67,9 +69,9 @@ __providers__: list[type[BaseProvider]] = [
__all__: list[str] = [
provider.__name__ for provider in __providers__
]
-__map__: dict[str, type[BaseProvider]] = dict([
+__map__: dict[str, ProviderType] = dict([
(provider.__name__, provider) for provider in __providers__
])
class ProviderUtils:
-    convert: dict[str, type[BaseProvider]] = __map__
\ No newline at end of file
+    convert: dict[str, ProviderType] = __map__
\ No newline at end of file
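
ProviderUtils.convert (built from __map__) now maps provider names to ProviderType values; this is how string provider arguments get resolved. A short sketch:

    from g4f.Provider import ProviderUtils

    provider = ProviderUtils.convert["DeepInfra"]  # name -> provider class
    print(provider.url, provider.working)
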
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 62029f5d..6da7f6c6 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,12 +1,14 @@
from __future__ import annotations
import sys
+import asyncio
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
-from abc import ABC, abstractmethod
+from abc import abstractmethod
from inspect import signature, Parameter
from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages
+from ..typing import CreateResult, AsyncResult, Messages, Union
+from ..base_provider import BaseProvider
if sys.version_info < (3, 10):
NoneType = type(None)
@@ -20,25 +22,7 @@ if sys.platform == 'win32':
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
-class BaseProvider(ABC):
- url: str
- working: bool = False
- needs_auth: bool = False
- supports_stream: bool = False
- supports_gpt_35_turbo: bool = False
- supports_gpt_4: bool = False
- supports_message_history: bool = False
-
- @staticmethod
- @abstractmethod
- def create_completion(
- model: str,
- messages: Messages,
- stream: bool,
- **kwargs
- ) -> CreateResult:
- raise NotImplementedError()
-
+class AbstractProvider(BaseProvider):
@classmethod
async def create_async(
cls,
@@ -60,9 +44,12 @@ class BaseProvider(ABC):
**kwargs
))
- return await loop.run_in_executor(
- executor,
- create_func
+ return await asyncio.wait_for(
+ loop.run_in_executor(
+ executor,
+ create_func
+ ),
+ timeout=kwargs.get("timeout", 0)
)
@classmethod
@@ -102,16 +89,19 @@ class BaseProvider(ABC):
return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
-class AsyncProvider(BaseProvider):
+class AsyncProvider(AbstractProvider):
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool = False,
+ *,
+ loop: AbstractEventLoop = None,
**kwargs
) -> CreateResult:
- loop = get_event_loop()
+ if not loop:
+ loop = get_event_loop()
coro = cls.create_async(model, messages, **kwargs)
yield loop.run_until_complete(coro)
@@ -134,9 +124,12 @@ class AsyncGeneratorProvider(AsyncProvider):
model: str,
messages: Messages,
stream: bool = True,
+ *,
+ loop: AbstractEventLoop = None,
**kwargs
) -> CreateResult:
- loop = get_event_loop()
+ if not loop:
+ loop = get_event_loop()
generator = cls.create_async_generator(
model,
messages,
@@ -171,6 +164,7 @@ class AsyncGeneratorProvider(AsyncProvider):
def create_async_generator(
model: str,
messages: Messages,
+ stream: bool = True,
**kwargs
) -> AsyncResult:
raise NotImplementedError()
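
With the abstract base moved to g4f/base_provider.py, synchronous providers now subclass AbstractProvider, which layers create_async and the new timeout and loop handling on top of BaseProvider. A minimal sketch of a custom provider under the new hierarchy (EchoProvider is a hypothetical name, not part of the codebase):

    from g4f.Provider.base_provider import AbstractProvider
    from g4f.typing import CreateResult, Messages

    class EchoProvider(AbstractProvider):  # hypothetical example
        url = "https://example.invalid"
        working = True
        supports_stream = True

        @staticmethod
        def create_completion(model: str, messages: Messages,
                              stream: bool, **kwargs) -> CreateResult:
            # Echo the last user message back, one word at a time.
            for word in messages[-1]["content"].split():
                yield word + " "
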
diff --git a/g4f/Provider/deprecated/AiService.py b/g4f/Provider/deprecated/AiService.py
index 325af670..acd7f5ea 100644
--- a/g4f/Provider/deprecated/AiService.py
+++ b/g4f/Provider/deprecated/AiService.py
@@ -3,10 +3,10 @@ from __future__ import annotations
import requests
from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class AiService(BaseProvider):
+class AiService(AbstractProvider):
url = "https://aiservice.vercel.app/"
working = False
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
index 8b5a9e05..c973adf8 100644
--- a/g4f/Provider/deprecated/Aivvm.py
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -1,9 +1,10 @@
from __future__ import annotations
+
import requests
+import json
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ...typing import CreateResult, Messages
-from json import dumps
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
@@ -17,7 +18,7 @@ models = {
'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}
-class Aivvm(BaseProvider):
+class Aivvm(AbstractProvider):
url = 'https://chat.aivvm.com'
supports_stream = True
working = False
@@ -44,7 +45,7 @@ class Aivvm(BaseProvider):
"temperature" : kwargs.get("temperature", 0.7)
}
- data = dumps(json_data)
+ data = json.dumps(json_data)
headers = {
"accept" : "text/event-stream",
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
index 4458bac6..e6d13444 100644
--- a/g4f/Provider/deprecated/DfeHub.py
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -7,10 +7,10 @@ import time
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class DfeHub(BaseProvider):
+class DfeHub(AbstractProvider):
url = "https://chat.dfehub.com/"
supports_stream = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
index 3142f243..7a00f523 100644
--- a/g4f/Provider/deprecated/EasyChat.py
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -2,14 +2,13 @@ from __future__ import annotations
import json
import random
-
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class EasyChat(BaseProvider):
+class EasyChat(AbstractProvider):
url: str = "https://free.easychat.work"
supports_stream = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 9f510e50..5fd9797b 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -6,10 +6,10 @@ from abc import ABC, abstractmethod
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class Equing(BaseProvider):
+class Equing(AbstractProvider):
url: str = 'https://next.eqing.tech/'
working = False
supports_stream = True
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index 3af8c213..6a79d9aa 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -2,15 +2,13 @@ from __future__ import annotations
import json
import random
-from abc import ABC, abstractmethod
-
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class FastGpt(BaseProvider):
+class FastGpt(AbstractProvider):
url: str = 'https://chat9.fastgpt.me/'
working = False
needs_auth = False
@@ -19,7 +17,6 @@ class FastGpt(BaseProvider):
supports_gpt_4 = False
@staticmethod
- @abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
diff --git a/g4f/Provider/deprecated/Forefront.py b/g4f/Provider/deprecated/Forefront.py
index 2f807e91..39654b2c 100644
--- a/g4f/Provider/deprecated/Forefront.py
+++ b/g4f/Provider/deprecated/Forefront.py
@@ -5,10 +5,10 @@ import json
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class Forefront(BaseProvider):
+class Forefront(AbstractProvider):
url = "https://forefront.com"
supports_stream = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
index a7f4695c..69851ee5 100644
--- a/g4f/Provider/deprecated/GetGpt.py
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -11,10 +11,10 @@ except ImportError:
from Cryptodome.Cipher import AES
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class GetGpt(BaseProvider):
+class GetGpt(AbstractProvider):
url = 'https://chat.getgpt.world/'
supports_stream = True
working = False
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index f885672d..edab0bd4 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -5,10 +5,10 @@ import json
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class Lockchat(BaseProvider):
+class Lockchat(AbstractProvider):
url: str = "http://supertest.lockchat.app"
supports_stream = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
index e24ac2d4..456445f7 100644
--- a/g4f/Provider/deprecated/V50.py
+++ b/g4f/Provider/deprecated/V50.py
@@ -5,10 +5,10 @@ import uuid
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class V50(BaseProvider):
+class V50(AbstractProvider):
url = 'https://p5.v50.ltd'
supports_gpt_35_turbo = True
supports_stream = False
diff --git a/g4f/Provider/deprecated/VoiGpt.py b/g4f/Provider/deprecated/VoiGpt.py
index b312709f..9b061e63 100644
--- a/g4f/Provider/deprecated/VoiGpt.py
+++ b/g4f/Provider/deprecated/VoiGpt.py
@@ -2,13 +2,11 @@ from __future__ import annotations
import json
import requests
-from .base_provider import BaseProvider
-from ..typing import Messages, CreateResult
-from .helper import get_cookies
+from ..base_provider import AbstractProvider
+from ...typing import Messages, CreateResult
-
-class VoiGpt(BaseProvider):
+class VoiGpt(AbstractProvider):
"""
VoiGpt - A provider for VoiGpt.com
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
index 87877198..f12d1bfe 100644
--- a/g4f/Provider/deprecated/Wuguokai.py
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -5,10 +5,10 @@ import random
import requests
from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider, format_prompt
+from ..base_provider import AbstractProvider, format_prompt
-class Wuguokai(BaseProvider):
+class Wuguokai(AbstractProvider):
url = 'https://chat.wuguokai.xyz'
supports_gpt_35_turbo = True
working = False
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index ded59ee2..81f417dd 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-import sys
import asyncio
import webbrowser
import random
@@ -8,7 +7,7 @@ import string
import secrets
import os
from os import path
-from asyncio import AbstractEventLoop
+from asyncio import AbstractEventLoop, BaseEventLoop
from platformdirs import user_config_dir
from browser_cookie3 import (
chrome,
@@ -34,7 +33,8 @@ _cookies: Dict[str, Dict[str, str]] = {}
def get_event_loop() -> AbstractEventLoop:
try:
loop = asyncio.get_event_loop()
- loop._check_closed()
+ if isinstance(loop, BaseEventLoop):
+ loop._check_closed()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 48e535dd..cf1000c4 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -8,11 +8,11 @@ from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ..helper import format_prompt
from ...webdriver import WebDriver, WebDriverSession
-class Bard(BaseProvider):
+class Bard(AbstractProvider):
url = "https://bard.google.com"
working = True
needs_auth = True
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 41c938b4..e4fa237d 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -8,6 +8,9 @@ from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ..helper import format_prompt, get_cookies
+map = {
+ "openchat/openchat_3.5": "openchat/openchat-3.5-1210",
+}
class HuggingChat(AsyncGeneratorProvider):
url = "https://huggingface.co/chat"
@@ -25,7 +28,10 @@ class HuggingChat(AsyncGeneratorProvider):
cookies: dict = None,
**kwargs
) -> AsyncResult:
- model = model if model else cls.model
+ if not model:
+ model = cls.model
+ elif model in map:
+ model = map[model]
if not cookies:
cookies = get_cookies(".huggingface.co")
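
The new map dict lets callers keep using the public model name while HuggingChat rewrites it to the checkpoint it actually serves. The resolution logic, restated as a sketch (renamed to aliases here to avoid shadowing the builtin):

    aliases = {"openchat/openchat_3.5": "openchat/openchat-3.5-1210"}
    model = "openchat/openchat_3.5"
    model = aliases.get(model, model)
    assert model == "openchat/openchat-3.5-1210"
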
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 200ded3b..41c5315a 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import time
from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ..helper import format_prompt
from ...webdriver import WebDriver, WebDriverSession
@@ -20,7 +20,7 @@ models = {
"palm": {"name": "Google-PaLM"},
}
-class Poe(BaseProvider):
+class Poe(AbstractProvider):
url = "https://poe.com"
working = True
needs_auth = True
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index d7be98ac..07abeda3 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -5,10 +5,10 @@ import json
import requests
from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
-class Raycast(BaseProvider):
+class Raycast(AbstractProvider):
url = "https://raycast.com"
supports_gpt_35_turbo = True
supports_gpt_4 = True
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index 82eac6e2..efb38a40 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import time
from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ..helper import format_prompt
from ...webdriver import WebDriver, WebDriverSession
@@ -31,7 +31,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class Theb(BaseProvider):
+class Theb(AbstractProvider):
url = "https://beta.theb.ai"
working = True
supports_gpt_35_turbo = True
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 0441f352..8ec7bda8 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import requests
from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
models = {
"theb-ai": "TheB.AI",
@@ -29,7 +29,7 @@ models = {
"qwen-7b-chat": "Qwen 7B"
}
-class ThebApi(BaseProvider):
+class ThebApi(AbstractProvider):
url = "https://theb.ai"
working = True
needs_auth = True
diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index e49b6da6..4d3e77ac 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -2,26 +2,13 @@ from __future__ import annotations
import asyncio
import random
-from typing import List, Type, Dict
from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, AsyncProvider
+from ..base_provider import BaseRetryProvider
from .. import debug
from ..errors import RetryProviderError, RetryNoProviderError
-class RetryProvider(AsyncProvider):
- __name__: str = "RetryProvider"
- supports_stream: bool = True
-
- def __init__(
- self,
- providers: List[Type[BaseProvider]],
- shuffle: bool = True
- ) -> None:
- self.providers: List[Type[BaseProvider]] = providers
- self.shuffle: bool = shuffle
- self.working = True
-
+class RetryProvider(BaseRetryProvider):
def create_completion(
self,
model: str,
@@ -36,20 +23,18 @@ class RetryProvider(AsyncProvider):
if self.shuffle:
random.shuffle(providers)
- self.exceptions: Dict[str, Exception] = {}
+ self.exceptions = {}
started: bool = False
for provider in providers:
+ self.last_provider = provider
try:
if debug.logging:
print(f"Using {provider.__name__} provider")
-
for token in provider.create_completion(model, messages, stream, **kwargs):
yield token
started = True
-
if started:
return
-
except Exception as e:
self.exceptions[provider.__name__] = e
if debug.logging:
@@ -69,8 +54,9 @@ class RetryProvider(AsyncProvider):
if self.shuffle:
random.shuffle(providers)
- self.exceptions: Dict[str, Exception] = {}
+ self.exceptions = {}
for provider in providers:
+ self.last_provider = provider
try:
return await asyncio.wait_for(
provider.create_async(model, messages, **kwargs),
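
RetryProvider now inherits its constructor and state (providers, shuffle, exceptions, last_provider) from BaseRetryProvider and records last_provider before each attempt. A sketch of wiring one up explicitly; the provider choice is illustrative:

    import g4f
    from g4f.Provider import RetryProvider, GeekGpt, DeepInfra

    provider = RetryProvider([GeekGpt, DeepInfra], shuffle=False)
    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        provider=provider,
    )
    # After the call: which class answered, and what failed before it.
    print(provider.last_provider.__name__, provider.exceptions)
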
diff --git a/g4f/Provider/selenium/Phind.py b/g4f/Provider/selenium/Phind.py
index 2722307d..b97d278f 100644
--- a/g4f/Provider/selenium/Phind.py
+++ b/g4f/Provider/selenium/Phind.py
@@ -4,11 +4,11 @@ import time
from urllib.parse import quote
from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ..helper import format_prompt
from ...webdriver import WebDriver, WebDriverSession
-class Phind(BaseProvider):
+class Phind(AbstractProvider):
url = "https://www.phind.com"
working = True
supports_gpt_4 = True
diff --git a/g4f/Provider/unfinished/AiChatting.py b/g4f/Provider/unfinished/AiChatting.py
index a66921c1..f062fa98 100644
--- a/g4f/Provider/unfinished/AiChatting.py
+++ b/g4f/Provider/unfinished/AiChatting.py
@@ -3,11 +3,11 @@ from __future__ import annotations
from urllib.parse import unquote
from ...typing import AsyncResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
from ...webdriver import WebDriver
from ...requests import Session, get_session_from_browser
-class AiChatting(BaseProvider):
+class AiChatting(AbstractProvider):
url = "https://www.aichatting.net"
supports_gpt_35_turbo = True
_session: Session = None
diff --git a/g4f/__init__.py b/g4f/__init__.py
index 3b0fcad0..57151376 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -4,49 +4,57 @@ import os
from .errors import *
from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider, ProviderUtils
-from .typing import Messages, CreateResult, AsyncResult, Union, List
-from . import debug
+from .Provider import AsyncGeneratorProvider, ProviderUtils
+from .typing import Messages, CreateResult, AsyncResult, Union
+from . import debug, version
+from .base_provider import BaseRetryProvider, ProviderType
def get_model_and_provider(model : Union[Model, str],
- provider : Union[type[BaseProvider], str, None],
+ provider : Union[ProviderType, str, None],
stream : bool,
- ignored : List[str] = None,
+ ignored : list[str] = None,
ignore_working: bool = False,
- ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
+ ignore_stream: bool = False) -> tuple[str, ProviderType]:
if debug.version_check:
debug.version_check = False
- debug.check_pypi_version()
-
+ version.utils.check_pypi_version()
+
if isinstance(provider, str):
if provider in ProviderUtils.convert:
provider = ProviderUtils.convert[provider]
else:
raise ProviderNotFoundError(f'Provider not found: {provider}')
- if isinstance(model, str):
- if model in ModelUtils.convert:
- model = ModelUtils.convert[model]
- else:
- raise ModelNotFoundError(f'The model: {model} does not exist')
-
if not provider:
+ if isinstance(model, str):
+ if model in ModelUtils.convert:
+ model = ModelUtils.convert[model]
+ else:
+ raise ModelNotFoundError(f'Model not found: {model}')
provider = model.best_provider
- if isinstance(provider, RetryProvider) and ignored:
- provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
-
if not provider:
raise ProviderNotFoundError(f'No provider found for model: {model}')
- if not provider.working and not ignore_working:
+ if isinstance(model, Model):
+ model = model.name
+
+ if ignored and isinstance(provider, BaseRetryProvider):
+ provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
+ if not ignore_working and not provider.working:
raise ProviderNotWorkingError(f'{provider.__name__} is not working')
if not ignore_stream and not provider.supports_stream and stream:
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
if debug.logging:
- print(f'Using {provider.__name__} provider')
+ if model:
+ print(f'Using {provider.__name__} provider and {model} model')
+ else:
+ print(f'Using {provider.__name__} provider')
+
+ debug.last_provider = provider
return model, provider
@@ -54,10 +62,10 @@ class ChatCompletion:
@staticmethod
def create(model : Union[Model, str],
messages : Messages,
- provider : Union[type[BaseProvider], str, None] = None,
+ provider : Union[ProviderType, str, None] = None,
stream : bool = False,
auth : Union[str, None] = None,
- ignored : List[str] = None,
+ ignored : list[str] = None,
ignore_working: bool = False,
ignore_stream_and_auth: bool = False,
**kwargs) -> Union[CreateResult, str]:
@@ -75,32 +83,33 @@ class ChatCompletion:
if proxy:
kwargs['proxy'] = proxy
- result = provider.create_completion(model.name, messages, stream, **kwargs)
+ result = provider.create_completion(model, messages, stream, **kwargs)
return result if stream else ''.join(result)
@staticmethod
- async def create_async(model : Union[Model, str],
- messages : Messages,
- provider : Union[type[BaseProvider], str, None] = None,
- stream : bool = False,
- ignored : List[str] = None,
- **kwargs) -> Union[AsyncResult, str]:
+ def create_async(model : Union[Model, str],
+ messages : Messages,
+ provider : Union[ProviderType, str, None] = None,
+ stream : bool = False,
+ ignored : list[str] = None,
+ **kwargs) -> Union[AsyncResult, str]:
+
model, provider = get_model_and_provider(model, provider, False, ignored)
if stream:
if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
- return await provider.create_async_generator(model.name, messages, **kwargs)
+ return provider.create_async_generator(model, messages, **kwargs)
raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument in "create_async"')
- return await provider.create_async(model.name, messages, **kwargs)
+ return provider.create_async(model, messages, **kwargs)
class Completion:
@staticmethod
def create(model : Union[Model, str],
prompt : str,
- provider : Union[type[BaseProvider], None] = None,
+ provider : Union[ProviderType, None] = None,
stream : bool = False,
- ignored : List[str] = None, **kwargs) -> Union[CreateResult, str]:
+ ignored : list[str] = None, **kwargs) -> Union[CreateResult, str]:
allowed_models = [
'code-davinci-002',
@@ -111,10 +120,18 @@ class Completion:
'text-davinci-003'
]
if model not in allowed_models:
- raise ModelNotAllowed(f'Can\'t use {model} with Completion.create()')
+ raise ModelNotAllowedError(f'Can\'t use {model} with Completion.create()')
model, provider = get_model_and_provider(model, provider, stream, ignored)
- result = provider.create_completion(model.name, [{"role": "user", "content": prompt}], stream, **kwargs)
+ result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)
-        return result if stream else ''.join(result)
\ No newline at end of file
+ return result if stream else ''.join(result)
+
+def get_last_provider(as_dict: bool = False) -> ProviderType:
+ last = debug.last_provider
+ if isinstance(last, BaseRetryProvider):
+ last = last.last_provider
+ if last and as_dict:
+ return {"name": last.__name__, "url": last.url}
+    return last
\ No newline at end of file
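
get_last_provider is the new public way to find out which provider served the last call; for a retry provider it unwraps to the member that actually answered:

    import g4f

    text = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
    )
    print(g4f.get_last_provider())      # provider class that answered
    print(g4f.get_last_provider(True))  # {'name': ..., 'url': ...}
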
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 8369d70f..15e8b3a5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,23 +1,25 @@
import ast
import logging
-
-from fastapi import FastAPI, Response, Request
-from fastapi.responses import StreamingResponse
-from typing import List, Union, Any, Dict, AnyStr
-#from ._tokenizer import tokenize
-from .. import BaseProvider
-
import time
import json
import random
import string
import uvicorn
import nest_asyncio
+
+from fastapi import FastAPI, Response, Request
+from fastapi.responses import StreamingResponse
+from typing import List, Union, Any, Dict, AnyStr
+#from ._tokenizer import tokenize
+
import g4f
+from .. import debug
+
+debug.logging = True
class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
- list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None:
+ list_ignored_providers: List[str] = None) -> None:
self.engine = engine
self.debug = debug
self.sentry = sentry
@@ -75,7 +77,10 @@ class Api:
}
# item contains byte keys, and dict.get suppresses error
- item_data.update({key.decode('utf-8') if isinstance(key, bytes) else key: str(value) for key, value in (item or {}).items()})
+ item_data.update({
+ key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
+ for key, value in (item or {}).items()
+ })
# messages is str, need dict
if isinstance(item_data.get('messages'), str):
item_data['messages'] = ast.literal_eval(item_data.get('messages'))
@@ -96,7 +101,12 @@ class Api:
)
except Exception as e:
logging.exception(e)
- return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+ content = json.dumps({
+ "error": {"message": f"An error occurred while generating the response:\n{e}"},
+ "model": model,
+ "provider": g4f.get_last_provider(True)
+ })
+ return Response(content=content, status_code=500, media_type="application/json")
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
completion_timestamp = int(time.time())
@@ -109,6 +119,7 @@ class Api:
'object': 'chat.completion',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -136,6 +147,7 @@ class Api:
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -147,16 +159,14 @@ class Api:
}
],
}
-
- content = json.dumps(completion_data, separators=(',', ':'))
- yield f'data: {content}\n\n'
+ yield f'data: {json.dumps(completion_data)}\n\n'
time.sleep(0.03)
-
end_completion_data = {
'id': f'chatcmpl-{completion_id}',
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -165,15 +175,17 @@ class Api:
}
],
}
-
- content = json.dumps(end_completion_data, separators=(',', ':'))
- yield f'data: {content}\n\n'
+ yield f'data: {json.dumps(end_completion_data)}\n\n'
except GeneratorExit:
pass
except Exception as e:
logging.exception(e)
- content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
- yield f'data: {content}\n\n'
+ content = json.dumps({
+ "error": {"message": f"An error occurred while generating the response:\n{e}"},
+ "model": model,
+ "provider": g4f.get_last_provider(True),
+ })
+ yield f'data: {content}'
return StreamingResponse(streaming(), media_type="text/event-stream")
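
API errors now come back with status 500 and carry the exception text plus the provider that failed. The JSON body has roughly this shape (values are illustrative):

    {
        "error": {"message": "An error occurred while generating the response:\n<exception>"},
        "model": "gpt-3.5-turbo",
        "provider": {"name": "GeekGpt", "url": "https://chat.geekgpt.org"},
    }
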
diff --git a/g4f/base_provider.py b/g4f/base_provider.py
new file mode 100644
index 00000000..84cbc384
--- /dev/null
+++ b/g4f/base_provider.py
@@ -0,0 +1,54 @@
+from abc import ABC, abstractmethod
+from .typing import Messages, CreateResult, Union
+
+class BaseProvider(ABC):
+ url: str
+ working: bool = False
+ needs_auth: bool = False
+ supports_stream: bool = False
+ supports_gpt_35_turbo: bool = False
+ supports_gpt_4: bool = False
+ supports_message_history: bool = False
+ params: str
+
+ @classmethod
+ @abstractmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ **kwargs
+ ) -> CreateResult:
+ raise NotImplementedError()
+
+ @classmethod
+ @abstractmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ **kwargs
+ ) -> str:
+ raise NotImplementedError()
+
+ @classmethod
+ def get_dict(cls):
+ return {'name': cls.__name__, 'url': cls.url}
+
+class BaseRetryProvider(BaseProvider):
+ __name__: str = "RetryProvider"
+ supports_stream: bool = True
+
+ def __init__(
+ self,
+ providers: list[type[BaseProvider]],
+ shuffle: bool = True
+ ) -> None:
+ self.providers: list[type[BaseProvider]] = providers
+ self.shuffle: bool = shuffle
+ self.working: bool = True
+ self.exceptions: dict[str, Exception] = {}
+ self.last_provider: type[BaseProvider] = None
+
+ProviderType = Union[type[BaseProvider], BaseRetryProvider]
\ No newline at end of file
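
ProviderType is the union used throughout the codebase for "a provider class or a retry-provider instance", and get_dict produces the serializable form that get_last_provider(True) returns:

    from g4f.Provider import DeepInfra

    print(DeepInfra.get_dict())
    # {'name': 'DeepInfra', 'url': 'https://deepinfra.com'}
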
diff --git a/g4f/debug.py b/g4f/debug.py
index 9d2f40ed..68a4f1ec 100644
--- a/g4f/debug.py
+++ b/g4f/debug.py
@@ -1,45 +1,5 @@
-from os import environ
-import requests
-from importlib.metadata import version as get_package_version, PackageNotFoundError
-from subprocess import check_output, CalledProcessError, PIPE
-from .errors import VersionNotFoundError
+from .base_provider import ProviderType
-logging = False
-version_check = True
-
-def get_version() -> str:
- # Read from package manager
- try:
- return get_package_version("g4f")
- except PackageNotFoundError:
- pass
- # Read from docker environment
- current_version = environ.get("G4F_VERSION")
- if current_version:
- return current_version
- # Read from git repository
- try:
- command = ["git", "describe", "--tags", "--abbrev=0"]
- return check_output(command, text=True, stderr=PIPE).strip()
- except CalledProcessError:
- pass
- raise VersionNotFoundError("Version not found")
-
-def get_latest_version() -> str:
- if environ.get("G4F_VERSION"):
- url = "https://registry.hub.docker.com/v2/repositories/"
- url += "hlohaus789/g4f"
- url += "/tags?page_size=2&ordering=last_updated"
- response = requests.get(url).json()
- return response["results"][1]["name"]
- response = requests.get("https://pypi.org/pypi/g4f/json").json()
- return response["info"]["version"]
-
-def check_pypi_version() -> None:
- try:
- version = get_version()
- latest_version = get_latest_version()
- if version != latest_version:
- print(f'New pypi version: {latest_version} (current: {version}) | pip install -U g4f')
- except Exception as e:
-        print(f'Failed to check g4f pypi version: {e}')
\ No newline at end of file
+logging: bool = False
+version_check: bool = True
+last_provider: ProviderType = None
\ No newline at end of file
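
g4f/debug.py shrinks to three module-level flags; the version machinery moves to the new g4f/version.py below. Turning on request logging is now just:

    from g4f import debug

    debug.logging = True  # print which provider and model each call uses
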
diff --git a/g4f/errors.py b/g4f/errors.py
index b554aead..b874435a 100644
--- a/g4f/errors.py
+++ b/g4f/errors.py
@@ -13,7 +13,7 @@ class AuthenticationRequiredError(Exception):
class ModelNotFoundError(Exception):
pass
-class ModelNotAllowed(Exception):
+class ModelNotAllowedError(Exception):
pass
class RetryProviderError(Exception):
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index b6d73650..e619b409 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -295,11 +295,12 @@ body {
gap: 18px;
}
-.message .content p,
-.message .content li,
-.message .content code {
+.message .content,
+.message .content a:link,
+.message .content a:visited{
font-size: 15px;
line-height: 1.3;
+ color: var(--colour-3);
}
.message .content pre {
white-space: pre-wrap;
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 644ff77a..638ce0ac 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -73,7 +73,7 @@ const ask_gpt = async () => {
provider = document.getElementById("provider");
model = document.getElementById("model");
prompt_lock = true;
- window.text = ``;
+ window.text = '';
stop_generating.classList.remove(`stop_generating-hidden`);
@@ -88,10 +88,13 @@ const ask_gpt = async () => {
${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
</div>
<div class="content" id="gpt_${window.token}">
- <div id="cursor"></div>
+ <div class="provider"></div>
+ <div class="content_inner"><div id="cursor"></div></div>
</div>
</div>
`;
+ content = document.getElementById(`gpt_${window.token}`);
+ content_inner = content.querySelector('.content_inner');
message_box.scrollTop = message_box.scrollHeight;
window.scrollTo(0, 0);
@@ -123,28 +126,38 @@ const ask_gpt = async () => {
await new Promise((r) => setTimeout(r, 1000));
window.scrollTo(0, 0);
- const reader = response.body.getReader();
+ const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+
+ error = provider = null;
while (true) {
const { value, done } = await reader.read();
if (done) break;
-
- chunk = new TextDecoder().decode(value);
-
- text += chunk;
-
- document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
- document.querySelectorAll(`code`).forEach((el) => {
- hljs.highlightElement(el);
- });
+ for (const line of value.split("\n")) {
+ if (!line) continue;
+ const message = JSON.parse(line);
+ if (message["type"] == "content") {
+ text += message["content"];
+ } else if (message["type"] == "provider") {
+ provider = message["provider"];
+ content.querySelector('.provider').innerHTML =
+ '<a href="' + provider.url + '" target="_blank">' + provider.name + "</a>"
+ } else if (message["type"] == "error") {
+ error = message["error"];
+ }
+ }
+ if (error) {
+ console.error(error);
+            content_inner.innerHTML = "An error occurred, please try again. If the problem persists, please use another model or provider";
+ } else {
+ content_inner.innerHTML = markdown_render(text);
+ document.querySelectorAll('code').forEach((el) => {
+ hljs.highlightElement(el);
+ });
+ }
window.scrollTo(0, 0);
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
}
-
- if (text.includes(`G4F_ERROR`)) {
- console.log("response", text);
- document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
- }
} catch (e) {
console.log(e);
@@ -153,13 +166,13 @@ const ask_gpt = async () => {
if (e.name != `AbortError`) {
text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
- document.getElementById(`gpt_${window.token}`).innerHTML = text;
+ content_inner.innerHTML = text;
} else {
- document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
+ content_inner.innerHTML += ` [aborted]`;
text += ` [aborted]`
}
}
- add_message(window.conversation_id, "assistant", text);
+ add_message(window.conversation_id, "assistant", text, provider);
message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button();
prompt_lock = false;
@@ -259,10 +272,11 @@ const load_conversation = async (conversation_id) => {
}
</div>
<div class="content">
- ${item.role == "assistant"
- ? markdown_render(item.content)
- : item.content
+ ${item.provider
+ ? '<div class="provider"><a href="' + item.provider.url + '" target="_blank">' + item.provider.name + '</a></div>'
+ : ''
}
+ <div class="content_inner">${markdown_render(item.content)}</div>
</div>
</div>
`;
@@ -323,12 +337,13 @@ const remove_last_message = async (conversation_id) => {
);
};
-const add_message = async (conversation_id, role, content) => {
+const add_message = async (conversation_id, role, content, provider) => {
const conversation = await get_conversation(conversation_id);
conversation.items.push({
role: role,
content: content,
+ provider: provider
});
localStorage.setItem(
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 105edb43..1aa506b2 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -4,7 +4,7 @@ from g4f.Provider import __providers__
import json
from flask import request, Flask
from .internet import get_search_message
-from g4f import debug
+from g4f import debug, version
debug.logging = True
@@ -53,8 +53,8 @@ class Backend_Api:
def version(self):
return {
- "version": debug.get_version(),
- "lastet_version": debug.get_latest_version(),
+ "version": version.utils.current_version,
+ "lastet_version": version.utils.latest_version,
}
def _gen_title(self):
@@ -65,7 +65,7 @@ class Backend_Api:
def _conversation(self):
#jailbreak = request.json['jailbreak']
messages = request.json['meta']['content']['parts']
- if request.json['internet_access']:
+ if request.json.get('internet_access'):
messages[-1]["content"] = get_search_message(messages[-1]["content"])
model = request.json.get('model')
model = model if model else g4f.models.default
@@ -74,20 +74,30 @@ class Backend_Api:
def try_response():
try:
- yield from g4f.ChatCompletion.create(
+ first = True
+ for chunk in g4f.ChatCompletion.create(
model=model,
provider=provider,
messages=messages,
stream=True,
ignore_stream_and_auth=True
- )
+ ):
+ if first:
+ first = False
+ yield json.dumps({
+ 'type' : 'provider',
+ 'provider': g4f.get_last_provider(True)
+ }) + "\n"
+ yield json.dumps({
+ 'type' : 'content',
+ 'content': chunk,
+ }) + "\n"
+
except Exception as e:
- print(e)
yield json.dumps({
- 'code' : 'G4F_ERROR',
- '_action': '_ask',
- 'success': False,
- 'error' : f'{e.__class__.__name__}: {e}'
+ 'type' : 'error',
+ 'error': f'{e.__class__.__name__}: {e}'
})
+ raise e
return self.app.response_class(try_response(), mimetype='text/event-stream')
\ No newline at end of file
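
The GUI backend now streams newline-delimited JSON messages whose type is 'provider', 'content', or 'error' instead of raw text with a G4F_ERROR sentinel, which is what the chat.v1.js changes above parse. A hypothetical Python client for the same stream (endpoint path and payload shape are assumptions based on this handler):

    import json
    import requests

    payload = {"meta": {"content": {"parts": [{"role": "user", "content": "Hi"}]}}}
    with requests.post("http://localhost:8080/backend-api/v2/conversation",
                       json=payload, stream=True) as response:
        for line in response.iter_lines():
            if not line:
                continue
            message = json.loads(line)
            if message["type"] == "provider":
                print("answered by:", message["provider"]["name"])
            elif message["type"] == "content":
                print(message["content"], end="")
            elif message["type"] == "error":
                raise RuntimeError(message["error"])
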
diff --git a/g4f/models.py b/g4f/models.py
index 9a4539c5..264cd40e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -1,7 +1,6 @@
from __future__ import annotations
from dataclasses import dataclass
-from .typing import Union
-from .Provider import BaseProvider, RetryProvider
+from .Provider import RetryProvider, ProviderType
from .Provider import (
Chatgpt4Online,
ChatgptDemoAi,
@@ -36,7 +35,7 @@ from .Provider import (
class Model:
name: str
base_provider: str
- best_provider: Union[type[BaseProvider], RetryProvider] = None
+ best_provider: ProviderType = None
@staticmethod
def __all__() -> list[str]:
@@ -101,28 +100,39 @@ gpt_4_turbo = Model(
llama2_7b = Model(
name = "meta-llama/Llama-2-7b-chat-hf",
base_provider = 'huggingface',
- best_provider = RetryProvider([Llama2, DeepInfra]))
+ best_provider = RetryProvider([Llama2, DeepInfra])
+)
llama2_13b = Model(
name = "meta-llama/Llama-2-13b-chat-hf",
base_provider = 'huggingface',
- best_provider = RetryProvider([Llama2, DeepInfra]))
+ best_provider = RetryProvider([Llama2, DeepInfra])
+)
llama2_70b = Model(
name = "meta-llama/Llama-2-70b-chat-hf",
base_provider = "huggingface",
- best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
+ best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+)
# Mistal
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = HuggingChat)
+ best_provider = RetryProvider([DeepInfra, HuggingChat])
+)
mistral_7b = Model(
name = "mistralai/Mistral-7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = HuggingChat)
+ best_provider = RetryProvider([DeepInfra, HuggingChat])
+)
+
+openchat_35 = Model(
+ name = "openchat/openchat_3.5",
+ base_provider = "huggingface",
+ best_provider = RetryProvider([DeepInfra, HuggingChat])
+)
# Bard
palm = Model(
@@ -313,6 +323,7 @@ class ModelUtils:
# Mistral
'mixtral-8x7b': mixtral_8x7b,
'mistral-7b': mistral_7b,
+ 'openchat_3.5': openchat_35,
# Bard
'palm2' : palm,
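
With the new registry entry, the OpenChat model is reachable by its short name, falling back between DeepInfra and HuggingChat via the RetryProvider:

    import g4f

    text = g4f.ChatCompletion.create(
        model="openchat_3.5",  # resolved via ModelUtils.convert
        messages=[{"role": "user", "content": "Hello"}],
    )
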
diff --git a/g4f/typing.py b/g4f/typing.py
index cfddf4a8..c93a4bcf 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -1,5 +1,5 @@
import sys
-from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
+from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type
if sys.version_info >= (3, 8):
from typing import TypedDict
diff --git a/g4f/version.py b/g4f/version.py
new file mode 100644
index 00000000..9e572bbb
--- /dev/null
+++ b/g4f/version.py
@@ -0,0 +1,47 @@
+from os import environ
+import requests
+from functools import cached_property
+from importlib.metadata import version as get_package_version, PackageNotFoundError
+from subprocess import check_output, CalledProcessError, PIPE
+from .errors import VersionNotFoundError
+
+
+class VersionUtils():
+ @cached_property
+ def current_version(self) -> str:
+ # Read from package manager
+ try:
+ return get_package_version("g4f")
+ except PackageNotFoundError:
+ pass
+ # Read from docker environment
+ version = environ.get("G4F_VERSION")
+ if version:
+ return version
+ # Read from git repository
+ try:
+ command = ["git", "describe", "--tags", "--abbrev=0"]
+ return check_output(command, text=True, stderr=PIPE).strip()
+ except CalledProcessError:
+ pass
+ raise VersionNotFoundError("Version not found")
+
+ @cached_property
+ def latest_version(self) -> str:
+ try:
+ get_package_version("g4f")
+ response = requests.get("https://pypi.org/pypi/g4f/json").json()
+ return response["info"]["version"]
+ except PackageNotFoundError:
+ url = "https://api.github.com/repos/xtekky/gpt4free/releases/latest"
+ response = requests.get(url).json()
+ return response["tag_name"]
+
+ def check_pypi_version(self) -> None:
+ try:
+ if self.current_version != self.latest_version:
+            print(f'New pypi version: {self.latest_version} (current: {self.current_version}) | pip install -U g4f')
+ except Exception as e:
+ print(f'Failed to check g4f pypi version: {e}')
+
+utils = VersionUtils()
\ No newline at end of file
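
Both lookups are cached_property values, so the network request and the git subprocess run at most once per process. Typical use through the module-level utils singleton:

    from g4f import version

    print(version.utils.current_version)  # package / env / git version
    print(version.utils.latest_version)   # PyPI or GitHub latest
    version.utils.check_pypi_version()    # prints a hint when outdated
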