author     Heiner Lohaus <heiner@lohaus.eu>   2023-09-26 10:03:37 +0200
committer  Heiner Lohaus <heiner@lohaus.eu>   2023-09-26 10:03:37 +0200
commit     3c2755bc72efa0d8e5d8b2883443530ba67ecad4 (patch)
tree       778a3c87768574ecd5f9a4927fbb58fc4137c32b /g4f/Provider
parent     AItianhuSpace Provider with GPT 4 added (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Aibn.py            51
-rw-r--r--  g4f/Provider/ChatgptDuo.py      51
-rw-r--r--  g4f/Provider/PerplexityAi.py     2
-rw-r--r--  g4f/Provider/Vercel.py           2
-rw-r--r--  g4f/Provider/__init__.py         4
-rw-r--r--  g4f/Provider/base_provider.py  102
-rw-r--r--  g4f/Provider/helper.py          54
7 files changed, 191 insertions, 75 deletions
diff --git a/g4f/Provider/Aibn.py b/g4f/Provider/Aibn.py
new file mode 100644
index 00000000..1ef928be
--- /dev/null
+++ b/g4f/Provider/Aibn.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+import time
+import hashlib
+
+from ..typing import AsyncGenerator
+from g4f.requests import AsyncSession
+from .base_provider import AsyncGeneratorProvider
+
+
+class Aibn(AsyncGeneratorProvider):
+    url = "https://aibn.cc"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
+        async with AsyncSession(impersonate="chrome107") as session:
+            timestamp = int(time.time())
+            data = {
+                "messages": messages,
+                "pass": None,
+                "sign": generate_signature(timestamp, messages[-1]["content"]),
+                "time": timestamp
+            }
+            async with session.post(f"{cls.url}/api/generate", json=data) as response:
+                response.raise_for_status()
+                async for chunk in response.content.iter_any():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
+    data = f"{timestamp}:{message}:{secret}"
+    return hashlib.sha256(data.encode()).hexdigest()
\ No newline at end of file
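Usage sketch for the new Aibn provider (illustrative, not part of the commit; the model name and message list are example values):

    import asyncio
    from g4f.Provider import Aibn

    async def demo():
        messages = [{"role": "user", "content": "Hello"}]
        # create_async_generator streams decoded text chunks as they arrive
        async for chunk in Aibn.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="")

    asyncio.run(demo())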
diff --git a/g4f/Provider/ChatgptDuo.py b/g4f/Provider/ChatgptDuo.py
new file mode 100644
index 00000000..07f4c16c
--- /dev/null
+++ b/g4f/Provider/ChatgptDuo.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from g4f.requests import AsyncSession
+from .base_provider import AsyncProvider, format_prompt
+
+
+class ChatgptDuo(AsyncProvider):
+    url = "https://chatgptduo.com"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> str:
+        async with AsyncSession(impersonate="chrome107") as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "search": prompt,
+                "purpose": "ask",
+            }
+            async with session.post(f"{cls.url}/", data=data) as response:
+                response.raise_for_status()
+                data = await response.json()
+
+                cls._sources = [{
+                    "title": source["title"],
+                    "url": source["link"],
+                    "snippet": source["snippet"]
+                } for source in data["results"]]
+
+                return data["answer"]
+
+    @classmethod
+    def get_sources(cls):
+        return cls._sources
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
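Usage sketch for the new ChatgptDuo provider (illustrative, not part of the commit): create_async returns the answer text, and the sources collected during the request can be read back afterwards via get_sources().

    import asyncio
    from g4f.Provider import ChatgptDuo

    async def demo():
        messages = [{"role": "user", "content": "What is the tallest mountain?"}]
        answer = await ChatgptDuo.create_async("gpt-3.5-turbo", messages)
        print(answer)
        # each source is a dict with "title", "url" and "snippet" keys
        for source in ChatgptDuo.get_sources():
            print(source["title"], source["url"])

    asyncio.run(demo())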
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index 269cdafd..fc0fd48c 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -58,7 +58,7 @@ class PerplexityAi(AsyncProvider):
             result = json.loads(json.loads(line[3:])[0]["text"])
 
             cls._sources = [{
-                "name": source["name"],
+                "title": source["name"],
                 "url": source["url"],
                 "snippet": source["snippet"]
             } for source in result["web_results"]]
diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index 4102c07b..2d20ca6a 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -62,7 +62,7 @@ class Vercel(BaseProvider):
                 response.raise_for_status()
             except:
                 continue
-            for token in response.iter_content(chunk_size=8):
+            for token in response.iter_content(chunk_size=None):
                 yield token.decode()
             break
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index ebe01603..59c91dd5 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 from .Acytoo import Acytoo
+from .Aibn import Aibn
 from .Aichat import Aichat
 from .Ails import Ails
 from .AiService import AiService
@@ -10,6 +11,7 @@ from .Bard import Bard
 from .Bing import Bing
 from .ChatBase import ChatBase
 from .ChatgptAi import ChatgptAi
+from .ChatgptDuo import ChatgptDuo
 from .ChatgptLogin import ChatgptLogin
 from .CodeLinkAva import CodeLinkAva
 from .DeepAi import DeepAi
@@ -49,6 +51,7 @@ __all__ = [
     'AsyncGeneratorProvider',
     'RetryProvider',
     'Acytoo',
+    'Aibn',
     'Aichat',
     'Ails',
     'AiService',
@@ -59,6 +62,7 @@ __all__ = [
     'Bing',
     'ChatBase',
     'ChatgptAi',
+    'ChatgptDuo',
     'ChatgptLogin',
     'CodeLinkAva',
     'DeepAi',
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index e8a54f78..a21dc871 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -1,13 +1,10 @@
 from __future__ import annotations
 
-import asyncio
-import functools
-from asyncio import SelectorEventLoop, AbstractEventLoop
+from asyncio import AbstractEventLoop
 from concurrent.futures import ThreadPoolExecutor
 from abc import ABC, abstractmethod
 
-import browser_cookie3
-
+from .helper import get_event_loop, get_cookies, format_prompt
 from ..typing import AsyncGenerator, CreateResult
@@ -40,20 +37,18 @@ class BaseProvider(ABC):
         **kwargs
     ) -> str:
         if not loop:
-            loop = asyncio.get_event_loop()
-
-        partial_func = functools.partial(
-            cls.create_completion,
-            model,
-            messages,
-            False,
-            **kwargs
-        )
-        response = await loop.run_in_executor(
+            loop = get_event_loop()
+        def create_func():
+            return "".join(cls.create_completion(
+                model,
+                messages,
+                False,
+                **kwargs
+            ))
+        return await loop.run_in_executor(
             executor,
-            partial_func
+            create_func
         )
-        return "".join(response)
 
     @classmethod
     @property
@@ -76,11 +71,9 @@ class AsyncProvider(BaseProvider):
         stream: bool = False,
         **kwargs
     ) -> CreateResult:
-        loop = create_event_loop()
-        try:
-            yield loop.run_until_complete(cls.create_async(model, messages, **kwargs))
-        finally:
-            loop.close()
+        loop = get_event_loop()
+        coro = cls.create_async(model, messages, **kwargs)
+        yield loop.run_until_complete(coro)
 
     @staticmethod
     @abstractmethod
@@ -103,22 +96,19 @@ class AsyncGeneratorProvider(AsyncProvider):
         stream: bool = True,
         **kwargs
     ) -> CreateResult:
-        loop = create_event_loop()
-        try:
-            generator = cls.create_async_generator(
-                model,
-                messages,
-                stream=stream,
-                **kwargs
-            )
-            gen = generator.__aiter__()
-            while True:
-                try:
-                    yield loop.run_until_complete(gen.__anext__())
-                except StopAsyncIteration:
-                    break
-        finally:
-            loop.close()
+        loop = get_event_loop()
+        generator = cls.create_async_generator(
+            model,
+            messages,
+            stream=stream,
+            **kwargs
+        )
+        gen = generator.__aiter__()
+        while True:
+            try:
+                yield loop.run_until_complete(gen.__anext__())
+            except StopAsyncIteration:
+                break
 
     @classmethod
     async def create_async(
@@ -143,38 +133,4 @@ class AsyncGeneratorProvider(AsyncProvider):
         messages: list[dict[str, str]],
         **kwargs
     ) -> AsyncGenerator:
-        raise NotImplementedError()
-
-
-# Don't create a new event loop in a running async loop.
-# Force use selector event loop on windows and linux use it anyway.
-def create_event_loop() -> SelectorEventLoop:
-    try:
-        asyncio.get_running_loop()
-    except RuntimeError:
-        return SelectorEventLoop()
-    raise RuntimeError(
-        'Use "create_async" instead of "create" function in a running event loop.')
-
-
-_cookies = {}
-
-def get_cookies(cookie_domain: str) -> dict:
-    if cookie_domain not in _cookies:
-        _cookies[cookie_domain] = {}
-        try:
-            for cookie in browser_cookie3.load(cookie_domain):
-                _cookies[cookie_domain][cookie.name] = cookie.value
-        except:
-            pass
-    return _cookies[cookie_domain]
-
-
-def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
-    if add_special_tokens or len(messages) > 1:
-        formatted = "\n".join(
-            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
-        )
-        return f"{formatted}\nAssistant:"
-    else:
-        return messages[0]["content"]
\ No newline at end of file
+        raise NotImplementedError()
\ No newline at end of file
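The pattern the refactor settles on, sketched standalone (illustrative; asyncio.new_event_loop() stands in for the cached loop returned by helper.get_event_loop()): the synchronous create_completion pulls items out of the async generator one at a time with run_until_complete instead of creating and closing a throwaway SelectorEventLoop.

    import asyncio

    async def numbers():
        for i in range(3):
            yield i

    loop = asyncio.new_event_loop()   # stand-in for get_event_loop()
    gen = numbers().__aiter__()
    while True:
        try:
            # drive the async generator from synchronous code, one item per call
            print(loop.run_until_complete(gen.__anext__()))
        except StopAsyncIteration:
            break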
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
new file mode 100644
index 00000000..e14ae65e
--- /dev/null
+++ b/g4f/Provider/helper.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+import asyncio
+import sys
+from asyncio import AbstractEventLoop
+
+import browser_cookie3
+
+_cookies: dict[str, dict[str, str]] = {}
+
+# Use our own event loop policy with a selector event loop on Windows.
+if sys.platform == 'win32':
+    _event_loop_policy = asyncio.WindowsSelectorEventLoopPolicy()
+else:
+    _event_loop_policy = asyncio.get_event_loop_policy()
+
+# If an event loop is already running, handle nested event loops:
+# if "nest_asyncio" is installed, patch the event loop.
+def get_event_loop() -> AbstractEventLoop:
+    try:
+        asyncio.get_running_loop()
+    except RuntimeError:
+        return _event_loop_policy.get_event_loop()
+    try:
+        event_loop = _event_loop_policy.get_event_loop()
+        if not hasattr(event_loop.__class__, "_nest_patched"):
+            import nest_asyncio
+            nest_asyncio.apply(event_loop)
+        return event_loop
+    except ImportError:
+        raise RuntimeError(
+            'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.')
+
+# Load cookies for a domain from all supported browsers.
+# Cache the results in the "_cookies" variable.
+def get_cookies(cookie_domain: str) -> dict:
+    if cookie_domain not in _cookies:
+        _cookies[cookie_domain] = {}
+        try:
+            for cookie in browser_cookie3.load(cookie_domain):
+                _cookies[cookie_domain][cookie.name] = cookie.value
+        except:
+            pass
+    return _cookies[cookie_domain]
+
+
+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+    if add_special_tokens or len(messages) > 1:
+        formatted = "\n".join(
+            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+        )
+        return f"{formatted}\nAssistant:"
+    else:
+        return messages[0]["content"]
\ No newline at end of file
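Illustrative use of the new helpers (not part of the commit; the cookie domain is an arbitrary example):

    from g4f.Provider.helper import format_prompt, get_cookies

    messages = [
        {"role": "system", "content": "You are concise."},
        {"role": "user", "content": "Hi"},
    ]
    # With more than one message the conversation is flattened into a
    # role-prefixed transcript ending with "Assistant:":
    #   System: You are concise.
    #   User: Hi
    #   Assistant:
    print(format_prompt(messages))

    # Cookies for a domain are read from the installed browsers once,
    # then served from the module-level cache on later calls.
    cookies = get_cookies(".bing.com")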