summaryrefslogtreecommitdiffstats
path: root/g4f
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--g4f/Provider/AItianhu.py20
-rw-r--r--g4f/Provider/AItianhuSpace.py7
-rw-r--r--g4f/Provider/ChatBase.py13
-rw-r--r--g4f/Provider/ChatForAi.py10
-rw-r--r--g4f/Provider/FreeGpt.py10
-rw-r--r--g4f/Provider/GeekGpt.py14
-rw-r--r--g4f/Provider/Liaobots.py14
-rw-r--r--g4f/Provider/MyShell.py18
-rw-r--r--g4f/Provider/Opchatgpts.py14
-rw-r--r--g4f/Provider/PerplexityAi.py7
-rw-r--r--g4f/Provider/Phind.py50
-rw-r--r--g4f/Provider/TalkAi.py6
-rw-r--r--g4f/Provider/Ylokh.py19
-rw-r--r--g4f/Provider/base_provider.py43
-rw-r--r--g4f/Provider/deprecated/Aibn.py12
-rw-r--r--g4f/Provider/deprecated/Ails.py13
-rw-r--r--g4f/Provider/deprecated/Aivvm.py14
-rw-r--r--g4f/Provider/deprecated/ChatgptDuo.py13
-rw-r--r--g4f/Provider/deprecated/CodeLinkAva.py15
-rw-r--r--g4f/Provider/deprecated/DfeHub.py15
-rw-r--r--g4f/Provider/deprecated/EasyChat.py19
-rw-r--r--g4f/Provider/deprecated/Equing.py13
-rw-r--r--g4f/Provider/deprecated/FastGpt.py13
-rw-r--r--g4f/Provider/deprecated/GetGpt.py16
-rw-r--r--g4f/Provider/deprecated/H2o.py20
-rw-r--r--g4f/Provider/deprecated/Lockchat.py14
-rw-r--r--g4f/Provider/deprecated/Myshell.py12
-rw-r--r--g4f/Provider/deprecated/V50.py15
-rw-r--r--g4f/Provider/deprecated/Vitalentum.py16
-rw-r--r--g4f/Provider/deprecated/Wuguokai.py13
-rw-r--r--g4f/Provider/helper.py84
-rw-r--r--g4f/Provider/needs_auth/Bard.py11
-rw-r--r--g4f/Provider/needs_auth/HuggingChat.py15
-rw-r--r--g4f/Provider/needs_auth/OpenAssistant.py12
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py131
-rw-r--r--g4f/Provider/needs_auth/Poe.py11
-rw-r--r--g4f/Provider/needs_auth/Raycast.py15
-rw-r--r--g4f/Provider/needs_auth/Theb.py29
-rw-r--r--g4f/Provider/webdriver.py92
-rw-r--r--g4f/__init__.py16
40 files changed, 282 insertions, 612 deletions
diff --git a/g4f/Provider/AItianhu.py b/g4f/Provider/AItianhu.py
index fcf9a4fb..34187694 100644
--- a/g4f/Provider/AItianhu.py
+++ b/g4f/Provider/AItianhu.py
@@ -71,21 +71,9 @@ class AItianhu(AsyncGeneratorProvider):
if "detail" not in line:
raise RuntimeError(f"Response: {line}")
-
- content = line["detail"]["choices"][0]["delta"].get("content")
+
+ content = line["detail"]["choices"][0]["delta"].get(
+ "content"
+ )
if content:
yield content
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("temperature", "float"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/AItianhuSpace.py b/g4f/Provider/AItianhuSpace.py
index fabe6b47..95386e8e 100644
--- a/g4f/Provider/AItianhuSpace.py
+++ b/g4f/Provider/AItianhuSpace.py
@@ -5,7 +5,8 @@ import random
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt, get_random_string
+from .helper import format_prompt, get_random_string
+from .webdriver import WebDriver, WebDriverSession
from .. import debug
class AItianhuSpace(BaseProvider):
@@ -24,7 +25,7 @@ class AItianhuSpace(BaseProvider):
domain: str = None,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
headless: bool = True,
**kwargs
) -> CreateResult:
@@ -39,7 +40,7 @@ class AItianhuSpace(BaseProvider):
url = f"https://{domain}"
prompt = format_prompt(messages)
- with WebDriverSession(web_driver, "", headless=headless, proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", headless=headless, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/ChatBase.py b/g4f/Provider/ChatBase.py
index ccc20244..996ca39a 100644
--- a/g4f/Provider/ChatBase.py
+++ b/g4f/Provider/ChatBase.py
@@ -58,15 +58,4 @@ class ChatBase(AsyncGeneratorProvider):
for incorrect_response in cls.list_incorrect_responses:
if incorrect_response in response_data:
raise RuntimeError("Incorrect response")
- yield stream.decode()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ yield stream.decode() \ No newline at end of file
diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py
index 7a123f0f..afab034b 100644
--- a/g4f/Provider/ChatForAi.py
+++ b/g4f/Provider/ChatForAi.py
@@ -57,16 +57,6 @@ class ChatForAi(AsyncGeneratorProvider):
raise RuntimeError(f"Response: {chunk.decode()}")
yield chunk.decode()
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, id: str):
buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
diff --git a/g4f/Provider/FreeGpt.py b/g4f/Provider/FreeGpt.py
index 22c6c9aa..15232c8d 100644
--- a/g4f/Provider/FreeGpt.py
+++ b/g4f/Provider/FreeGpt.py
@@ -47,16 +47,6 @@ class FreeGpt(AsyncGeneratorProvider):
raise RuntimeError("Rate limit reached")
yield chunk
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = ""):
data = f"{timestamp}:{message}:{secret}"
diff --git a/g4f/Provider/GeekGpt.py b/g4f/Provider/GeekGpt.py
index 8c449745..9ed9c09b 100644
--- a/g4f/Provider/GeekGpt.py
+++ b/g4f/Provider/GeekGpt.py
@@ -70,16 +70,4 @@ class GeekGpt(BaseProvider):
raise RuntimeError(f'error | {e} :', json_data)
if content:
- yield content
-
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
+ yield content \ No newline at end of file
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 109f7e2d..807b4424 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -97,17 +97,3 @@ class Liaobots(AsyncGeneratorProvider):
async for stream in response.content.iter_any():
if stream:
yield stream.decode()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("auth", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/MyShell.py b/g4f/Provider/MyShell.py
index a1c8d335..5c9c4fe6 100644
--- a/g4f/Provider/MyShell.py
+++ b/g4f/Provider/MyShell.py
@@ -4,7 +4,8 @@ import time, json
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
class MyShell(BaseProvider):
url = "https://app.myshell.ai/chat"
@@ -20,10 +21,10 @@ class MyShell(BaseProvider):
stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@@ -52,15 +53,16 @@ response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
"body": '{body}',
"method": "POST"
})
-window.reader = response.body.getReader();
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
"""
driver.execute_script(script.replace("{body}", json.dumps(data)))
script = """
-chunk = await window.reader.read();
-if (chunk['done']) return null;
-text = (new TextDecoder()).decode(chunk['value']);
+chunk = await window._reader.read();
+if (chunk['done']) {
+ return null;
+}
content = '';
-text.split('\\n').forEach((line, index) => {
+chunk['value'].split('\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
const data = JSON.parse(line.substring('data: '.length));
diff --git a/g4f/Provider/Opchatgpts.py b/g4f/Provider/Opchatgpts.py
index 8abdf39b..8c2987fa 100644
--- a/g4f/Provider/Opchatgpts.py
+++ b/g4f/Provider/Opchatgpts.py
@@ -56,16 +56,4 @@ class Opchatgpts(AsyncGeneratorProvider):
if line["type"] == "live":
yield line["data"]
elif line["type"] == "end":
- break
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ break \ No newline at end of file
diff --git a/g4f/Provider/PerplexityAi.py b/g4f/Provider/PerplexityAi.py
index c0b2412e..03353a95 100644
--- a/g4f/Provider/PerplexityAi.py
+++ b/g4f/Provider/PerplexityAi.py
@@ -4,7 +4,8 @@ import time
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
class PerplexityAi(BaseProvider):
url = "https://www.perplexity.ai"
@@ -20,12 +21,12 @@ class PerplexityAi(BaseProvider):
stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
virtual_display: bool = True,
copilot: bool = False,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", virtual_display=virtual_display, proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index 32f63665..82769ab0 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -5,7 +5,8 @@ from urllib.parse import quote
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession, format_prompt
+from .helper import format_prompt
+from .webdriver import WebDriver, WebDriverSession
class Phind(BaseProvider):
url = "https://www.phind.com"
@@ -21,11 +22,11 @@ class Phind(BaseProvider):
stream: bool,
proxy: str = None,
timeout: int = 120,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
creative_mode: bool = None,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@@ -34,40 +35,38 @@ class Phind(BaseProvider):
driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
# Register fetch hook
- driver.execute_script("""
+ source = """
window._fetch = window.fetch;
-window.fetch = (url, options) => {
- // Call parent fetch method
- const result = window._fetch(url, options);
+window.fetch = async (url, options) => {
+ const response = await window._fetch(url, options);
if (url != "/api/infer/answer") {
- return result;
+ return response;
}
- // Load response reader
- result.then((response) => {
- if (!response.body.locked) {
- window._reader = response.body.getReader();
- }
- });
- // Return dummy response
- return new Promise((resolve, reject) => {
- resolve(new Response(new ReadableStream()))
- });
+ copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
}
-""")
+"""
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": source
+ })
# Need to change settings
- if model.startswith("gpt-4") or creative_mode:
- wait = WebDriverWait(driver, timeout)
+ wait = WebDriverWait(driver, timeout)
+ def open_dropdown():
# Open settings dropdown
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
# Wait for dropdown toggle
wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
- # Enable GPT-4
+ if model.startswith("gpt-4") or creative_mode:
+ # Enable GPT-4
if model.startswith("gpt-4"):
+ open_dropdown()
driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
# Enable creative mode
if creative_mode or creative_mode == None:
+ open_dropdown()
driver.find_element(By.ID, "Creative Mode").click()
# Submit changes
driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
@@ -78,10 +77,11 @@ window.fetch = (url, options) => {
chunk = driver.execute_script("""
if(window._reader) {
chunk = await window._reader.read();
- if (chunk['done']) return null;
- text = (new TextDecoder()).decode(chunk['value']);
+ if (chunk['done']) {
+ return null;
+ }
content = '';
- text.split('\\r\\n').forEach((line, index) => {
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
line = line.substring('data: '.length);
if (!line.startsWith('<PHIND_METADATA>')) {
diff --git a/g4f/Provider/TalkAi.py b/g4f/Provider/TalkAi.py
index 20ba65b5..0edd9f6b 100644
--- a/g4f/Provider/TalkAi.py
+++ b/g4f/Provider/TalkAi.py
@@ -4,7 +4,7 @@ import time, json, time
from ..typing import CreateResult, Messages
from .base_provider import BaseProvider
-from .helper import WebDriver, WebDriverSession
+from .webdriver import WebDriver, WebDriverSession
class TalkAi(BaseProvider):
url = "https://talkai.info"
@@ -19,10 +19,10 @@ class TalkAi(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
**kwargs
) -> CreateResult:
- with WebDriverSession(web_driver, "", virtual_display=True, proxy=proxy) as driver:
+ with WebDriverSession(webdriver, "", virtual_display=True, proxy=proxy) as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index 13692139..11fe497f 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -55,21 +55,4 @@ class Ylokh(AsyncGeneratorProvider):
yield content
else:
chat = await response.json()
- yield chat["choices"][0]["message"].get("content")
-
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("timeout", "int"),
- ("temperature", "float"),
- ("top_p", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield chat["choices"][0]["message"].get("content") \ No newline at end of file
diff --git a/g4f/Provider/base_provider.py b/g4f/Provider/base_provider.py
index 47ea6ff8..564dd77e 100644
--- a/g4f/Provider/base_provider.py
+++ b/g4f/Provider/base_provider.py
@@ -3,6 +3,8 @@ from __future__ import annotations
from asyncio import AbstractEventLoop
from concurrent.futures import ThreadPoolExecutor
from abc import ABC, abstractmethod
+from inspect import signature, Parameter
+from types import NoneType
from .helper import get_event_loop, get_cookies, format_prompt
from ..typing import CreateResult, AsyncResult, Messages
@@ -52,17 +54,42 @@ class BaseProvider(ABC):
executor,
create_func
)
-
+
@classmethod
@property
def params(cls) -> str:
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ if issubclass(cls, AsyncGeneratorProvider):
+ sig = signature(cls.create_async_generator)
+ elif issubclass(cls, AsyncProvider):
+ sig = signature(cls.create_async)
+ else:
+ sig = signature(cls.create_completion)
+
+ def get_type_name(annotation: type) -> str:
+ if hasattr(annotation, "__name__"):
+ annotation = annotation.__name__
+ elif isinstance(annotation, NoneType):
+ annotation = "None"
+ return str(annotation)
+
+ args = "";
+ for name, param in sig.parameters.items():
+ if name in ("self", "kwargs"):
+ continue
+ if name == "stream" and not cls.supports_stream:
+ continue
+ if args:
+ args += ", "
+ args += "\n"
+ args += " " + name
+ if name != "model" and param.annotation is not Parameter.empty:
+ args += f": {get_type_name(param.annotation)}"
+ if param.default == "":
+ args += ' = ""'
+ elif param.default is not Parameter.empty:
+ args += f" = {param.default}"
+
+ return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"
class AsyncProvider(BaseProvider):
diff --git a/g4f/Provider/deprecated/Aibn.py b/g4f/Provider/deprecated/Aibn.py
index 60cef1e4..0bbfb436 100644
--- a/g4f/Provider/deprecated/Aibn.py
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -39,18 +39,6 @@ class Aibn(AsyncGeneratorProvider):
response.raise_for_status()
async for chunk in response.iter_content():
yield chunk.decode()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
diff --git a/g4f/Provider/deprecated/Ails.py b/g4f/Provider/deprecated/Ails.py
index 5244fd75..e87ceb32 100644
--- a/g4f/Provider/deprecated/Ails.py
+++ b/g4f/Provider/deprecated/Ails.py
@@ -77,19 +77,6 @@ class Ails(AsyncGeneratorProvider):
yield token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
diff --git a/g4f/Provider/deprecated/Aivvm.py b/g4f/Provider/deprecated/Aivvm.py
index 12fd387d..8b5a9e05 100644
--- a/g4f/Provider/deprecated/Aivvm.py
+++ b/g4f/Provider/deprecated/Aivvm.py
@@ -69,16 +69,4 @@ class Aivvm(BaseProvider):
try:
yield chunk.decode("utf-8")
except UnicodeDecodeError:
- yield chunk.decode("unicode-escape")
-
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
+ yield chunk.decode("unicode-escape") \ No newline at end of file
diff --git a/g4f/Provider/deprecated/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
index c77c6a1c..c2d2de7a 100644
--- a/g4f/Provider/deprecated/ChatgptDuo.py
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -44,15 +44,4 @@ class ChatgptDuo(AsyncProvider):
@classmethod
def get_sources(cls):
- return cls._sources
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ return cls._sources \ No newline at end of file
diff --git a/g4f/Provider/deprecated/CodeLinkAva.py b/g4f/Provider/deprecated/CodeLinkAva.py
index 64ce1af9..a909ab97 100644
--- a/g4f/Provider/deprecated/CodeLinkAva.py
+++ b/g4f/Provider/deprecated/CodeLinkAva.py
@@ -47,17 +47,4 @@ class CodeLinkAva(AsyncGeneratorProvider):
break
line = json.loads(line[6:-1])
if content := line["choices"][0]["delta"].get("content"):
- yield content
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield content \ No newline at end of file
diff --git a/g4f/Provider/deprecated/DfeHub.py b/g4f/Provider/deprecated/DfeHub.py
index 4ea7501f..4458bac6 100644
--- a/g4f/Provider/deprecated/DfeHub.py
+++ b/g4f/Provider/deprecated/DfeHub.py
@@ -60,18 +60,3 @@ class DfeHub(BaseProvider):
if b"content" in chunk:
data = json.loads(chunk.decode().split("data: ")[1])
yield (data["choices"][0]["delta"]["content"])
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("presence_penalty", "int"),
- ("frequency_penalty", "int"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/deprecated/EasyChat.py b/g4f/Provider/deprecated/EasyChat.py
index bd49c09c..3142f243 100644
--- a/g4f/Provider/deprecated/EasyChat.py
+++ b/g4f/Provider/deprecated/EasyChat.py
@@ -87,21 +87,4 @@ class EasyChat(BaseProvider):
splitData = chunk.decode().split("data:")
if len(splitData) > 1:
- yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("presence_penalty", "int"),
- ("frequency_penalty", "int"),
- ("top_p", "int"),
- ("active_server", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ yield json.loads(splitData[1])["choices"][0]["delta"]["content"] \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Equing.py b/g4f/Provider/deprecated/Equing.py
index 5ba125a3..076b5ac5 100644
--- a/g4f/Provider/deprecated/Equing.py
+++ b/g4f/Provider/deprecated/Equing.py
@@ -66,15 +66,4 @@ class Equing(BaseProvider):
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
if token := line_json['choices'][0]['delta'].get('content'):
- yield token
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield token \ No newline at end of file
diff --git a/g4f/Provider/deprecated/FastGpt.py b/g4f/Provider/deprecated/FastGpt.py
index 17b21b37..ef69e892 100644
--- a/g4f/Provider/deprecated/FastGpt.py
+++ b/g4f/Provider/deprecated/FastGpt.py
@@ -74,15 +74,4 @@ class FastGpt(BaseProvider):
):
yield token
except:
- continue
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ continue \ No newline at end of file
diff --git a/g4f/Provider/deprecated/GetGpt.py b/g4f/Provider/deprecated/GetGpt.py
index 0fbb5b87..a7f4695c 100644
--- a/g4f/Provider/deprecated/GetGpt.py
+++ b/g4f/Provider/deprecated/GetGpt.py
@@ -55,22 +55,6 @@ class GetGpt(BaseProvider):
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
yield (line_json['choices'][0]['delta']['content'])
- @classmethod
- @property
- def params(cls):
- params = [
- ('model', 'str'),
- ('messages', 'list[dict[str, str]]'),
- ('stream', 'bool'),
- ('temperature', 'float'),
- ('presence_penalty', 'int'),
- ('frequency_penalty', 'int'),
- ('top_p', 'int'),
- ('max_tokens', 'int'),
- ]
- param = ', '.join([': '.join(p) for p in params])
- return f'g4f.provider.{cls.__name__} supports: ({param})'
-
def _encrypt(e: str):
t = os.urandom(8).hex().encode('utf-8')
diff --git a/g4f/Provider/deprecated/H2o.py b/g4f/Provider/deprecated/H2o.py
index cead17e1..ba4ca507 100644
--- a/g4f/Provider/deprecated/H2o.py
+++ b/g4f/Provider/deprecated/H2o.py
@@ -86,22 +86,4 @@ class H2o(AsyncGeneratorProvider):
f"{cls.url}/conversation/{conversationId}",
proxy=proxy,
) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("truncate", "int"),
- ("max_new_tokens", "int"),
- ("do_sample", "bool"),
- ("repetition_penalty", "float"),
- ("return_full_text", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ response.raise_for_status() \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Lockchat.py b/g4f/Provider/deprecated/Lockchat.py
index 5acfbfbf..d93c9f8a 100644
--- a/g4f/Provider/deprecated/Lockchat.py
+++ b/g4f/Provider/deprecated/Lockchat.py
@@ -48,16 +48,4 @@ class Lockchat(BaseProvider):
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
if token := token["choices"][0]["delta"].get("content"):
- yield (token)
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ yield (token) \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Myshell.py b/g4f/Provider/deprecated/Myshell.py
index 85731325..2487440d 100644
--- a/g4f/Provider/deprecated/Myshell.py
+++ b/g4f/Provider/deprecated/Myshell.py
@@ -98,18 +98,6 @@ class Myshell(AsyncGeneratorProvider):
raise RuntimeError(f"Received unexpected message: {data_type}")
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
def generate_timestamp() -> str:
return str(
int(
diff --git a/g4f/Provider/deprecated/V50.py b/g4f/Provider/deprecated/V50.py
index f4f4d823..e24ac2d4 100644
--- a/g4f/Provider/deprecated/V50.py
+++ b/g4f/Provider/deprecated/V50.py
@@ -58,17 +58,4 @@ class V50(BaseProvider):
)
if "https://fk1.v50.ltd" not in response.text:
- yield response.text
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield response.text \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Vitalentum.py b/g4f/Provider/deprecated/Vitalentum.py
index d6ba9336..13160d94 100644
--- a/g4f/Provider/deprecated/Vitalentum.py
+++ b/g4f/Provider/deprecated/Vitalentum.py
@@ -50,18 +50,4 @@ class Vitalentum(AsyncGeneratorProvider):
break
line = json.loads(line[6:-1])
if content := line["choices"][0]["delta"].get("content"):
- yield content
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("temperature", "float"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield content \ No newline at end of file
diff --git a/g4f/Provider/deprecated/Wuguokai.py b/g4f/Provider/deprecated/Wuguokai.py
index 079f0541..87877198 100644
--- a/g4f/Provider/deprecated/Wuguokai.py
+++ b/g4f/Provider/deprecated/Wuguokai.py
@@ -54,15 +54,4 @@ class Wuguokai(BaseProvider):
if len(_split) > 1:
yield _split[1].strip()
else:
- yield _split[0].strip()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool")
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file
+ yield _split[0].strip() \ No newline at end of file
diff --git a/g4f/Provider/helper.py b/g4f/Provider/helper.py
index 03e9ba94..2171f0b7 100644
--- a/g4f/Provider/helper.py
+++ b/g4f/Provider/helper.py
@@ -6,7 +6,6 @@ import webbrowser
import random
import string
import secrets
-import time
from os import path
from asyncio import AbstractEventLoop
from platformdirs import user_config_dir
@@ -21,26 +20,8 @@ from browser_cookie3 import (
firefox,
BrowserCookieError
)
-try:
- from selenium.webdriver.remote.webdriver import WebDriver
-except ImportError:
- class WebDriver():
- pass
-try:
- from undetected_chromedriver import Chrome, ChromeOptions
-except ImportError:
- class Chrome():
- def __init__():
- raise RuntimeError('Please install the "undetected_chromedriver" package')
- class ChromeOptions():
- def add_argument():
- pass
-try:
- from pyvirtualdisplay import Display
-except ImportError:
- pass
-from ..typing import Dict, Messages, Union, Tuple
+from ..typing import Dict, Messages
from .. import debug
# Change event loop policy on windows
@@ -135,74 +116,11 @@ def format_prompt(messages: Messages, add_special_tokens=False) -> str:
return f"{formatted}\nAssistant:"
-def get_browser(
- user_data_dir: str = None,
- headless: bool = False,
- proxy: str = None,
- options: ChromeOptions = None
-) -> Chrome:
- if user_data_dir == None:
- user_data_dir = user_config_dir("g4f")
- if proxy:
- if not options:
- options = ChromeOptions()
- options.add_argument(f'--proxy-server={proxy}')
- return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
-
-class WebDriverSession():
- def __init__(
- self,
- web_driver: WebDriver = None,
- user_data_dir: str = None,
- headless: bool = False,
- virtual_display: bool = False,
- proxy: str = None,
- options: ChromeOptions = None
- ):
- self.web_driver = web_driver
- self.user_data_dir = user_data_dir
- self.headless = headless
- self.virtual_display = virtual_display
- self.proxy = proxy
- self.options = options
-
- def reopen(
- self,
- user_data_dir: str = None,
- headless: bool = False,
- virtual_display: bool = False
- ) -> WebDriver:
- if user_data_dir == None:
- user_data_dir = self.user_data_dir
- self.default_driver.quit()
- if not virtual_display and self.virtual_display:
- self.virtual_display.stop()
- self.default_driver = get_browser(user_data_dir, headless, self.proxy)
- return self.default_driver
-
- def __enter__(self) -> WebDriver:
- if self.web_driver:
- return self.web_driver
- if self.virtual_display == True:
- self.virtual_display = Display(size=(1920,1080))
- self.virtual_display.start()
- self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
- return self.default_driver
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- if self.default_driver:
- self.default_driver.close()
- time.sleep(0.1)
- self.default_driver.quit()
- if self.virtual_display:
- self.virtual_display.stop()
-
def get_random_string(length: int = 10) -> str:
return ''.join(
random.choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
-
def get_random_hex() -> str:
return secrets.token_hex(16).zfill(32) \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 77c029b8..2c1f6121 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
class Bard(BaseProvider):
url = "https://bard.google.com"
@@ -18,13 +19,13 @@ class Bard(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
) -> CreateResult:
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -36,8 +37,8 @@ class Bard(BaseProvider):
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/chat")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 68c6713b..59e2da73 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -59,17 +59,4 @@ class HuggingChat(AsyncGeneratorProvider):
break
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
- response.raise_for_status()
-
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
+ response.raise_for_status() \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenAssistant.py b/g4f/Provider/needs_auth/OpenAssistant.py
index de62636c..e549b517 100644
--- a/g4f/Provider/needs_auth/OpenAssistant.py
+++ b/g4f/Provider/needs_auth/OpenAssistant.py
@@ -87,15 +87,3 @@ class OpenAssistant(AsyncGeneratorProvider):
}
async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
response.raise_for_status()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 9fd90812..8c9dd1e0 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -6,7 +6,8 @@ from asyncstdlib.itertools import tee
from async_property import async_cached_property
from ..base_provider import AsyncGeneratorProvider
-from ..helper import get_browser, get_event_loop
+from ..helper import get_event_loop
+from ..webdriver import get_browser
from ...typing import AsyncResult, Messages
from ...requests import StreamSession
@@ -38,7 +39,10 @@ class OpenaiChat(AsyncGeneratorProvider):
**kwargs
) -> Response:
if prompt:
- messages.append({"role": "user", "content": prompt})
+ messages.append({
+ "role": "user",
+ "content": prompt
+ })
generator = cls.create_async_generator(
model,
messages,
@@ -49,12 +53,9 @@ class OpenaiChat(AsyncGeneratorProvider):
response_fields=True,
**kwargs
)
- fields: ResponseFields = await anext(generator)
- if "access_token" not in kwargs:
- kwargs["access_token"] = cls._access_token
return Response(
generator,
- fields,
+ await anext(generator),
action,
messages,
kwargs
@@ -87,7 +88,6 @@ class OpenaiChat(AsyncGeneratorProvider):
headers = {
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
- "Cookie": 'intercom-device-id-dgkjq2bp=0f047573-a750-46c8-be62-6d54b56e7bf0; ajs_user_id=user-iv3vxisaoNodwWpxmNpMfekH; ajs_anonymous_id=fd91be0b-0251-4222-ac1e-84b1071e9ec1; __Host-next-auth.csrf-token=d2b5f67d56f7dd6a0a42ae4becf2d1a6577b820a5edc88ab2018a59b9b506886%7Ce5c33eecc460988a137cbc72d90ee18f1b4e2f672104f368046df58e364376ac; _cfuvid=gt_mA.q6rue1.7d2.AR0KHpbVBS98i_ppfi.amj2._o-1700353424353-0-604800000; cf_clearance=GkHCfPSFU.NXGcHROoe4FantnqmnNcluhTNHz13Tk.M-1700353425-0-1-dfe77f81.816e9bc2.714615da-0.2.1700353425; __Secure-next-auth.callback-url=https%3A%2F%2Fchat.openai.com; intercom-session-dgkjq2bp=UWdrS1hHazk5VXN1c0V5Q1F0VXdCQmsyTU9pVjJMUkNpWnFnU3dKWmtIdGwxTC9wbjZuMk5hcEc0NWZDOGdndS0tSDNiaDNmMEdIL1RHU1dFWDBwOHFJUT09--f754361b91fddcd23a13b288dcb2bf8c7f509e91; _uasid="Z0FBQUFBQmxXVnV0a3dmVno4czRhcDc2ZVcwaUpSNUdZejlDR25YSk5NYTJQQkpyNmRvOGxjTHMyTlAxWmJhaURrMVhjLXZxQXdZeVpBbU1aczA5WUpHT2dwaS1MOWc4MnhyNWFnbGRzeGdJcGFKT0ZRdnBTMVJHcGV2MGNTSnVQY193c0hqUWIycHhQRVF4dENlZ3phcDdZeHgxdVhoalhrZmtZME9NbWhMQjdVR3Vzc3FRRk0ybjJjNWMwTWtIRjdPb19lUkFtRmV2MDVqd1kwWU11QTYtQkdZenEzVHhLMGplY1hZM3FlYUt1cVZaNWFTRldleEJETzJKQjk1VTJScy1GUnMxUVZWMnVxYklxMjdockVZbkZyd1R4U1RtMnA1ZzlSeXphdmVOVk9xeEdrRkVOSjhwTVd1QzFtQjhBcWdDaE92Q1VlM2pwcjFQTXRuLVJNRVlZSGpIdlZ0aGV3PT0="; _dd_s=rum=0&expire=1700356244884; 
__Secure-next-auth.session-token=eyJhbGciOiJkaXIiLCJlbmMiOiJBMjU2R0NNIn0..3aK6Fbdy2_8f07bf.8eT2xgonrCnz7ySY6qXFsg3kzL6UQfXKAYaw3tyn-6_X9657zy47k9qGvmi9mF0QKozj5jau3_Ca62AQQ7FmeC6Y2F1urtzqrXqwTTsQ2LuzFPIQkx6KKb2DXc8zW2-oyEzJ_EY5yxfLB2RlRkSh3M7bYNZh4_ltEcfkj38s_kIPGMxv34udtPWGWET99MCjkdwQWXylJag4s0fETA0orsBAKnGCyqAUNJbb_D7BYtGSV-MQ925kZMG6Di_QmfO0HQWURDYjmdRNcuy1PT_xJ1DJko8sjL42i4j3RhkNDkhqCIqyYImz2eHFWHW7rYKxTkrBhlCPMS5hRdcCswD7JYPcSBiwnVRYgyOocFGXoFvQgIZ2FX9NiZ3SMEVM1VwIGSE-qH0H2nMa8_iBvsOgOWJgKjVAvzzyzZvRVDUUHzJrikSFPNONVDU3h-04c1kVL4qIu9DfeTPN7n8AvNmYwMbro0L9-IUAeXNo4-pwF0Kt-AtTsamqWvMqnK4O_YOyLnDDlvkmnOvDC2d5uinwlQIxr6APO6qFfGLlHiLZemKoekxEE1Fx70dl-Ouhk1VIzbF3OC6XNNxeBm9BUYUiHdL0wj2H9rHgX4cz6ZmS_3VTgpD6UJh-evu5KJ2gIvjYmVbyzEN0aPNDxfvBaOm-Ezpy4bUJ2bUrOwNn-0knWkDiTvjYmNhCyefPCtCF6rpKNay8PCw_yh79C4SdEP6Q4V7LI0Tvdi5uz7kLCiBC4AT9L0ao1WDX03mkUOpjvzHDvPLmj8chW3lTVm_kA0eYGQY4wT0jzleWlfV0Q8rB2oYECNLWksA3F1zlGfcl4lQjprvTXRePkvAbMpoJEsZD3Ylq7-foLDLk4-M2LYAFZDs282AY04sFjAjQBxTELFCCuDgTIgTXSIskY_XCxpVXDbdLlbCJY7XVK45ybwtfqwlKRp8Mo0B131uQAFc-migHaUaoGujxJJk21bP8F0OmhNYHBo4FQqE1rQm2JH5bNM7txKeh5KXdJgVUVbRSr7OIp_OF5-Bx_v9eRBGAIDkue26E2-O8Rnrp5zQ5TnvecQLDaUzWavCLPwsZ0_gsOLBxNOmauNYZtF8IElCsQSFDdhoiMxXsYUm4ZYKEAy3GWq8HGTAvBhNkh1hvnI7y-d8-DOaZf_D_D98-olZfm-LUkeosLNpPB9rxYMqViCiW3KrXE9Yx0wlFm5ePKaVvR7Ym_EPhSOhJBKFPCvdTdMZSNPUcW0ZJBVByq0A9sxD51lYq3gaFyqh94S4s_ox182AQ3szGzHkdgLcnQmJG9OYvKxAVcd43eg6_gODAYhx02GjbMw-7JTAhyXSeCrlMteHyOXl8hai-3LilC3PmMzi7Vbu49dhF1s4LcVlUowen5ira44rQQaB26mdaOUoQfodgt66M3RTWGPXyK1Nb72AzSXsCKyaQPbzeb6cN0fdGSdG4ktwvR04eFNEkquo_3aKu2GmUKTD0XcRx9dYrfXjgY-X1DDTVs1YND2gRhdx7FFEeBVjtbj2UqmG3Rvd4IcHGe7OnYWw2MHDcol68SsR1KckXWwWREz7YTGUnDB2M1kx_H4W2mjclytnlHOnYU3RflegRPeSTbdzUZJvGKXCCz45luHkQWN_4DExE76D-9YqbFIz-RY5yL4h-Zs-i2xjm2K-4xCMM9nQIOqhLMqixIZQ2ldDAidKoYtbs5ppzbcBLyrZM96bq9DwRBY3aacqWdlRd-TfX0wv5KO4fo0sSh5FsuhuN0zcEV_NNXgqIEM_p14EcPqgbrAvCBQ8os70TRBQLXiF0EniSofGjxwF8kQvUk3C6Wfc8cTTeN-E6GxCVTn91HBwA1iSEZlRLMVb8_BcRJNqwbgnb_07jR6-eo42u88CR3KQdAWwbQRdMxsURFwZ0ujHXV
GG0Ll6qCFBcHXWyDO1x1yHdHnw8_8yF26pnA2iPzrFR-8glMgIA-639sLuGAxjO1_ZuvJ9CAB41Az9S_jaZwaWy215Hk4-BRYD-MKmHtonwo3rrxhE67WJgbbu14efsw5nT6ow961pffgwXov5VA1Rg7nv1E8RvQOx7umWW6o8R4W6L8f2COsmPTXfgwIjoJKkjhUqAQ8ceG7cM0ET-38yaC0ObU8EkXfdGGgxI28qTEZWczG66_iM4hw7QEGCY5Cz2kbO6LETAiw9OsSigtBvDS7f0Ou0bZ41pdK7G3FmvdZAnjWPjObnDF4k4uWfn7mzt0fgj3FyqK20JezRDyGuAbUUhOvtZpc9sJpzxR34eXEZTouuALrHcGuNij4z6rx51FrQsaMtiup8QVrhtZbXtKLMYnWYSbkhuTeN2wY-xV1ZUsQlakIZszzGF7kuIG87KKWMpuPMvbXjz6Pp_gWJiIC6aQuk8xl5g0iBPycf_6Q-MtpuYxzNE2TpI1RyR9mHeXmteoRzrFiWp7yEC-QGNFyAJgxTqxM3CjHh1Jt6IddOsmn89rUo1dZM2Smijv_fbIv3avXLkIPX1KZjILeJCtpU0wAdsihDaRiRgDdx8fG__F8zuP0n7ziHas73cwrfg-Ujr6DhC0gTNxyd9dDA_oho9N7CQcy6EFmfNF2te7zpLony0859jtRv2t1TnpzAa1VvMK4u6mXuJ2XDo04_6GzLO3aPHinMdl1BcIAWnqAqWAu3euGFLTHOhXlfijut9N1OCifd_zWjhVtzlR39uFeCQBU5DyQArzQurdoMx8U1ETsnWgElxGSStRW-YQoPsAJ87eg9trqKspFpTVlAVN3t1GtoEAEhcwhe81SDssLmKGLc.7PqS6jRGTIfgTPlO7Ognvg; __cf_bm=VMWoAKEB45hQSwxXtnYXcurPaGZDJS4dMi6dIMFLwdw-1700355394-0-ATVsbq97iCaTaJbtYr8vtg1Zlbs3nLrJLKVBHYa2Jn7hhkGclqAy8Gbyn5ePEhDRqj93MsQmtayfYLqY5n4WiLY=; __cflb=0H28vVfF4aAyg2hkHFH9CkdHRXPsfCUf6VpYf2kz3RX'
}
async with StreamSession(
proxies={"https": proxy},
@@ -95,24 +95,22 @@ class OpenaiChat(AsyncGeneratorProvider):
headers=headers,
timeout=timeout
) as session:
- data = {
- "action": action,
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": history_disabled and not auto_continue,
- }
- if action != "continue":
- data["messages"] = [{
- "id": str(uuid.uuid4()),
- "author": {"role": "user"},
- "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
- }]
- first = True
end_turn = EndTurn()
- while first or auto_continue and not end_turn.is_end:
- first = False
+ while not end_turn.is_end:
+ data = {
+ "action": action,
+ "arkose_token": await get_arkose_token(proxy, timeout),
+ "conversation_id": conversation_id,
+ "parent_message_id": parent_id,
+ "model": models[model],
+ "history_and_training_disabled": history_disabled and not auto_continue,
+ }
+ if action != "continue":
+ data["messages"] = [{
+ "id": str(uuid.uuid4()),
+ "author": {"role": "user"},
+ "content": {"content_type": "text", "parts": [messages[-1]["content"]]},
+ }]
async with session.post(f"{cls.url}/backend-api/conversation", json=data) as response:
try:
response.raise_for_status()
@@ -120,43 +118,38 @@ class OpenaiChat(AsyncGeneratorProvider):
raise RuntimeError(f"Error {response.status_code}: {await response.text()}")
last_message = 0
async for line in response.iter_lines():
- if line.startswith(b"data: "):
- line = line[6:]
- if line == b"[DONE]":
- break
- try:
- line = json.loads(line)
- except:
- continue
- if "message" not in line:
- continue
- if "error" in line and line["error"]:
- raise RuntimeError(line["error"])
- if "message_type" not in line["message"]["metadata"]:
- continue
- if line["message"]["author"]["role"] != "assistant":
- continue
- if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
- conversation_id = line["conversation_id"]
- parent_id = line["message"]["id"]
- if response_fields:
- response_fields = False
- yield ResponseFields(conversation_id, parent_id, end_turn)
- new_message = line["message"]["content"]["parts"][0]
- yield new_message[last_message:]
- last_message = len(new_message)
- if "finish_details" in line["message"]["metadata"]:
- if line["message"]["metadata"]["finish_details"]["type"] == "max_tokens":
- end_turn.end()
-
- data = {
- "action": "continue",
- "arkose_token": await get_arkose_token(proxy, timeout),
- "conversation_id": conversation_id,
- "parent_message_id": parent_id,
- "model": models[model],
- "history_and_training_disabled": False,
- }
+ if not line.startswith(b"data: "):
+ continue
+ line = line[6:]
+ if line == b"[DONE]":
+ break
+ try:
+ line = json.loads(line)
+ except:
+ continue
+ if "message" not in line:
+ continue
+ if "error" in line and line["error"]:
+ raise RuntimeError(line["error"])
+ if "message_type" not in line["message"]["metadata"]:
+ continue
+ if line["message"]["author"]["role"] != "assistant":
+ continue
+ if line["message"]["metadata"]["message_type"] in ("next", "continue", "variant"):
+ conversation_id = line["conversation_id"]
+ parent_id = line["message"]["id"]
+ if response_fields:
+ response_fields = False
+ yield ResponseFields(conversation_id, parent_id, end_turn)
+ new_message = line["message"]["content"]["parts"][0]
+ yield new_message[last_message:]
+ last_message = len(new_message)
+ if "finish_details" in line["message"]["metadata"]:
+ if line["message"]["metadata"]["finish_details"]["type"] == "stop":
+ end_turn.end()
+ if not auto_continue:
+ break
+ action = "continue"
await asyncio.sleep(5)
@classmethod
@@ -167,7 +160,7 @@ class OpenaiChat(AsyncGeneratorProvider):
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
- driver = get_browser("~/openai", proxy=proxy)
+ driver = get_browser(proxy=proxy)
except ImportError:
return
try:
@@ -193,18 +186,6 @@ class OpenaiChat(AsyncGeneratorProvider):
raise RuntimeError("Read access token failed")
return cls._access_token
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("proxy", "str"),
- ("access_token", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_arkose_token(proxy: str = None, timeout: int = None) -> str:
config = {
@@ -293,7 +274,7 @@ class Response():
async def variant(self, **kwargs) -> Response:
if self.action != "next":
- raise RuntimeError("Can't create variant with continue or variant request.")
+ raise RuntimeError("Can't create variant from continue or variant request.")
return await OpenaiChat.create(
**self._options,
messages=self._messages,
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
index 1c8c97d7..99f6945b 100644
--- a/g4f/Provider/needs_auth/Poe.py
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
@@ -33,7 +34,7 @@ class Poe(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
@@ -44,7 +45,7 @@ class Poe(BaseProvider):
raise ValueError(f"Model are not supported: {model}")
prompt = format_prompt(messages)
- session = WebDriverSession(web_driver, user_data_dir, headless, proxy=proxy)
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -80,8 +81,8 @@ class Poe(BaseProvider):
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
except:
# Reopen browser for login
- if not web_driver:
- driver = session.reopen(headless=False)
+ if not webdriver:
+ driver = session.reopen()
driver.get(f"{cls.url}/{models[model]['name']}")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
index 4570fd9f..d7be98ac 100644
--- a/g4f/Provider/needs_auth/Raycast.py
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -60,18 +60,3 @@ class Raycast(BaseProvider):
token = completion_chunk['text']
if token != None:
yield token
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ("temperature", "float"),
- ("top_p", "int"),
- ("model", "str"),
- ("auth", "str"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index cf33f0c6..49ee174b 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -4,7 +4,8 @@ import time
from ...typing import CreateResult, Messages
from ..base_provider import BaseProvider
-from ..helper import WebDriver, WebDriverSession, format_prompt
+from ..helper import format_prompt
+from ..webdriver import WebDriver, WebDriverSession
models = {
"theb-ai": "TheB.AI",
@@ -44,14 +45,14 @@ class Theb(BaseProvider):
messages: Messages,
stream: bool,
proxy: str = None,
- web_driver: WebDriver = None,
+ webdriver: WebDriver = None,
virtual_display: bool = True,
**kwargs
) -> CreateResult:
if model in models:
model = models[model]
prompt = format_prompt(messages)
- web_session = WebDriverSession(web_driver, virtual_display=virtual_display, proxy=proxy)
+ web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
with web_session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
@@ -61,22 +62,16 @@ class Theb(BaseProvider):
# Register fetch hook
script = """
window._fetch = window.fetch;
-window.fetch = (url, options) => {
+window.fetch = async (url, options) => {
// Call parent fetch method
- const result = window._fetch(url, options);
+ const response = await window._fetch(url, options);
if (!url.startsWith("/api/conversation")) {
-        return result;
+        return response;
}
- // Load response reader
- result.then((response) => {
- if (!response.body.locked) {
- window._reader = response.body.getReader();
- }
- });
- // Return dummy response
- return new Promise((resolve, reject) => {
- resolve(new Response(new ReadableStream()))
- });
+ // Copy response
+ copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
}
window._last_message = "";
"""
@@ -97,7 +92,6 @@ window._last_message = "";
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
- time.sleep(200)
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
@@ -134,9 +128,8 @@ if(window._reader) {
if (chunk['done']) {
return null;
}
- text = (new TextDecoder()).decode(chunk['value']);
message = '';
- text.split('\\r\\n').forEach((line, index) => {
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
line = JSON.parse(line.substring('data: '.length));
diff --git a/g4f/Provider/webdriver.py b/g4f/Provider/webdriver.py
new file mode 100644
index 00000000..da3b13ed
--- /dev/null
+++ b/g4f/Provider/webdriver.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import time
+from platformdirs import user_config_dir
+try:
+ from selenium.webdriver.remote.webdriver import WebDriver
+except ImportError:
+ class WebDriver():
+ pass
+try:
+ from undetected_chromedriver import Chrome, ChromeOptions
+except ImportError:
+    class Chrome():
+        def __init__(self, *args, **kwargs):
+            raise RuntimeError('Please install the "undetected_chromedriver" package')
+    class ChromeOptions():
+        def add_argument(self, argument):
+            pass
+try:
+ from pyvirtualdisplay import Display
+ has_pyvirtualdisplay = True
+except ImportError:
+ has_pyvirtualdisplay = False
+
+def get_browser(
+ user_data_dir: str = None,
+ headless: bool = False,
+ proxy: str = None,
+ options: ChromeOptions = None
+) -> Chrome:
+    if user_data_dir is None:
+ user_data_dir = user_config_dir("g4f")
+ if proxy:
+ if not options:
+ options = ChromeOptions()
+ options.add_argument(f'--proxy-server={proxy}')
+ return Chrome(options=options, user_data_dir=user_data_dir, headless=headless)
+
+class WebDriverSession():
+ def __init__(
+ self,
+ webdriver: WebDriver = None,
+ user_data_dir: str = None,
+ headless: bool = False,
+ virtual_display: bool = False,
+ proxy: str = None,
+ options: ChromeOptions = None
+ ):
+ self.webdriver = webdriver
+ self.user_data_dir = user_data_dir
+ self.headless = headless
+ self.virtual_display = None
+ if has_pyvirtualdisplay and virtual_display:
+ self.virtual_display = Display(size=(1920,1080))
+ self.proxy = proxy
+ self.options = options
+ self.default_driver = None
+
+ def reopen(
+ self,
+ user_data_dir: str = None,
+ headless: bool = False,
+ virtual_display: bool = False
+ ) -> WebDriver:
+        if user_data_dir is None:
+ user_data_dir = self.user_data_dir
+ if self.default_driver:
+ self.default_driver.quit()
+ if not virtual_display and self.virtual_display:
+ self.virtual_display.stop()
+ self.virtual_display = None
+ self.default_driver = get_browser(user_data_dir, headless, self.proxy)
+ return self.default_driver
+
+ def __enter__(self) -> WebDriver:
+ if self.webdriver:
+ return self.webdriver
+ if self.virtual_display:
+ self.virtual_display.start()
+ self.default_driver = get_browser(self.user_data_dir, self.headless, self.proxy, self.options)
+ return self.default_driver
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.default_driver:
+ try:
+ self.default_driver.close()
+            except Exception:
+ pass
+ time.sleep(0.1)
+ self.default_driver.quit()
+ if self.virtual_display:
+ self.virtual_display.stop() \ No newline at end of file
diff --git a/g4f/__init__.py b/g4f/__init__.py
index faef7923..2c9ef7d7 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -1,8 +1,8 @@
from __future__ import annotations
from requests import get
from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, RetryProvider
-from .typing import Messages, CreateResult, Union, List
+from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider
+from .typing import Messages, CreateResult, AsyncResult, Union, List
from . import debug
version = '0.1.8.7'
@@ -80,13 +80,15 @@ class ChatCompletion:
messages : Messages,
provider : Union[type[BaseProvider], None] = None,
stream : bool = False,
- ignored : List[str] = None, **kwargs) -> str:
-
- if stream:
- raise ValueError('"create_async" does not support "stream" argument')
-
+ ignored : List[str] = None,
+ **kwargs) -> Union[AsyncResult, str]:
model, provider = get_model_and_provider(model, provider, False, ignored)
+ if stream:
+ if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+ return await provider.create_async_generator(model.name, messages, **kwargs)
+ raise ValueError(f'{provider.__name__} does not support "stream" argument')
+
return await provider.create_async(model.name, messages, **kwargs)
class Completion: