author     Tekky <98614666+xtekky@users.noreply.github.com>   2023-11-18 03:40:09 +0100
committer  GitHub <noreply@github.com>                        2023-11-18 03:40:09 +0100
commit     ca3eaaffee6464adc7f8a6963461a9433be9e7bf (patch)
tree       2f86c8aa7a9db061441712bf60f956ef91a660e9 /g4f/Provider/needs_auth
parent     Merge pull request #1262 from hlohaus/any (diff)
parent     Improve providers (diff)
Diffstat (limited to 'g4f/Provider/needs_auth')
-rw-r--r--  g4f/Provider/needs_auth/Bard.py       7
-rw-r--r--  g4f/Provider/needs_auth/Theb.py     217
-rw-r--r--  g4f/Provider/needs_auth/ThebApi.py   77
-rw-r--r--  g4f/Provider/needs_auth/__init__.py   1
4 files changed, 218 insertions(+), 84 deletions(-)
diff --git a/g4f/Provider/needs_auth/Bard.py b/g4f/Provider/needs_auth/Bard.py
index 7f73f1b3..b1df6909 100644
--- a/g4f/Provider/needs_auth/Bard.py
+++ b/g4f/Provider/needs_auth/Bard.py
@@ -32,7 +32,7 @@ class Bard(BaseProvider):
 
         try:
             driver.get(f"{cls.url}/chat")
-            wait = WebDriverWait(driver, 10)
+            wait = WebDriverWait(driver, 10 if headless else 240)
             wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea")))
         except:
             # Reopen browser for login
@@ -61,14 +61,13 @@ XMLHttpRequest.prototype.open = function(method, url) {
 """
         driver.execute_script(script)
 
-        # Input and submit prompt
+        # Submit prompt
         driver.find_element(By.CSS_SELECTOR, "div.ql-editor.ql-blank.textarea").send_keys(prompt)
         driver.find_element(By.CSS_SELECTOR, "button.send-button").click()
 
         # Yield response
-        script = "return window._message;"
         while True:
-            chunk = driver.execute_script(script)
+            chunk = driver.execute_script("return window._message;")
             if chunk:
                 yield chunk
                 return
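
For reference, a minimal usage sketch of the Bard provider touched above. This is an assumption-laden illustration, not part of the commit: it assumes the g4f.ChatCompletion interface of this release, a Chrome profile already signed in to Bard, an illustrative model string, and that extra keyword arguments such as headless are passed through to the provider.

import g4f
from g4f.Provider import Bard

# With headless=False the window stays visible and, per the change above,
# the provider waits up to 240 seconds so the user can complete the login.
response = g4f.ChatCompletion.create(
    model="palm",                     # illustrative; Bard serves its own model
    provider=Bard,
    messages=[{"role": "user", "content": "Hello"}],
    headless=False,
)
print(response)
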
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
index b3c9019d..89c69727 100644
--- a/g4f/Provider/needs_auth/Theb.py
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -1,101 +1,158 @@
 from __future__ import annotations
-import json
-import random
-import requests
+import time
-from ...typing import Any, CreateResult, Messages
+from ...typing import CreateResult, Messages
 from ..base_provider import BaseProvider
-from ..helper import format_prompt
+from ..helper import WebDriver, format_prompt, get_browser
+models = {
+    "theb-ai": "TheB.AI",
+    "theb-ai-free": "TheB.AI Free",
+    "gpt-3.5-turbo": "GPT-3.5 Turbo (New)",
+    "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+    "gpt-4-turbo": "GPT-4 Turbo",
+    "gpt-4": "GPT-4",
+    "gpt-4-32k": "GPT-4 32K",
+    "claude-2": "Claude 2",
+    "claude-instant-1": "Claude Instant 1.2",
+    "palm-2": "PaLM 2",
+    "palm-2-32k": "PaLM 2 32K",
+    "palm-2-codey": "Codey",
+    "palm-2-codey-32k": "Codey 32K",
+    "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+    "llama-2-7b-chat": "Llama 2 7B",
+    "llama-2-13b-chat": "Llama 2 13B",
+    "llama-2-70b-chat": "Llama 2 70B",
+    "code-llama-7b": "Code Llama 7B",
+    "code-llama-13b": "Code Llama 13B",
+    "code-llama-34b": "Code Llama 34B",
+    "qwen-7b-chat": "Qwen 7B"
+}
 class Theb(BaseProvider):
-    url = "https://theb.ai"
-    working = True
-    supports_stream = True
-    supports_gpt_35_turbo = True
-    needs_auth = True
+    url = "https://beta.theb.ai"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
-    @staticmethod
+    @classmethod
     def create_completion(
+        cls,
         model: str,
         messages: Messages,
         stream: bool,
         proxy: str = None,
+        browser: WebDriver = None,
+        headless: bool = True,
         **kwargs
     ) -> CreateResult:
-        auth = kwargs.get("auth", {
-            "bearer_token":"free",
-            "org_id":"theb",
-        })
-
-        bearer_token = auth["bearer_token"]
-        org_id = auth["org_id"]
+        if model in models:
+            model = models[model]
+        prompt = format_prompt(messages)
+        driver = browser if browser else get_browser(None, headless, proxy)
-        headers = {
-            'authority': 'beta.theb.ai',
-            'accept': 'text/event-stream',
-            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
-            'authorization': f'Bearer {bearer_token}',
-            'content-type': 'application/json',
-            'origin': 'https://beta.theb.ai',
-            'referer': 'https://beta.theb.ai/home',
-            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
-            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
-        }
+        from selenium.webdriver.common.by import By
+        from selenium.webdriver.support.ui import WebDriverWait
+        from selenium.webdriver.support import expected_conditions as EC
+        from selenium.webdriver.common.keys import Keys
-        req_rand = random.randint(100000000, 9999999999)
+
+        try:
+            driver.get(f"{cls.url}/home")
+            wait = WebDriverWait(driver, 10 if headless else 240)
+            wait.until(EC.visibility_of_element_located((By.TAG_NAME, "body")))
+            time.sleep(0.1)
+            try:
+                driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+            except:
+                pass
+            if model:
+                # Load model panel
+                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#SelectModel svg")))
+                time.sleep(0.1)
+                driver.find_element(By.CSS_SELECTOR, "#SelectModel svg").click()
+                try:
+                    driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                    driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+                except:
+                    pass
+                # Select model
+                selector = f"div.flex-col div.items-center span[title='{model}']"
+                wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
+                span = driver.find_element(By.CSS_SELECTOR, selector)
+                container = span.find_element(By.XPATH, "//div/../..")
+                button = container.find_element(By.CSS_SELECTOR, "button.btn-blue.btn-small.border")
+                button.click()
-        json_data: dict[str, Any] = {
-            "text" : format_prompt(messages),
-            "category" : "04f58f64a4aa4191a957b47290fee864",
-            "model" : "ee8d4f29cb7047f78cbe84313ed6ace8",
-            "model_params": {
-                "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
-                "temperature" : kwargs.get("temperature", 1),
-                "top_p" : kwargs.get("top_p", 1),
-                "frequency_penalty" : kwargs.get("frequency_penalty", 0),
-                "presence_penalty" : kwargs.get("presence_penalty", 0),
-                "long_term_memory" : "auto"
-            }
+            # Register fetch hook
+            script = """
+window._fetch = window.fetch;
+window.fetch = (url, options) => {
+    // Call parent fetch method
+    const result = window._fetch(url, options);
+    if (!url.startsWith("/api/conversation")) {
+        return result;
+    }
+    // Load response reader
+    result.then((response) => {
+        if (!response.body.locked) {
+            window._reader = response.body.getReader();
         }
+    });
+    // Return dummy response
+    return new Promise((resolve, reject) => {
+        resolve(new Response(new ReadableStream()))
+    });
+}
+window._last_message = "";
+"""
+            driver.execute_script(script)
-        response = requests.post(
-            f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
-            headers=headers,
-            json=json_data,
-            stream=True,
-            proxies={"https": proxy}
-        )
-
-        response.raise_for_status()
-        content = ""
-        next_content = ""
-        for chunk in response.iter_lines():
-            if b"content" in chunk:
-                next_content = content
-                data = json.loads(chunk.decode().split("data: ")[1])
-                content = data["content"]
-                yield content.replace(next_content, "")
+            # Submit prompt
+            wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
+            driver.find_element(By.ID, "textareaAutosize").send_keys(prompt)
+            driver.find_element(By.ID, "textareaAutosize").send_keys(Keys.ENTER)
-    @classmethod
-    @property
-    def params(cls):
-        params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("auth", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int")
-        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
+            # Read response with reader
+            script = """
+if(window._reader) {
+    chunk = await window._reader.read();
+    if (chunk['done']) {
+        return null;
+    }
+    text = (new TextDecoder()).decode(chunk['value']);
+    message = '';
+    text.split('\\r\\n').forEach((line, index) => {
+        if (line.startsWith('data: ')) {
+            try {
+                line = JSON.parse(line.substring('data: '.length));
+                message = line["args"]["content"];
+            } catch(e) { }
+        }
+    });
+    if (message) {
+        try {
+            return message.substring(window._last_message.length);
+        } finally {
+            window._last_message = message;
+        }
+    }
+}
+return '';
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
+        finally:
+            if not browser:
+                driver.close()
+                time.sleep(0.1)
+                driver.quit()
\ No newline at end of file
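
For reference, a minimal streaming sketch of the reworked Theb provider. This is an illustration under assumptions, not part of the commit: it assumes the g4f.ChatCompletion interface of this release, an existing TheB.AI session in the browser profile, and that keyword arguments such as headless and browser are forwarded to create_completion; the model key should be one of the keys of the models mapping above.

import g4f
from g4f.Provider import Theb

# The provider drives beta.theb.ai through Selenium, hooks window.fetch,
# and yields the chunks it reads from the hooked response reader.
for chunk in g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=Theb,
    messages=[{"role": "user", "content": "Write one sentence about the sea."}],
    stream=True,
    headless=False,   # visible window; allows up to 240 seconds for manual login
):
    print(chunk, end="", flush=True)
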
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
new file mode 100644
index 00000000..0441f352
--- /dev/null
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -0,0 +1,77 @@
+from __future__ import annotations
+
+import requests
+
+from ...typing import Any, CreateResult, Messages
+from ..base_provider import BaseProvider
+
+models = {
+    "theb-ai": "TheB.AI",
+    "gpt-3.5-turbo": "GPT-3.5",
+    "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+    "gpt-4-turbo": "GPT-4 Turbo",
+    "gpt-4": "GPT-4",
+    "gpt-4-32k": "GPT-4 32K",
+    "claude-2": "Claude 2",
+    "claude-1": "Claude",
+    "claude-1-100k": "Claude 100K",
+    "claude-instant-1": "Claude Instant",
+    "claude-instant-1-100k": "Claude Instant 100K",
+    "palm-2": "PaLM 2",
+    "palm-2-codey": "Codey",
+    "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+    "llama-2-7b-chat": "Llama 2 7B",
+    "llama-2-13b-chat": "Llama 2 13B",
+    "llama-2-70b-chat": "Llama 2 70B",
+    "code-llama-7b": "Code Llama 7B",
+    "code-llama-13b": "Code Llama 13B",
+    "code-llama-34b": "Code Llama 34B",
+    "qwen-7b-chat": "Qwen 7B"
+}
+
+class ThebApi(BaseProvider):
+    url = "https://theb.ai"
+    working = True
+    needs_auth = True
+
+    @staticmethod
+    def create_completion(
+        model: str,
+        messages: Messages,
+        stream: bool,
+        auth: str,
+        proxy: str = None,
+        **kwargs
+    ) -> CreateResult:
+        if model and model not in models:
+            raise ValueError(f"Model is not supported: {model}")
+        headers = {
+            'accept': 'application/json',
+            'authorization': f'Bearer {auth}',
+            'content-type': 'application/json',
+        }
+        # response = requests.get("https://api.baizhi.ai/v1/models", headers=headers).json()["data"]
+        # models = dict([(m["id"], m["name"]) for m in response])
+        # print(json.dumps(models, indent=4))
+        data: dict[str, Any] = {
+            "model": model if model else "gpt-3.5-turbo",
+            "messages": messages,
+            "stream": False,
+            "model_params": {
+                "system_prompt": kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."),
+                "temperature": 1,
+                "top_p": 1,
+                **kwargs
+            }
+        }
+        response = requests.post(
+            "https://api.theb.ai/v1/chat/completions",
+            headers=headers,
+            json=data,
+            proxies={"https": proxy}
+        )
+        try:
+            response.raise_for_status()
+            yield response.json()["choices"][0]["message"]["content"]
+        except:
+            raise RuntimeError(f"Response: {next(response.iter_lines()).decode()}")
\ No newline at end of file
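
For reference, a minimal usage sketch of the new ThebApi provider. This is an illustration under assumptions, not part of the commit: it assumes the g4f.ChatCompletion interface of this release and that extra keyword arguments are forwarded to create_completion; the auth value is a placeholder for a TheB.AI API key, and the model must be one of the keys of the models mapping above (it falls back to gpt-3.5-turbo when empty).

import g4f
from g4f.Provider import ThebApi

# ThebApi posts to https://api.theb.ai/v1/chat/completions directly,
# so no browser is needed, but a bearer token must be supplied via auth.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=ThebApi,
    messages=[{"role": "user", "content": "Hello"}],
    auth="your-theb-api-key",         # placeholder token
)
print(response)
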
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 4230253e..b85cd36a 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,6 +1,7 @@
 from .Bard import Bard
 from .Raycast import Raycast
 from .Theb import Theb
+from .ThebApi import ThebApi
 from .HuggingChat import HuggingChat
 from .OpenaiChat import OpenaiChat
 from .OpenAssistant import OpenAssistant