author    H Lohaus <hlohaus@users.noreply.github.com>    2023-11-15 18:25:28 +0100
committer GitHub <noreply@github.com>                    2023-11-15 18:25:28 +0100
commit    f04e415becbf45b1ef5a250e39f838be7608c59a (patch)
tree      e6712e4ed552a98d60ef8a4a6bd9d48d85a04bd7 /g4f/Provider/deprecated
parent    Merge pull request #1251 from guspan-tanadi/READMENotesection (diff)
parent    Fix Chatgpt4Online Provider (diff)
Diffstat (limited to 'g4f/Provider/deprecated')
-rw-r--r--  g4f/Provider/deprecated/Acytoo.py       51
-rw-r--r--  g4f/Provider/deprecated/Aibn.py         58
-rw-r--r--  g4f/Provider/deprecated/Ails.py        101
-rw-r--r--  g4f/Provider/deprecated/ChatgptDuo.py   58
-rw-r--r--  g4f/Provider/deprecated/Cromicle.py     50
-rw-r--r--  g4f/Provider/deprecated/__init__.py      7
6 files changed, 324 insertions(+), 1 deletion(-)
diff --git a/g4f/Provider/deprecated/Acytoo.py b/g4f/Provider/deprecated/Acytoo.py
new file mode 100644
index 00000000..0379fdd6
--- /dev/null
+++ b/g4f/Provider/deprecated/Acytoo.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Acytoo(AsyncGeneratorProvider):
+    url = 'https://chat.acytoo.com'
+    working = False
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        async with ClientSession(
+            headers=_create_header()
+        ) as session:
+            async with session.post(
+                f'{cls.url}/api/completions',
+                proxy=proxy,
+                json=_create_payload(messages, **kwargs)
+            ) as response:
+                response.raise_for_status()
+                async for stream in response.content.iter_any():
+                    if stream:
+                        yield stream.decode()
+
+
+def _create_header():
+    return {
+        'accept': '*/*',
+        'content-type': 'application/json',
+    }
+
+
+def _create_payload(messages: Messages, temperature: float = 0.5, **kwargs):
+    return {
+        'key': '',
+        'model': 'gpt-3.5-turbo',
+        'messages': messages,
+        'temperature': temperature,
+        'password': ''
+    }
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/Aibn.py b/g4f/Provider/deprecated/Aibn.py
new file mode 100644
index 00000000..60cef1e4
--- /dev/null
+++ b/g4f/Provider/deprecated/Aibn.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import time
+import hashlib
+
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Aibn(AsyncGeneratorProvider):
+    url = "https://aibn.cc"
+    working = False
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
+        **kwargs
+    ) -> AsyncResult:
+        async with StreamSession(
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
+            timestamp = int(time.time())
+            data = {
+                "messages": messages,
+                "pass": None,
+                "sign": generate_signature(timestamp, messages[-1]["content"]),
+                "time": timestamp
+            }
+            async with session.post(f"{cls.url}/api/generate", json=data) as response:
+                response.raise_for_status()
+                async for chunk in response.iter_content():
+                    yield chunk.decode()
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
+    data = f"{timestamp}:{message}:{secret}"
+    return hashlib.sha256(data.encode()).hexdigest()
\ No newline at end of file
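Note on the request above: the `sign` field Aibn sends to `/api/generate` is simply a SHA-256 digest over the timestamp, the last user message, and the literal default secret "undefined". A minimal sketch of assembling that body outside the provider class (the message content is a hypothetical example):

    import time
    import hashlib

    # Hypothetical last user message, used only to illustrate the signing scheme.
    content = "Hello, how are you?"
    timestamp = int(time.time())

    # Mirrors generate_signature(): sha256("{timestamp}:{message}:undefined")
    sign = hashlib.sha256(f"{timestamp}:{content}:undefined".encode()).hexdigest()

    data = {
        "messages": [{"role": "user", "content": content}],
        "pass": None,
        "sign": sign,
        "time": timestamp,
    }
    print(data)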
diff --git a/g4f/Provider/deprecated/Ails.py b/g4f/Provider/deprecated/Ails.py
new file mode 100644
index 00000000..93c63a69
--- /dev/null
+++ b/g4f/Provider/deprecated/Ails.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import hashlib
+import time
+import uuid
+import json
+from datetime import datetime
+from aiohttp import ClientSession
+
+from ...typing import SHA256, AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+
+
+class Ails(AsyncGeneratorProvider):
+    url = "https://ai.ls"
+    working = False
+    supports_message_history = True
+    supports_gpt_35_turbo = True
+
+    @staticmethod
+    async def create_async_generator(
+        model: str,
+        messages: Messages,
+        stream: bool,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            "authority": "api.caipacity.com",
+            "accept": "*/*",
+            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "authorization": "Bearer free",
+            "client-id": str(uuid.uuid4()),
+            "client-v": "0.1.278",
+            "content-type": "application/json",
+            "origin": "https://ai.ls",
+            "referer": "https://ai.ls/",
+            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "from-url": "https://ai.ls/?chat=1"
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            timestamp = _format_timestamp(int(time.time() * 1000))
+            json_data = {
+                "model": "gpt-3.5-turbo",
+                "temperature": kwargs.get("temperature", 0.6),
+                "stream": True,
+                "messages": messages,
+                "d": datetime.now().strftime("%Y-%m-%d"),
+                "t": timestamp,
+                "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
+            }
+            async with session.post(
+                "https://api.caipacity.com/v1/chat/completions",
+                proxy=proxy,
+                json=json_data
+            ) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode('utf-8')
+                    if line.startswith(start) and line != "data: [DONE]":
+                        line = line[len(start):-1]
+                        line = json.loads(line)
+                        if token := line["choices"][0]["delta"].get("content"):
+                            if "ai.ls" in token or "ai.ci" in token:
+                                raise Exception(f"Response Error: {token}")
+                            yield token
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+def _hash(json_data: dict[str, str]) -> SHA256:
+    base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
+
+    return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
+
+
+def _format_timestamp(timestamp: int) -> str:
+    e = timestamp
+    n = e % 10
+    r = n + 1 if n % 2 == 0 else n
+    return str(e - n + r)
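\ No newline at end of file

A note on the two helpers above: `_format_timestamp` nudges the millisecond timestamp so its last digit is odd (an even last digit is bumped up by one), and `_hash` salts the timestamp and last message with a hard-coded secret before hashing. A small illustrative sketch, using a hypothetical message:

    import time
    import hashlib

    def format_timestamp(timestamp: int) -> str:
        # Same rounding as Ails._format_timestamp: force the last digit to be odd.
        n = timestamp % 10
        r = n + 1 if n % 2 == 0 else n
        return str(timestamp - n + r)

    message = "Hello"                             # hypothetical last user message
    t = format_timestamp(int(time.time() * 1000))
    salt = "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf"      # hard-coded salt from _hash()
    s = hashlib.sha256(f"{t}:{message}:{salt}:{len(message)}".encode()).hexdigest()
    print(t, s)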
diff --git a/g4f/Provider/deprecated/ChatgptDuo.py b/g4f/Provider/deprecated/ChatgptDuo.py
new file mode 100644
index 00000000..c77c6a1c
--- /dev/null
+++ b/g4f/Provider/deprecated/ChatgptDuo.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+from ...typing import Messages
+from curl_cffi.requests import AsyncSession
+from ..base_provider import AsyncProvider, format_prompt
+
+
+class ChatgptDuo(AsyncProvider):
+    url = "https://chatgptduo.com"
+    supports_gpt_35_turbo = True
+    working = False
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        timeout: int = 120,
+        **kwargs
+    ) -> str:
+        async with AsyncSession(
+            impersonate="chrome107",
+            proxies={"https": proxy},
+            timeout=timeout
+        ) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "prompt": prompt,
+                "search": prompt,
+                "purpose": "ask",
+            }
+            response = await session.post(f"{cls.url}/", data=data)
+            response.raise_for_status()
+            data = response.json()
+
+            cls._sources = [{
+                "title": source["title"],
+                "url": source["link"],
+                "snippet": source["snippet"]
+            } for source in data["results"]]
+
+            return data["answer"]
+
+    @classmethod
+    def get_sources(cls):
+        return cls._sources
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
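ChatgptDuo is the only non-streaming provider in this set, and it caches the search results it parsed on the class so they can be read back after the call. A minimal usage sketch (the provider is flagged `working = False`, so the request will likely fail today):

    import asyncio
    from g4f.Provider.deprecated import ChatgptDuo

    async def main():
        answer = await ChatgptDuo.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "What is gpt4free?"}],
        )
        print(answer)
        # Sources scraped during the search step are cached on the class.
        print(ChatgptDuo.get_sources())

    asyncio.run(main())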
diff --git a/g4f/Provider/deprecated/Cromicle.py b/g4f/Provider/deprecated/Cromicle.py
new file mode 100644
index 00000000..9f986cb5
--- /dev/null
+++ b/g4f/Provider/deprecated/Cromicle.py
@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+from hashlib import sha256
+from ...typing import AsyncResult, Messages, Dict
+
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Cromicle(AsyncGeneratorProvider):
+    url: str = 'https://cromicle.top'
+    working: bool = False
+    supports_gpt_35_turbo: bool = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        async with ClientSession(
+            headers=_create_header()
+        ) as session:
+            async with session.post(
+                f'{cls.url}/chat',
+                proxy=proxy,
+                json=_create_payload(format_prompt(messages))
+            ) as response:
+                response.raise_for_status()
+                async for stream in response.content.iter_any():
+                    if stream:
+                        yield stream.decode()
+
+
+def _create_header() -> Dict[str, str]:
+    return {
+        'accept': '*/*',
+        'content-type': 'application/json',
+    }
+
+
+def _create_payload(message: str) -> Dict[str, str]:
+    return {
+        'message': message,
+        'token': 'abc',
+        'hash': sha256('abc'.encode() + message.encode()).hexdigest()
+    }
\ No newline at end of file
diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py
index f8e35b37..ca5ac83e 100644
--- a/g4f/Provider/deprecated/__init__.py
+++ b/g4f/Provider/deprecated/__init__.py
@@ -13,4 +13,9 @@ from .FastGpt import FastGpt
from .Aivvm import Aivvm
from .Vitalentum import Vitalentum
from .H2o import H2o
-from .Myshell import Myshell
\ No newline at end of file
+from .Myshell import Myshell
+from .Acytoo import Acytoo
+from .Aibn import Aibn
+from .Ails import Ails
+from .ChatgptDuo import ChatgptDuo
+from .Cromicle import Cromicle
\ No newline at end of file
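With the imports above, the restored providers are reachable under `g4f.Provider.deprecated` again. A minimal sketch of selecting one of them explicitly through the usual entry point (assuming the `g4f.ChatCompletion.create` API of this release; note that all of these providers are flagged `working = False`, so failures are expected):

    import g4f
    from g4f.Provider.deprecated import Aibn

    response = g4f.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        provider=Aibn,  # any of the re-added deprecated providers
    )
    print(response)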