Diffstat:
 g4f/Provider/ChatgptX.py   |  8
 g4f/Provider/Vitalentum.py |  7
 g4f/Provider/Ylokh.py      | 13
 g4f/Provider/You.py        | 13
 g4f/Provider/Yqcloud.py    | 32
 g4f/typing.py              |  4
 6 files changed, 45 insertions(+), 32 deletions(-)
diff --git a/g4f/Provider/ChatgptX.py b/g4f/Provider/ChatgptX.py
index 5621613f..2944fb26 100644
--- a/g4f/Provider/ChatgptX.py
+++ b/g4f/Provider/ChatgptX.py
@@ -4,7 +4,7 @@ import re
import json
from aiohttp import ClientSession
-from typing import AsyncGenerator, Dict, List
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider
from .helper import format_prompt
@@ -18,9 +18,9 @@ class ChatgptX(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: List[Dict[str, str]],
+ messages: Messages,
**kwargs
- ) -> AsyncGenerator[str, None]:
+ ) -> AsyncResult:
headers = {
'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
@@ -66,7 +66,7 @@ class ChatgptX(AsyncGeneratorProvider):
response.raise_for_status()
chat = await response.json()
if "response" not in chat or not chat["response"]:
- raise RuntimeError(f'Response: {data}')
+ raise RuntimeError(f'Response: {chat}')
headers = {
'authority': 'chatgptx.de',
'accept': 'text/event-stream',
diff --git a/g4f/Provider/Vitalentum.py b/g4f/Provider/Vitalentum.py
index d5265428..ade492d2 100644
--- a/g4f/Provider/Vitalentum.py
+++ b/g4f/Provider/Vitalentum.py
@@ -4,7 +4,7 @@ import json
from aiohttp import ClientSession
from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
class Vitalentum(AsyncGeneratorProvider):
url = "https://app.vitalentum.io"
@@ -16,10 +16,10 @@ class Vitalentum(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
"Accept" : "text/event-stream",
@@ -62,6 +62,7 @@ class Vitalentum(AsyncGeneratorProvider):
("model", "str"),
("messages", "list[dict[str, str]]"),
("stream", "bool"),
+ ("proxy", "str"),
("temperature", "float"),
]
param = ", ".join([": ".join(p) for p in params])
diff --git a/g4f/Provider/Ylokh.py b/g4f/Provider/Ylokh.py
index a15f7894..59da0fa4 100644
--- a/g4f/Provider/Ylokh.py
+++ b/g4f/Provider/Ylokh.py
@@ -4,7 +4,7 @@ import json
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
class Ylokh(AsyncGeneratorProvider):
url = "https://chat.ylokh.xyz"
@@ -16,16 +16,16 @@ class Ylokh(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
stream: bool = True,
proxy: str = None,
- timeout: int = 30,
+ timeout: int = 120,
**kwargs
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
model = model if model else "gpt-3.5-turbo"
headers = {
- "Origin" : cls.url,
- "Referer" : cls.url + "/",
+ "Origin" : cls.url,
+ "Referer": cls.url + "/",
}
data = {
"messages": messages,
@@ -69,6 +69,7 @@ class Ylokh(AsyncGeneratorProvider):
("messages", "list[dict[str, str]]"),
("stream", "bool"),
("proxy", "str"),
+ ("timeout", "int"),
("temperature", "float"),
("top_p", "float"),
]
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index 4afe1ef6..1afd18be 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -3,7 +3,7 @@ from __future__ import annotations
import json
from ..requests import StreamSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncGenerator, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -17,19 +17,20 @@ class You(AsyncGeneratorProvider):
async def create_async_generator(
cls,
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
- timeout: int = 30,
+ timeout: int = 120,
**kwargs,
) -> AsyncGenerator:
async with StreamSession(proxies={"https": proxy}, impersonate="chrome107", timeout=timeout) as session:
headers = {
"Accept": "text/event-stream",
- "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
+ "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
}
+ data = {"q": format_prompt(messages), "domain": "youchat", "chat": ""}
async with session.get(
- "https://you.com/api/streamingSearch",
- params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
+ f"{cls.url}/api/streamingSearch",
+ params=data,
headers=headers
) as response:
response.raise_for_status()
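You (here) and Ylokh (above) both raise the default timeout from 30 to 120 seconds while leaving it overridable. A hedged call-site sketch, assuming both providers are exported from g4f.Provider and their endpoints are reachable:

    # Example only; not part of this patch.
    import asyncio
    from g4f.Provider import You, Ylokh  # assumed export location

    async def main():
        messages = [{"role": "user", "content": "Hello"}]
        # New default: waits up to 120 seconds.
        async for chunk in You.create_async_generator("gpt-3.5-turbo", messages):
            print(chunk, end="", flush=True)
        # The previous 30-second behaviour can still be requested explicitly.
        async for chunk in Ylokh.create_async_generator("gpt-3.5-turbo", messages, timeout=30):
            print(chunk, end="", flush=True)

    asyncio.run(main())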
diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py
index 74c998e2..0e96e20f 100644
--- a/g4f/Provider/Yqcloud.py
+++ b/g4f/Provider/Yqcloud.py
@@ -1,8 +1,9 @@
from __future__ import annotations
+import random
from aiohttp import ClientSession
-from ..typing import AsyncGenerator
+from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, format_prompt
@@ -14,22 +15,22 @@ class Yqcloud(AsyncGeneratorProvider):
@staticmethod
async def create_async_generator(
model: str,
- messages: list[dict[str, str]],
+ messages: Messages,
proxy: str = None,
**kwargs,
- ) -> AsyncGenerator:
+ ) -> AsyncResult:
async with ClientSession(
headers=_create_header()
) as session:
- payload = _create_payload(messages)
+ payload = _create_payload(messages, **kwargs)
async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
response.raise_for_status()
- async for stream in response.content.iter_any():
- if stream:
- stream = stream.decode()
- if "sorry, 您的ip已由于触发防滥用检测而被封禁" in stream:
+ async for chunk in response.content.iter_any():
+ if chunk:
+ chunk = chunk.decode()
+ if "sorry, 您的ip已由于触发防滥用检测而被封禁" in chunk:
raise RuntimeError("IP address is blocked by abuse detection.")
- yield stream.decode()
+ yield chunk
def _create_header():
@@ -40,12 +41,19 @@ def _create_header():
}
-def _create_payload(messages: list[dict[str, str]]):
+def _create_payload(
+ messages: Messages,
+ system_message: str = "",
+ user_id: int = None,
+ **kwargs
+):
+ if not user_id:
+ user_id = random.randint(1690000544336, 2093025544336)
return {
"prompt": format_prompt(messages),
"network": True,
- "system": "",
+ "system": system_message,
"withoutContext": False,
"stream": True,
- "userId": "#/chat/1693025544336"
+ "userId": f"#/chat/{user_id}"
}
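_create_payload now takes an optional system message and, when no user_id is passed, generates a pseudo-random one instead of the previously hard-coded value. A self-contained sketch of the reworked helper (format_prompt replaced by a plain string so the example runs on its own; values illustrative):

    import random

    def _create_payload(prompt: str, system_message: str = "", user_id: int = None, **kwargs):
        if not user_id:
            user_id = random.randint(1690000544336, 2093025544336)
        return {
            "prompt": prompt,
            "network": True,
            "system": system_message,
            "withoutContext": False,
            "stream": True,
            "userId": f"#/chat/{user_id}"
        }

    payload = _create_payload("Hello", system_message="You are a helpful assistant.")
    print(payload["system"])   # You are a helpful assistant.
    print(payload["userId"])   # e.g. #/chat/1786453210987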
diff --git a/g4f/typing.py b/g4f/typing.py
index 5f63c222..840e4624 100644
--- a/g4f/typing.py
+++ b/g4f/typing.py
@@ -1,5 +1,5 @@
import sys
-from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union
+from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
if sys.version_info >= (3, 8):
from typing import TypedDict
@@ -8,6 +8,8 @@ else:
SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]
+AsyncResult = AsyncGenerator[str, None]
+Messages = List[Dict[str, str]]
__all__ = [
'Any',
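The two aliases added here are what the provider hunks above import. A minimal sketch of a provider-style async generator written against them; ExampleProvider and its echoed output are hypothetical and only illustrate the shapes of Messages and AsyncResult:

    from g4f.typing import AsyncResult, Messages

    class ExampleProvider:
        @classmethod
        async def create_async_generator(cls, model: str, messages: Messages, **kwargs) -> AsyncResult:
            # Messages is a list of {"role": ..., "content": ...} dicts;
            # AsyncResult is an async generator yielding string chunks.
            for message in messages:
                yield f"echo: {message['content']}"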