path: root/g4f/Provider
author    H Lohaus <hlohaus@users.noreply.github.com>    2024-03-11 07:47:35 +0100
committer GitHub <noreply@github.com>                    2024-03-11 07:47:35 +0100
commit    0b850ac9fcabc52fecdeea8d56e620e44b35d180 (patch)
tree      f92578686f34476be49422889d4587d8f3cf2fa6 /g4f/Provider
parent    Merge pull request #1670 from xtekky/xtekky-patch-1 (diff)
parent    Fix HuggingChat and PerplexityLabs and add HuggingFace provider (diff)
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/BingCreateImages.py    |  4
-rw-r--r--  g4f/Provider/HuggingChat.py         | 35
-rw-r--r--  g4f/Provider/HuggingFace.py         | 75
-rw-r--r--  g4f/Provider/Llama2.py              | 16
-rw-r--r--  g4f/Provider/PerplexityLabs.py      | 47
-rw-r--r--  g4f/Provider/You.py                 | 41
-rw-r--r--  g4f/Provider/__init__.py            |  1
-rw-r--r--  g4f/Provider/bing/create_images.py  |  4
8 files changed, 173 insertions, 50 deletions
diff --git a/g4f/Provider/BingCreateImages.py b/g4f/Provider/BingCreateImages.py
index 901b7787..c465c1d8 100644
--- a/g4f/Provider/BingCreateImages.py
+++ b/g4f/Provider/BingCreateImages.py
@@ -2,7 +2,7 @@ from __future__ import annotations
import asyncio
import os
-from typing import Generator
+from typing import Iterator, Union
from ..cookies import get_cookies
from ..image import ImageResponse
@@ -16,7 +16,7 @@ class BingCreateImages:
self.cookies = cookies
self.proxy = proxy
- def create(self, prompt: str) -> Generator[ImageResponse, None, None]:
+ def create(self, prompt: str) -> Iterator[Union[ImageResponse, str]]:
"""
Generator for creating image completions based on a prompt.
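
Note on the BingCreateImages change: widening the return type to Iterator[Union[ImageResponse, str]] means callers may now receive plain status strings alongside the final ImageResponse. A minimal consumer sketch, assuming the constructor keywords and cookie name implied by the diff (both are illustrative):

from g4f.Provider.BingCreateImages import BingCreateImages

# "_U" is the usual Bing auth cookie; treat the exact name as an assumption here.
session = BingCreateImages(cookies={"_U": "..."}, proxy=None)
for chunk in session.create("a watercolor fox"):
    if isinstance(chunk, str):
        print("status:", chunk)   # plain strings carry progress or login hints
    else:
        print(chunk)              # ImageResponse; str() renders markdown image links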
diff --git a/g4f/Provider/HuggingChat.py b/g4f/Provider/HuggingChat.py
index 3ea9f306..9644880c 100644
--- a/g4f/Provider/HuggingChat.py
+++ b/g4f/Provider/HuggingChat.py
@@ -1,12 +1,12 @@
from __future__ import annotations
-import json, uuid
+import json
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt, get_cookies, get_connector
+from .helper import format_prompt, get_connector
class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -24,7 +24,6 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
]
model_aliases = {
"openchat/openchat_3.5": "openchat/openchat-3.5-1210",
- "mistralai/Mixtral-8x7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.2"
}
@classmethod
@@ -39,9 +38,11 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
cookies: dict = None,
**kwargs
) -> AsyncResult:
- if not cookies:
- cookies = get_cookies(".huggingface.co", False)
-
+ options = {"model": cls.get_model(model)}
+ system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if system_prompt:
+ options["preprompt"] = system_prompt
+ messages = [message for message in messages if message["role"] != "system"]
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
}
@@ -50,20 +51,27 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers,
connector=get_connector(connector, proxy)
) as session:
- async with session.post(f"{cls.url}/conversation", json={"model": cls.get_model(model)}, proxy=proxy) as response:
+ async with session.post(f"{cls.url}/conversation", json=options, proxy=proxy) as response:
+ response.raise_for_status()
conversation_id = (await response.json())["conversationId"]
-
- send = {
- "id": str(uuid.uuid4()),
+ async with session.get(f"{cls.url}/conversation/{conversation_id}/__data.json") as response:
+ response.raise_for_status()
+ data: list = (await response.json())["nodes"][1]["data"]
+ keys: list[int] = data[data[0]["messages"]]
+ message_keys: dict = data[keys[0]]
+ message_id: str = data[message_keys["id"]]
+ options = {
+ "id": message_id,
"inputs": format_prompt(messages),
+ "is_continue": False,
"is_retry": False,
- "response_id": str(uuid.uuid4()),
"web_search": web_search
}
- async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response:
+ async with session.post(f"{cls.url}/conversation/{conversation_id}", json=options) as response:
first_token = True
async for line in response.content:
- line = json.loads(line[:-1])
+ response.raise_for_status()
+ line = json.loads(line)
if "type" not in line:
raise RuntimeError(f"Response: {line}")
elif line["type"] == "stream":
@@ -74,6 +82,5 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
yield token
elif line["type"] == "finalAnswer":
break
-
async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
response.raise_for_status()
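
Note on the HuggingChat rewrite: instead of minting client-side UUIDs, the provider now reads the server-assigned message id out of the conversation's __data.json, a SvelteKit payload in which nodes[1]["data"] is a flat array and dict values are indices back into that same array. A self-contained sketch of the indirection (sample values are made up):

# Flat data array as served in __data.json; values inside dicts are array indices.
data = [
    {"messages": 1},                   # data[0]: root object, "messages" -> index 1
    [2],                               # data[1]: list of message-node indices
    {"id": 3},                         # data[2]: first message node, "id" -> index 3
    "2b64f635-aaaa-bbbb-cccc-example", # data[3]: the actual message id string
]
keys = data[data[0]["messages"]]        # -> [2]
message_keys = data[keys[0]]            # -> {"id": 3}
message_id = data[message_keys["id"]]   # -> "2b64f635-aaaa-bbbb-cccc-example"
print(message_id)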
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
new file mode 100644
index 00000000..a73411ce
--- /dev/null
+++ b/g4f/Provider/HuggingFace.py
@@ -0,0 +1,75 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, BaseConnector
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import get_connector
+from ..errors import RateLimitError, ModelNotFoundError
+
+class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://huggingface.co/chat"
+ working = True
+ supports_message_history = True
+ default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ api_base: str = "https://api-inference.huggingface.co",
+ api_key: str = None,
+ max_new_tokens: int = 1024,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ headers = {}
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ params = {
+ "return_full_text": False,
+ "max_new_tokens": max_new_tokens,
+ "temperature": temperature,
+ **kwargs
+ }
+ payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
+ async with ClientSession(
+ headers=headers,
+ connector=get_connector(connector, proxy)
+ ) as session:
+ async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
+ if response.status == 429:
+ raise RateLimitError("Rate limit reached. Set an api_key")
+ elif response.status == 404:
+ raise ModelNotFoundError(f"Model is not supported: {model}")
+ elif response.status != 200:
+ raise RuntimeError(f"Response {response.status}: {await response.text()}")
+ if stream:
+ first = True
+ async for line in response.content:
+ if line.startswith(b"data:"):
+ data = json.loads(line[5:])
+ if not data["token"]["special"]:
+ chunk = data["token"]["text"]
+ if first:
+ first = False
+ chunk = chunk.lstrip()
+ yield chunk
+ else:
+ yield (await response.json())[0]["generated_text"].strip()
+
+def format_prompt(messages: Messages) -> str:
+ system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ question = " ".join([messages[-1]["content"], *system_messages])
+ history = "".join([
+ f"<s>[INST]{messages[idx-1]['content']} [/INST] {message}</s>"
+ for idx, message in enumerate(messages)
+ if message["role"] == "assistant"
+ ])
+ return f"{history}<s>[INST] {question} [/INST]" \ No newline at end of file
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
index d1f8e194..6a94eea1 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama2.py
@@ -28,6 +28,10 @@ class Llama2(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
+ system_message: str = "You are a helpful assistant.",
+ temperature: float = 0.75,
+ top_p: float = 0.9,
+ max_tokens: int = 8000,
**kwargs
) -> AsyncResult:
headers = {
@@ -47,14 +51,18 @@ class Llama2(AsyncGeneratorProvider, ProviderModelMixin):
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
+ system_messages = [message["content"] for message in messages if message["role"] == "system"]
+ if system_messages:
+ system_message = "\n".join(system_messages)
+ messages = [message for message in messages if message["role"] != "system"]
prompt = format_prompt(messages)
data = {
"prompt": prompt,
"model": cls.get_model(model),
- "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
- "temperature": kwargs.get("temperature", 0.75),
- "topP": kwargs.get("top_p", 0.9),
- "maxTokens": kwargs.get("max_tokens", 8000),
+ "systemPrompt": system_message,
+ "temperature": temperature,
+ "topP": top_p,
+ "maxTokens": max_tokens,
"image": None
}
started = False
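
Note on the Llama2 change: implicit kwargs become named parameters with defaults, and any system turns are folded into systemPrompt instead of being sent inline. The extraction pattern in isolation:

messages = [
    {"role": "system", "content": "Be terse."},
    {"role": "user", "content": "Explain HTTP."},
]
system_message = "You are a helpful assistant."  # default from the new signature
system_messages = [m["content"] for m in messages if m["role"] == "system"]
if system_messages:
    system_message = "\n".join(system_messages)  # -> "Be terse."
messages = [m for m in messages if m["role"] != "system"]  # system turns removed
print(system_message, messages)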
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index a82227cd..de2d1b71 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -14,17 +14,18 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://labs.perplexity.ai"
working = True
- default_model = 'pplx-70b-online'
+ default_model = "sonar-medium-online"
models = [
- 'pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
- 'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
- 'mistral-medium', 'related'
+ "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
+ "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",
+ "gemma-2b-it", "gemma-7b-it"
+ "mistral-medium", "related"
]
model_aliases = {
"mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
- "meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
"mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
- "codellama/CodeLlama-34b-Instruct-hf": "codellama-34b-instruct"
+ "codellama/CodeLlama-70b-Instruct-hf": "codellama-70b-instruct",
+ "llava-v1.5-7b": "llava-v1.5-7b-wrapper"
}
@classmethod
@@ -50,38 +51,40 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"TE": "trailers",
}
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
- t = format(random.getrandbits(32), '08x')
+ t = format(random.getrandbits(32), "08x")
async with session.get(
f"{API_URL}?EIO=4&transport=polling&t={t}"
) as response:
text = await response.text()
- sid = json.loads(text[1:])['sid']
+ sid = json.loads(text[1:])["sid"]
post_data = '40{"jwt":"anonymous-ask-user"}'
async with session.post(
- f'{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}',
+ f"{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}",
data=post_data
) as response:
- assert await response.text() == 'OK'
+ assert await response.text() == "OK"
- async with session.ws_connect(f'{WS_URL}?EIO=4&transport=websocket&sid={sid}', autoping=False) as ws:
- await ws.send_str('2probe')
- assert(await ws.receive_str() == '3probe')
- await ws.send_str('5')
+ async with session.ws_connect(f"{WS_URL}?EIO=4&transport=websocket&sid={sid}", autoping=False) as ws:
+ await ws.send_str("2probe")
+ assert(await ws.receive_str() == "3probe")
+ await ws.send_str("5")
assert(await ws.receive_str())
- assert(await ws.receive_str() == '6')
+ assert(await ws.receive_str() == "6")
message_data = {
- 'version': '2.2',
- 'source': 'default',
- 'model': cls.get_model(model),
- 'messages': messages
+ "version": "2.5",
+ "source": "default",
+ "model": cls.get_model(model),
+ "messages": messages
}
- await ws.send_str('42' + json.dumps(['perplexity_labs', message_data]))
+ await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
last_message = 0
while True:
message = await ws.receive_str()
- if message == '2':
- await ws.send_str('3')
+ if message == "2":
+ if last_message == 0:
+ raise RuntimeError("Unknown error")
+ await ws.send_str("3")
continue
try:
data = json.loads(message[2:])[1]
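
Note on the PerplexityLabs flow: the frames above follow the Engine.IO v4 protocol. A polling GET returns the session id in a "0"-prefixed open packet, a "40..." message connects the namespace, the websocket upgrade is confirmed by the 2probe/3probe exchange plus a "5" frame, payloads travel as "42"-prefixed JSON arrays, and "2"/"3" are ping/pong. A condensed handshake sketch, assuming an aiohttp ClientSession and omitting the t cache-buster and error handling:

import json

async def engineio_handshake(session, api_url: str, ws_url: str):
    # 1. Polling GET: the "0{...}" open packet carries the session id.
    async with session.get(f"{api_url}?EIO=4&transport=polling") as response:
        sid = json.loads((await response.text())[1:])["sid"]
    # 2. "4" = message, "0" = connect: authenticate the namespace over polling.
    auth = '40{"jwt":"anonymous-ask-user"}'
    async with session.post(f"{api_url}?EIO=4&transport=polling&sid={sid}", data=auth) as response:
        assert await response.text() == "OK"
    # 3. Upgrade to websocket and confirm with the probe exchange.
    ws = await session.ws_connect(f"{ws_url}?EIO=4&transport=websocket&sid={sid}", autoping=False)
    await ws.send_str("2probe")
    assert await ws.receive_str() == "3probe"
    await ws.send_str("5")  # upgrade complete; "42[...]" frames can now be sent
    return ws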
diff --git a/g4f/Provider/You.py b/g4f/Provider/You.py
index b21fd582..1fdaf06d 100644
--- a/g4f/Provider/You.py
+++ b/g4f/Provider/You.py
@@ -1,21 +1,37 @@
from __future__ import annotations
+import re
import json
import base64
import uuid
from aiohttp import ClientSession, FormData, BaseConnector
from ..typing import AsyncResult, Messages, ImageType, Cookies
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..providers.helper import get_connector, format_prompt
-from ..image import to_bytes
+from ..image import to_bytes, ImageResponse
from ..requests.defaults import DEFAULT_HEADERS
-class You(AsyncGeneratorProvider):
+class You(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://you.com"
working = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
+ default_model = "gpt-3.5-turbo"
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-4",
+ "gpt-4-turbo",
+ "claude-instant",
+ "claude-2",
+ "claude-3-opus",
+ "claude-3-sonnet",
+ "gemini-pro",
+ "zephyr"
+ ]
+ model_aliases = {
+ "claude-v2": "claude-2"
+ }
_cookies = None
_cookies_used = 0
@@ -35,10 +51,15 @@ class You(AsyncGeneratorProvider):
connector=get_connector(connector, proxy),
headers=DEFAULT_HEADERS
) as client:
- if image:
+ if image is not None:
chat_mode = "agent"
- elif model == "gpt-4":
- chat_mode = model
+ elif not model or model == cls.default_model:
+ chat_mode = "default"
+ elif model.startswith("dall-e"):
+ chat_mode = "create"
+ else:
+ chat_mode = "custom"
+ model = cls.get_model(model)
cookies = await cls.get_cookies(client) if chat_mode != "default" else None
upload = json.dumps([await cls.upload_file(client, cookies, to_bytes(image), image_name)]) if image else ""
#questions = [message["content"] for message in messages if message["role"] == "user"]
@@ -63,6 +84,8 @@ class You(AsyncGeneratorProvider):
"userFiles": upload,
"selectedChatMode": chat_mode,
}
+ if chat_mode == "custom":
+ params["selectedAIModel"] = model.replace("-", "_")
async with (client.post if chat_mode == "default" else client.get)(
f"{cls.url}/api/streamingSearch",
data=data,
@@ -80,7 +103,11 @@ class You(AsyncGeneratorProvider):
if event == "youChatToken" and event in data:
yield data[event]
elif event == "youChatUpdate" and "t" in data:
- yield data["t"]
+ match = re.search(r"!\[fig\]\((.+?)\)", data["t"])
+ if match:
+ yield ImageResponse(match.group(1), messages[-1]["content"])
+ else:
+ yield data["t"]
@classmethod
async def upload_file(cls, client: ClientSession, cookies: Cookies, file: bytes, filename: str = None) -> dict:
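
Note on the You.com change: "youChatUpdate" events may embed a generated image as a markdown figure, which the regex above lifts into an ImageResponse instead of streaming it as text. The extraction in isolation:

import re

chunk = "Here you go: ![fig](https://example.com/generated.png)"
match = re.search(r"!\[fig\]\((.+?)\)", chunk)
if match:
    print("image url:", match.group(1))  # -> https://example.com/generated.png
else:
    print(chunk)                         # ordinary text is streamed unchanged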
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 7832f871..462fc249 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -46,6 +46,7 @@ from .GptGod import GptGod
from .GptTalkRu import GptTalkRu
from .Hashnode import Hashnode
from .HuggingChat import HuggingChat
+from .HuggingFace import HuggingFace
from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
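
With the import registered, the new provider is reachable through the usual entry point. A hedged usage sketch (API shape as of g4f 0.2.x; the api_key value is a placeholder):

import g4f
from g4f.Provider import HuggingFace

response = g4f.ChatCompletion.create(
    model="mistralai/Mixtral-8x7B-Instruct-v0.1",  # the provider's default_model
    provider=HuggingFace,
    messages=[{"role": "user", "content": "Hello"}],
    api_key="hf_...",  # optional, but avoids the 429 RateLimitError path
)
print(response)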
diff --git a/g4f/Provider/bing/create_images.py b/g4f/Provider/bing/create_images.py
index f6a8a372..cb357708 100644
--- a/g4f/Provider/bing/create_images.py
+++ b/g4f/Provider/bing/create_images.py
@@ -20,7 +20,7 @@ except ImportError:
from ...providers.create_images import CreateImagesProvider
from ..helper import get_connector
from ...providers.types import ProviderType
-from ...errors import MissingRequirementsError
+from ...errors import MissingRequirementsError, RateLimitError
from ...webdriver import WebDriver, get_driver_cookies, get_browser
BING_URL = "https://www.bing.com"
@@ -125,6 +125,8 @@ async def create_images(session: ClientSession, prompt: str, proxy: str = None,
async with session.post(url, allow_redirects=False, data=payload, timeout=timeout) as response:
response.raise_for_status()
text = (await response.text()).lower()
+ if "0 coins available" in text:
+ raise RateLimitError("No coins left. Log in with a different account or wait a while")
for error in ERRORS:
if error in text:
raise RuntimeError(f"Create images failed: {error}")