Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/PerplexityLabs.py     97
-rw-r--r--  g4f/Provider/__init__.py            5
-rw-r--r--  g4f/Provider/bing/upload_image.py  36
-rw-r--r--  g4f/gui/client/html/index.html      4
-rw-r--r--  g4f/gui/client/js/chat.v1.js        8
-rw-r--r--  g4f/gui/server/backend.py           2
-rw-r--r--  g4f/image.py                       16
-rw-r--r--  g4f/models.py                      11
8 files changed, 151 insertions, 28 deletions
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
new file mode 100644
index 00000000..7e76aeef
--- /dev/null
+++ b/g4f/Provider/PerplexityLabs.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import random
+import json
+from aiohttp import ClientSession, WSMsgType
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+
+API_URL = "https://labs-api.perplexity.ai/socket.io/"
+WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
+MODELS = ['pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
+ 'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
+ 'mistral-medium', 'related']
+DEFAULT_MODEL = MODELS[1]
+MODEL_MAP = {
+ "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
+ "meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1": "mixtral-8x7b-instruct",
+}
+
+class PerplexityLabs(AsyncGeneratorProvider):
+ url = "https://labs.perplexity.ai"
+ working = True
+ supports_gpt_35_turbo = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = DEFAULT_MODEL
+ elif model in MODEL_MAP:
+ model = MODEL_MAP[model]
+ elif model not in MODELS:
+ raise ValueError(f"Model is not supported: {model}")
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:121.0) Gecko/20100101 Firefox/121.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Referer": f"{cls.url}/",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-site",
+ "TE": "trailers",
+ }
+ async with ClientSession(headers=headers) as session:
+ t = format(random.getrandbits(32), '08x')
+ async with session.get(
+ f"{API_URL}?EIO=4&transport=polling&t={t}",
+ proxy=proxy
+ ) as response:
+ text = await response.text()
+
+ sid = json.loads(text[1:])['sid']
+ post_data = '40{"jwt":"anonymous-ask-user"}'
+ async with session.post(
+ f'{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}',
+ data=post_data,
+ proxy=proxy
+ ) as response:
+ assert await response.text() == 'OK'
+
+ async with session.ws_connect(f'{WS_URL}?EIO=4&transport=websocket&sid={sid}', autoping=False) as ws:
+ await ws.send_str('2probe')
+            assert await ws.receive_str() == '3probe'
+            await ws.send_str('5')
+            assert await ws.receive_str()
+            assert await ws.receive_str() == '6'
+ message_data = {
+ 'version': '2.2',
+ 'source': 'default',
+ 'model': model,
+ 'messages': messages
+ }
+ await ws.send_str('42' + json.dumps(['perplexity_playground', message_data]))
+ last_message = 0
+ while True:
+ message = await ws.receive_str()
+ if message == '2':
+ await ws.send_str('3')
+ continue
+ try:
+ data = json.loads(message[2:])[1]
+ yield data["output"][last_message:]
+ last_message = len(data["output"])
+ if data["final"]:
+ break
+            except Exception as e:
+                raise RuntimeError(f"Message: {message}") from e
\ No newline at end of file
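
A minimal usage sketch for the new provider, assuming g4f's public ChatCompletion.create_async entry point; the prompt text and model choice are illustrative:

    import asyncio

    import g4f
    from g4f.Provider import PerplexityLabs

    async def main():
        # "mistral-7b-instruct" is one of the names in MODELS above;
        # passing provider= explicitly bypasses best_provider selection.
        answer = await g4f.ChatCompletion.create_async(
            model="mistral-7b-instruct",
            messages=[{"role": "user", "content": "What is Perplexity Labs?"}],
            provider=PerplexityLabs,
        )
        print(answer)

    asyncio.run(main())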
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index ee8d2c1b..5ac5cfca 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -9,10 +9,11 @@ from .needs_auth import *
from .unfinished import *
from .selenium import *
-from .Aura import Aura
from .AiAsk import AiAsk
from .AiChatOnline import AiChatOnline
from .AItianhu import AItianhu
+from .Aura import Aura
+from .Bestim import Bestim
from .Bing import Bing
from .ChatAnywhere import ChatAnywhere
from .ChatBase import ChatBase
@@ -45,12 +46,12 @@ from .Koala import Koala
from .Liaobots import Liaobots
from .Llama2 import Llama2
from .OnlineGpt import OnlineGpt
+from .PerplexityLabs import PerplexityLabs
from .Phind import Phind
from .Pi import Pi
from .Vercel import Vercel
from .Ylokh import Ylokh
from .You import You
-from .Bestim import Bestim
import sys
diff --git a/g4f/Provider/bing/upload_image.py b/g4f/Provider/bing/upload_image.py
index 4d70659f..bb5687a8 100644
--- a/g4f/Provider/bing/upload_image.py
+++ b/g4f/Provider/bing/upload_image.py
@@ -82,13 +82,16 @@ def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
Tuple[str, str]: The data and boundary for the payload.
"""
boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
- data = f"--{boundary}\r\n" \
- f"Content-Disposition: form-data; name=\"knowledgeRequest\"\r\n\r\n" \
- f"{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}\r\n" \
- f"--{boundary}\r\n" \
- f"Content-Disposition: form-data; name=\"imageBase64\"\r\n\r\n" \
- f"{image_bin}\r\n" \
- f"--{boundary}--\r\n"
+ data = f"""--{boundary}
+Content-Disposition: form-data; name="knowledgeRequest"
+
+{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}
+--{boundary}
+Content-Disposition: form-data; name="imageBase64"
+
+{image_bin}
+--{boundary}--
+"""
return data, boundary
def build_knowledge_request(tone: str) -> dict:
@@ -102,14 +105,17 @@ def build_knowledge_request(tone: str) -> dict:
dict: The knowledge request payload.
"""
return {
- 'invokedSkills': ["ImageById"],
- 'subscriptionId': "Bing.Chat.Multimodal",
- 'invokedSkillsRequestData': {
- 'enableFaceBlur': True
- },
- 'convoData': {
- 'convoid': "",
- 'convotone': tone
+ "imageInfo": {},
+ "knowledgeRequest": {
+ 'invokedSkills': ["ImageById"],
+ 'subscriptionId': "Bing.Chat.Multimodal",
+ 'invokedSkillsRequestData': {
+ 'enableFaceBlur': True
+ },
+ 'convoData': {
+ 'convoid': "",
+ 'convotone': tone
+ }
}
}
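
For reference, a quick sketch of the dictionary the patched build_knowledge_request now returns; the tone value is an example and the module path follows the diff above:

    import json

    from g4f.Provider.bing.upload_image import build_knowledge_request

    payload = build_knowledge_request("Balanced")
    # The skills/convo fields are now nested under "knowledgeRequest",
    # with an empty "imageInfo" object alongside them.
    assert set(payload) == {"imageInfo", "knowledgeRequest"}
    print(json.dumps(payload, indent=2))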
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 95489ba4..5edb55e8 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -115,11 +115,11 @@
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
style="white-space: pre-wrap;resize: none;"></textarea>
<label for="image" title="Works only with Bing and OpenaiChat">
- <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg" required/>
+ <input type="file" id="image" name="image" accept="image/png, image/gif, image/jpeg, image/svg+xml" required/>
<i class="fa-regular fa-image"></i>
</label>
<label for="file">
- <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .svg, .log, .csv, .twig, .md" required/>
+ <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
<i class="fa-solid fa-paperclip"></i>
</label>
<div id="send-button">
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 8b9bc181..d62c040b 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -660,7 +660,13 @@ observer.observe(message_input, { attributes: true });
}
document.getElementById("version_text").innerHTML = text
})()
-
+imageInput.addEventListener('click', async (event) => {
+ imageInput.value = '';
+});
+fileInput.addEventListener('click', async (event) => {
+ fileInput.value = '';
+ delete fileInput.dataset.text;
+});
fileInput.addEventListener('change', async (event) => {
if (fileInput.files.length) {
type = fileInput.files[0].type;
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index d5c59ed1..9346f489 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -137,7 +137,7 @@ class Backend_Api:
if 'image' in request.files:
file = request.files['image']
if file.filename != '' and is_allowed_extension(file.filename):
- kwargs['image'] = to_image(file.stream)
+ kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
if 'json' in request.form:
json_data = json.loads(request.form['json'])
else:
diff --git a/g4f/image.py b/g4f/image.py
index 24ded915..61081ea1 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -4,9 +4,9 @@ import base64
from .typing import ImageType, Union
from PIL import Image
-ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp'}
+ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
-def to_image(image: ImageType) -> Image.Image:
+def to_image(image: ImageType, is_svg: bool = False) -> Image.Image:
"""
Converts the input image to a PIL Image object.
@@ -16,6 +16,16 @@ def to_image(image: ImageType) -> Image.Image:
Returns:
Image.Image: The converted PIL Image object.
"""
+ if is_svg:
+ try:
+ import cairosvg
+ except ImportError:
+            raise RuntimeError('Install the "cairosvg" package to open SVG images')
+ if not isinstance(image, bytes):
+ image = image.read()
+ buffer = BytesIO()
+ cairosvg.svg2png(image, write_to=buffer)
+ image = Image.open(buffer)
if isinstance(image, str):
is_data_uri_an_image(image)
image = extract_data_uri(image)
@@ -153,6 +163,8 @@ def to_base64(image: Image.Image, compression_rate: float) -> str:
str: The base64-encoded image.
"""
output_buffer = BytesIO()
+ if image.mode != "RGB":
+ image = image.convert('RGB')
image.save(output_buffer, format="JPEG", quality=int(compression_rate * 100))
return base64.b64encode(output_buffer.getvalue()).decode()
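
A short sketch of the new SVG path through to_image, assuming the optional cairosvg dependency is installed; the inline SVG document is example input:

    from g4f.image import to_image, to_base64

    svg = (b'<svg xmlns="http://www.w3.org/2000/svg" width="8" height="8">'
           b'<rect width="8" height="8" fill="red"/></svg>')
    # Rasterized to PNG via cairosvg, then opened with PIL.
    img = to_image(svg, is_svg=True)
    print(img.size)  # (8, 8)

    # to_base64 now converts non-RGB modes (such as cairosvg's RGBA output)
    # to RGB before JPEG encoding, so this call no longer raises.
    print(to_base64(img, 0.8)[:32])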
diff --git a/g4f/models.py b/g4f/models.py
index 40a72d77..636058d3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,6 +5,7 @@ from .Provider import (
Chatgpt4Online,
ChatgptDemoAi,
GeminiProChat,
+    PerplexityLabs,
ChatgptNext,
HuggingChat,
ChatgptDemo,
@@ -78,7 +79,7 @@ gpt_35_long = Model(
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
- best_provider=RetryProvider([
+ best_provider = RetryProvider([
GptGo, You,
GptForLove, ChatBase,
Chatgpt4Online,
@@ -114,20 +115,20 @@ llama2_13b = Model(
llama2_70b = Model(
name = "meta-llama/Llama-2-70b-chat-hf",
base_provider = "huggingface",
- best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat, PerplexityLabs])
)
# Mistral
mixtral_8x7b = Model(
name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityLabs])
)
mistral_7b = Model(
name = "mistralai/Mistral-7B-Instruct-v0.1",
base_provider = "huggingface",
- best_provider = RetryProvider([DeepInfra, HuggingChat])
+    best_provider = RetryProvider([DeepInfra, HuggingChat, PerplexityLabs])
)
# Dolphin
@@ -311,7 +312,7 @@ llama70b_v2_chat = Model(
pi = Model(
name = 'pi',
base_provider = 'inflection',
- best_provider=Pi
+ best_provider = Pi
)
class ModelUtils:
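
With the retry chains extended above, a plain model-level call can now fall back to the new provider; a minimal sketch, prompt text illustrative:

    import g4f

    # "meta-llama/Llama-2-70b-chat-hf" matches llama2_70b above; RetryProvider
    # tries the listed providers, PerplexityLabs included, until one succeeds.
    reply = g4f.ChatCompletion.create(
        model="meta-llama/Llama-2-70b-chat-hf",
        messages=[{"role": "user", "content": "Say hello."}],
    )
    print(reply)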