Diffstat (limited to 'g4f/Provider/Blackbox.py')
-rw-r--r--  g4f/Provider/Blackbox.py  127
1 file changed, 95 insertions(+), 32 deletions(-)
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 6e1e3949..e607a43c 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,16 +1,50 @@
 from __future__ import annotations
 
-import uuid
-import secrets
+import re
+import json
+import random
+import string
 from aiohttp import ClientSession
 
 from ..typing import AsyncResult, Messages, ImageType
-from ..image import to_data_uri
-from .base_provider import AsyncGeneratorProvider
+from ..image import ImageResponse, to_data_uri
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-class Blackbox(AsyncGeneratorProvider):
+class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
+    api_endpoint = "https://www.blackbox.ai/api/chat"
     working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'blackbox'
+    models = [
+        'blackbox',
+        'gemini-1.5-flash',
+        "llama-3.1-8b",
+        'llama-3.1-70b',
+        'llama-3.1-405b',
+        'ImageGenerationLV45LJp'
+    ]
+
+    model_config = {
+        "blackbox": {},
+        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
+        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
+        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
+        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
+        'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
 
     @classmethod
     async def create_async_generator(
@@ -22,43 +56,72 @@ class Blackbox(AsyncGeneratorProvider):
         image_name: str = None,
         **kwargs
     ) -> AsyncResult:
-        if image is not None:
-            messages[-1]["data"] = {
-                "fileText": image_name,
-                "imageBase64": to_data_uri(image)
-            }
+        model = cls.get_model(model)
+
         headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
-            "Accept": "*/*",
-            "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": cls.url,
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "DNT": "1",
-            "Sec-GPC": "1",
-            "Alt-Used": "www.blackbox.ai",
-            "Connection": "keep-alive",
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
         }
+
         async with ClientSession(headers=headers) as session:
-            random_id = secrets.token_hex(16)
-            random_user_id = str(uuid.uuid4())
+            if image is not None:
+                messages[-1]["data"] = {
+                    "fileText": image_name,
+                    "imageBase64": to_data_uri(image)
+                }
+
+            random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
+
             data = {
                 "messages": messages,
                 "id": random_id,
-                "userId": random_user_id,
+                "previewToken": None,
+                "userId": None,
                 "codeModelMode": True,
                 "agentMode": {},
                 "trendingAgentMode": {},
                 "isMicMode": False,
+                "maxTokens": None,
                 "isChromeExt": False,
-                "playgroundMode": False,
-                "webSearchMode": False,
-                "userSystemPrompt": "",
-                "githubToken": None
+                "githubToken": None,
+                "clickedAnswer2": False,
+                "clickedAnswer3": False,
+                "clickedForceWebSearch": False,
+                "visitFromDelta": False,
+                "mobileClient": False
             }
-            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+
+            if model == 'ImageGenerationLV45LJp':
+                data["agentMode"] = cls.model_config[model]
+            else:
+                data["trendingAgentMode"] = cls.model_config[model]
+
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        yield chunk.decode()
+                if model == 'ImageGenerationLV45LJp':
+                    response_text = await response.text()
+                    url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
+                    if url_match:
+                        image_url = url_match.group(0)
+                        yield ImageResponse(image_url, alt=messages[-1]['content'])
+                    else:
+                        raise Exception("Image URL not found in the response")
+                else:
+                    async for chunk in response.content:
+                        if chunk:
+                            decoded_chunk = chunk.decode()
+                            if decoded_chunk.startswith('$@$v=undefined-rv1$@$'):
+                                decoded_chunk = decoded_chunk[len('$@$v=undefined-rv1$@$'):]
+                            yield decoded_chunk
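
Usage sketch (not part of the commit above): a minimal, hypothetical way to exercise the updated provider, assuming the g4f package is installed and the module sits at g4f/Provider/Blackbox.py as the diff shows. Text models stream plain string chunks, while the 'ImageGenerationLV45LJp' agent yields a single ImageResponse wrapping the generated image URL; the prompts and the direct call to create_async_generator are illustrative only.

import asyncio

from g4f.image import ImageResponse          # same module the provider imports from
from g4f.Provider.Blackbox import Blackbox   # module path taken from the diff above


async def main() -> None:
    # Text chat: the default 'blackbox' model streams plain string chunks.
    messages = [{"role": "user", "content": "Explain asyncio in one sentence."}]
    async for chunk in Blackbox.create_async_generator(model="blackbox", messages=messages):
        print(chunk, end="", flush=True)
    print()

    # Image generation: this model yields one ImageResponse instead of text chunks.
    prompt = [{"role": "user", "content": "a lighthouse at sunset"}]
    async for item in Blackbox.create_async_generator(model="ImageGenerationLV45LJp", messages=prompt):
        if isinstance(item, ImageResponse):
            print(item)  # the response object wraps the generated image URL


asyncio.run(main())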