Diffstat (limited to 'g4f/Provider/Blackbox.py')
-rw-r--r--  g4f/Provider/Blackbox.py  44
1 file changed, 21 insertions, 23 deletions
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index b259b4aa..41905537 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -20,15 +20,15 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
supports_system_message = True
supports_message_history = True
_last_validated_value = None
-
+
default_model = 'blackboxai'
default_vision_model = default_model
default_image_model = 'Image Generation'
image_models = ['Image Generation', 'repomap']
vision_models = [default_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
-
+
userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
-
+
agentMode = {
'Image Generation': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
}
@@ -77,22 +77,21 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}
additional_prefixes = {
- 'gpt-4o': '@gpt-4o',
- 'gemini-pro': '@gemini-pro',
- 'claude-sonnet-3.5': '@claude-sonnet'
- }
+ 'gpt-4o': '@gpt-4o',
+ 'gemini-pro': '@gemini-pro',
+ 'claude-sonnet-3.5': '@claude-sonnet'
+ }
model_prefixes = {
- **{mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
- if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]},
- **additional_prefixes
- }
+ **{
+ mode: f"@{value['id']}" for mode, value in trendingAgentMode.items()
+ if mode not in ["gemini-1.5-flash", "llama-3.1-8b", "llama-3.1-70b", "llama-3.1-405b", "repomap"]
+ },
+ **additional_prefixes
+ }
-
models = list(dict.fromkeys([default_model, *userSelectedModel, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
-
-
model_aliases = {
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-sonnet-3.5",
@@ -131,12 +130,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
return cls._last_validated_value
-
@staticmethod
def generate_id(length=7):
characters = string.ascii_letters + string.digits
return ''.join(random.choice(characters) for _ in range(length))
-
+
@classmethod
def add_prefix_to_messages(cls, messages: Messages, model: str) -> Messages:
prefix = cls.model_prefixes.get(model, "")
@@ -157,6 +155,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
cls,
model: str,
messages: Messages,
+ prompt: str = None,
proxy: str = None,
web_search: bool = False,
image: ImageType = None,
@@ -191,7 +190,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
}
-
+
data = {
"messages": messages,
"id": message_id,
@@ -221,26 +220,25 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
response_text = await response.text()
-
+
if model in cls.image_models:
image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
if image_matches:
image_url = image_matches[0]
- image_response = ImageResponse(images=[image_url], alt="Generated Image")
- yield image_response
+ yield ImageResponse(image_url, prompt)
return
response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
-
+
json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
if json_match:
search_results = json.loads(json_match.group(1))
answer = response_text.split('$~~~$')[-1].strip()
-
+
formatted_response = f"{answer}\n\n**Source:**"
for i, result in enumerate(search_results, 1):
formatted_response += f"\n{i}. {result['title']}: {result['link']}"
-
+
yield formatted_response
else:
yield response_text.strip()