path: root/g4f/Provider/GPROChat.py
author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-09-29 22:41:08 +0200
committer  GitHub <noreply@github.com>  2024-09-29 22:41:08 +0200
commit     0deb0f60dd4985017d3fcb946e108be8d1f63846 (patch)
tree       e926d6f5551b4eb069e35b41479275056999e6c9 /g4f/Provider/GPROChat.py
parent     Added gpt-4o provider (diff)
parent     feat(g4f/Provider/Nexra.py): enhance model handling and add new providers (diff)
Diffstat (limited to 'g4f/Provider/GPROChat.py')
-rw-r--r--  g4f/Provider/GPROChat.py  78
1 file changed, 78 insertions, 0 deletions
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py
new file mode 100644
index 00000000..a33c9571
--- /dev/null
+++ b/g4f/Provider/GPROChat.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+import codecs
+import hashlib
+import time
+from aiohttp import ClientSession
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "GPROChat"
+    url = "https://gprochat.com"
+    api_endpoint = "https://gprochat.com/api/generate"
+    working = True
+    supports_stream = True
+    supports_message_history = True
+    default_model = 'gemini-pro'
+    # get_model() consults these lookups; define them so resolution is self-contained.
+    models = [default_model]
+    model_aliases = {}
+
+    @staticmethod
+    def generate_signature(timestamp: int, message: str) -> str:
+        # The endpoint expects a SHA-256 hex digest of "<timestamp>:<message>:<secret_key>".
+        secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
+        hash_input = f"{timestamp}:{message}:{secret_key}"
+        signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+        return signature
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        # Resolve the requested model, honoring aliases; fall back to the default.
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str | None = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        # Millisecond timestamp; it is signed together with the formatted prompt.
+        timestamp = int(time.time() * 1000)
+        prompt = format_prompt(messages)
+        sign = cls.generate_signature(timestamp, prompt)
+
+        headers = {
+            "accept": "*/*",
+            "origin": cls.url,
+            "referer": f"{cls.url}/",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+            "content-type": "text/plain;charset=UTF-8"
+        }
+
+        # Gemini-style payload: a single user turn carrying the flattened prompt.
+        data = {
+            "messages": [{"role": "user", "parts": [{"text": prompt}]}],
+            "time": timestamp,
+            "pass": None,
+            "sign": sign
+        }
+
+        async with ClientSession(headers=headers) as session:
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                # Decode incrementally so multi-byte UTF-8 sequences split across
+                # chunks cannot raise UnicodeDecodeError mid-stream.
+                decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        yield decoder.decode(chunk)
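
A minimal usage sketch (not part of the commit): assuming the package layout above, where this module lives at g4f/Provider/GPROChat.py, the provider's classmethod async generator can be driven directly. The model name and prompt here are illustrative.

import asyncio
from g4f.Provider.GPROChat import GPROChat

async def main():
    messages = [{"role": "user", "content": "Say hello in one short sentence."}]
    # create_async_generator yields decoded text chunks as the endpoint streams them.
    async for chunk in GPROChat.create_async_generator(model="gemini-pro", messages=messages):
        print(chunk, end="", flush=True)

asyncio.run(main())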