summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/GetGpt.py
diff options
context:
space:
mode:
authorTekky <98614666+xtekky@users.noreply.github.com>2023-08-17 14:50:35 +0200
committerGitHub <noreply@github.com>2023-08-17 14:50:35 +0200
commite8066c1df55c429a72e0770ee0e30a0ffc03f019 (patch)
treec8bc44917ea03909cf586140f984ff0814bc30ea /g4f/Provider/GetGpt.py
parent~ | small fixes & new pypi version | v-0.0.1.9 (diff)
parentrefactor: refactor provider (diff)
downloadgpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar.gz
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar.bz2
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar.lz
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar.xz
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.tar.zst
gpt4free-e8066c1df55c429a72e0770ee0e30a0ffc03f019.zip
Diffstat (limited to 'g4f/Provider/GetGpt.py')
-rw-r--r--g4f/Provider/GetGpt.py87
1 files changed, 87 insertions, 0 deletions
diff --git a/g4f/Provider/GetGpt.py b/g4f/Provider/GetGpt.py
new file mode 100644
index 00000000..fb581ecb
--- /dev/null
+++ b/g4f/Provider/GetGpt.py
@@ -0,0 +1,87 @@
+import json
+import os
+import uuid
+
+import requests
+from Crypto.Cipher import AES
+
+from ..typing import Any, CreateResult
+from .base_provider import BaseProvider
+
+
class GetGpt(BaseProvider):
    """Provider backed by chat.getgpt.world (gpt-3.5-turbo, streamed)."""

    url = "https://chat.getgpt.world/"
    supports_stream = True
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        """Yield completion tokens streamed from the GetGpt endpoint.

        Args:
            model: Ignored; the endpoint is always queried as gpt-3.5-turbo
                (the request body hard-codes the model name below).
            messages: OpenAI-style chat messages.
            stream: Ignored; the request always sets ``"stream": True`` and
                this generator yields tokens as they arrive.
            **kwargs: Optional sampling parameters (``temperature``,
                ``top_p``, ``max_tokens``, ``presence_penalty``,
                ``frequency_penalty``).

        Raises:
            requests.HTTPError: If the endpoint returns an error status.
        """
        headers = {
            "Content-Type": "application/json",
            "Referer": "https://chat.getgpt.world/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        }
        data = json.dumps(
            {
                "messages": messages,
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "max_tokens": kwargs.get("max_tokens", 4000),
                "model": "gpt-3.5-turbo",
                "presence_penalty": kwargs.get("presence_penalty", 0),
                "temperature": kwargs.get("temperature", 1),
                "top_p": kwargs.get("top_p", 1),
                "stream": True,
                "uuid": str(uuid.uuid4()),
            }
        )

        # The endpoint expects the JSON body AES-encrypted and wrapped in a
        # "signature" field (see _encrypt below).
        res = requests.post(
            "https://chat.getgpt.world/api/chat/stream",
            headers=headers,
            json={"signature": _encrypt(data)},
            stream=True,
        )

        res.raise_for_status()
        for line in res.iter_lines():
            if b"content" not in line:
                # Skips keep-alives, blank SSE lines and the final "[DONE]"
                # marker, none of which carry a delta payload.
                continue
            # partition() keeps the entire remainder after the first
            # "data: ", whereas split("data: ")[1] would truncate the JSON
            # if the streamed content itself contained "data: ".
            payload = line.decode("utf-8").partition("data: ")[2]
            line_json = json.loads(payload)
            yield line_json["choices"][0]["delta"]["content"]

    # NOTE(review): chaining @classmethod with @property is deprecated since
    # Python 3.11 and no longer works on 3.13+ (GetGpt.params would return
    # the property object). Kept as-is because changing it alters the
    # attribute-access interface; confirm the project's supported versions.
    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider accepts."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
            ("max_tokens", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
def _encrypt(e: str):
    """Encrypt *e* the way the getgpt.world frontend does.

    AES-128-CBC with a freshly random 16-hex-character key and IV; the
    hex ciphertext is returned with the key and IV appended in clear
    (the server re-derives them from the trailing 32 characters).
    """
    # Two independent 8-byte random values, hex-encoded to 16 ASCII chars
    # each: the first serves as the AES-128 key, the second as the CBC IV.
    key = os.urandom(8).hex().encode("utf-8")
    iv = os.urandom(8).hex().encode("utf-8")
    padded = _pad_data(e.encode("utf-8"))
    ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(padded)
    return ciphertext.hex() + key.decode("utf-8") + iv.decode("utf-8")
+
+
+def _pad_data(data: bytes) -> bytes:
+ block_size = AES.block_size
+ padding_size = block_size - len(data) % block_size
+ padding = bytes([padding_size] * padding_size)
+ return data + padding