From e02094de5baac85613855c8a6c9ae1298324ad0e Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Sun, 30 Apr 2023 12:42:21 +0100 Subject: added a site and some refratoring --- gpt4free/__init__.py | 9 +++++ gpt4free/usesless/README.md | 23 +++++++++++ gpt4free/usesless/__init__.py | 55 ++++++++++++++++++++++++++ gui/streamlit_chat_app.py | 1 - requirements.txt | 2 +- test.py | 5 --- testing/theb_test.py | 5 +++ testing/useless_test.py | 27 +++++++++++++ unfinished/chatpdf/__init__.py | 87 ++++++++++++++++++++++------------------- unfinished/usesless/README.md | 23 ----------- unfinished/usesless/__init__.py | 51 ------------------------ 11 files changed, 167 insertions(+), 121 deletions(-) create mode 100644 gpt4free/usesless/README.md create mode 100644 gpt4free/usesless/__init__.py delete mode 100644 test.py create mode 100644 testing/theb_test.py create mode 100644 testing/useless_test.py delete mode 100644 unfinished/usesless/README.md delete mode 100644 unfinished/usesless/__init__.py diff --git a/gpt4free/__init__.py b/gpt4free/__init__.py index 5336c825..b4742b64 100644 --- a/gpt4free/__init__.py +++ b/gpt4free/__init__.py @@ -5,6 +5,7 @@ from gpt4free import forefront from gpt4free import quora from gpt4free import theb from gpt4free import you +from gpt4free import usesless class Provider(Enum): @@ -15,6 +16,7 @@ class Provider(Enum): ForeFront = 'fore_front' Theb = 'theb' CoCalc = 'cocalc' + UseLess = 'useless' class Completion: @@ -22,6 +24,7 @@ class Completion: @staticmethod def create(provider: Provider, prompt: str, **kwargs) -> str: + """ Invokes the given provider with given prompt and addition arguments and returns the string response @@ -40,8 +43,14 @@ class Completion: return Completion.__theb_service(prompt, **kwargs) elif provider == Provider.CoCalc: return Completion.__cocalc_service(prompt, **kwargs) + elif provider == Provider.UseLess: + return Completion.__useless_service(prompt, **kwargs) else: raise Exception('Provider not exist, Please try again') + + @staticmethod + def __useless_service(prompt: str, **kwargs) -> str: + return usesless.Completion.create(prompt = prompt, **kwargs) @staticmethod def __you_service(prompt: str, **kwargs) -> str: diff --git a/gpt4free/usesless/README.md b/gpt4free/usesless/README.md new file mode 100644 index 00000000..13e9df8c --- /dev/null +++ b/gpt4free/usesless/README.md @@ -0,0 +1,23 @@ +ai.usesless.com + +to do: + +- use random user agent in header +- make the code better I guess (?) 
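For reference, the `gpt4free/__init__.py` hunk earlier in this patch routes the new `Provider.UseLess` value through `Completion.__useless_service` to `usesless.Completion.create`. A minimal sketch of how that dispatch is used (not part of the patch itself; it assumes the merged package layout and mirrors `testing/useless_test.py` further down):

```python
import gpt4free
from gpt4free import Provider

# Provider.UseLess -> Completion.__useless_service -> usesless.Completion.create,
# which returns the raw response dict parsed from ai.usesless.com.
response = gpt4free.Completion.create(
    provider=Provider.UseLess,
    prompt='what is the capital of France?',  # illustrative prompt
    parentMessageId='',  # empty string starts a fresh conversation
)

print(response['text'])  # assistant reply
print(response['id'])    # feed back as parentMessageId to continue the chat
```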
+ +### Example: `usesless` + +```python +import usesless + +message_id = "" +while True: + prompt = input("Question: ") + if prompt == "!stop": + break + + req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id) + + print(f"Answer: {req['text']}") + message_id = req["id"] +``` diff --git a/gpt4free/usesless/__init__.py b/gpt4free/usesless/__init__.py new file mode 100644 index 00000000..6029009d --- /dev/null +++ b/gpt4free/usesless/__init__.py @@ -0,0 +1,55 @@ +import requests +import json + + +class Completion: + headers = { + "authority": "ai.usesless.com", + "accept": "application/json, text/plain, */*", + "accept-language": "en-US,en;q=0.5", + "cache-control": "no-cache", + "sec-fetch-dest": "empty", + "sec-fetch-mode": "cors", + "sec-fetch-site": "same-origin", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0", + } + + @staticmethod + def create( + systemMessage: str = "You are a helpful assistant", + prompt: str = "", + parentMessageId: str = "", + presence_penalty: float = 1, + temperature: float = 1, + model: str = "gpt-3.5-turbo", + ): + print(parentMessageId, prompt) + + json_data = { + "openaiKey": "", + "prompt": prompt, + "options": { + "parentMessageId": parentMessageId, + "systemMessage": systemMessage, + "completionParams": { + "presence_penalty": presence_penalty, + "temperature": temperature, + "model": model, + }, + }, + } + + url = "https://ai.usesless.com/api/chat-process" + request = requests.post(url, headers=Completion.headers, json=json_data) + content = request.content + + response = Completion.__response_to_json(content) + return response + + @classmethod + def __response_to_json(cls, text) -> dict: + text = str(text.decode("utf-8")) + + split_text = text.rsplit("\n", 1)[1] + to_json = json.loads(split_text) + return to_json diff --git a/gui/streamlit_chat_app.py b/gui/streamlit_chat_app.py index fc5c8d8e..6abc9caf 100644 --- a/gui/streamlit_chat_app.py +++ b/gui/streamlit_chat_app.py @@ -11,7 +11,6 @@ import pickle conversations_file = "conversations.pkl" - def load_conversations(): try: with open(conversations_file, "rb") as f: diff --git a/requirements.txt b/requirements.txt index d30b29aa..366502b5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,4 +10,4 @@ selenium fake-useragent twocaptcha https://github.com/AI-Yash/st-chat/archive/refs/pull/24/head.zip -pydantic +pydantic \ No newline at end of file diff --git a/test.py b/test.py deleted file mode 100644 index 0fd2ec8b..00000000 --- a/test.py +++ /dev/null @@ -1,5 +0,0 @@ -from gpt4free import theb - -for token in theb.Completion.create('hello world'): - print(token, end='', flush=True) - print('asdsos') \ No newline at end of file diff --git a/testing/theb_test.py b/testing/theb_test.py new file mode 100644 index 00000000..0fd2ec8b --- /dev/null +++ b/testing/theb_test.py @@ -0,0 +1,5 @@ +from gpt4free import theb + +for token in theb.Completion.create('hello world'): + print(token, end='', flush=True) + print('asdsos') \ No newline at end of file diff --git a/testing/useless_test.py b/testing/useless_test.py new file mode 100644 index 00000000..9b613aac --- /dev/null +++ b/testing/useless_test.py @@ -0,0 +1,27 @@ +from gpt4free import usesless + +message_id = "" +while True: + prompt = input("Question: ") + if prompt == "!stop": + break + + req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id) + + print(f"Answer: {req['text']}") + message_id = req["id"] + + +import gpt4free + +message_id = "" +while True: + 
prompt = input("Question: ") + if prompt == "!stop": + break + + req = gpt4free.Completion.create(provider = gpt4free.Provider.UseLess, + prompt=prompt, parentMessageId=message_id) + + print(f"Answer: {req['text']}") + message_id = req["id"] \ No newline at end of file diff --git a/unfinished/chatpdf/__init__.py b/unfinished/chatpdf/__init__.py index 4c9d2d3e..30dc1d3e 100644 --- a/unfinished/chatpdf/__init__.py +++ b/unfinished/chatpdf/__init__.py @@ -1,59 +1,66 @@ import requests import json +from queue import Queue, Empty +from threading import Thread +from json import loads +from re import findall + + class Completion: def request(prompt: str): '''TODO: some sort of authentication + upload PDF from URL or local file - Then you should get the atoken and chat ID - ''' - + Then you should get the atoken and chat ID + ''' + token = "your_token_here" chat_id = "your_chat_id_here" url = "https://chat-pr4yueoqha-ue.a.run.app/" payload = json.dumps({ - "v": 2, - "chatSession": { - "type": "join", - "chatId": chat_id - }, - "history": [ - { - "id": "VNsSyJIq_0", - "author": "p_if2GPSfyN8hjDoA7unYe", - "msg": "", - "time": 1682672009270 - }, - { - "id": "Zk8DRUtx_6", - "author": "uplaceholder", - "msg": prompt, - "time": 1682672181339 - } - ] - }) - + "v": 2, + "chatSession": { + "type": "join", + "chatId": chat_id + }, + "history": [ + { + "id": "VNsSyJIq_0", + "author": "p_if2GPSfyN8hjDoA7unYe", + "msg": "", + "time": 1682672009270 + }, + { + "id": "Zk8DRUtx_6", + "author": "uplaceholder", + "msg": prompt, + "time": 1682672181339 + } + ] + }) + # TODO: fix headers, use random user-agent, streaming response, etc headers = { - 'authority': 'chat-pr4yueoqha-ue.a.run.app', - 'accept': '*/*', - 'accept-language': 'en-US,en;q=0.9', - 'atoken': token, - 'content-type': 'application/json', - 'origin': 'https://www.chatpdf.com', - 'referer': 'https://www.chatpdf.com/', - 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' - } + 'authority': 'chat-pr4yueoqha-ue.a.run.app', + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'atoken': token, + 'content-type': 'application/json', + 'origin': 'https://www.chatpdf.com', + 'referer': 'https://www.chatpdf.com/', + 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"Windows"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36' + } - response = requests.request("POST", url, headers=headers, data=payload).text + response = requests.request( + "POST", url, headers=headers, data=payload).text Completion.stream_completed = True return {'response': response} diff --git a/unfinished/usesless/README.md b/unfinished/usesless/README.md deleted file mode 100644 index 13e9df8c..00000000 --- a/unfinished/usesless/README.md +++ /dev/null @@ -1,23 +0,0 @@ -ai.usesless.com - -to do: - -- use random user agent in header -- make the code better I guess (?) 
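The `__response_to_json` helper added in `gpt4free/usesless/__init__.py` above relies on ai.usesless.com returning newline-delimited JSON, with only the final line carrying the finished message. A small sketch of that parsing step, using an invented payload rather than a captured response:

```python
import json

# Invented body: progressive JSON objects arrive one per line; only the
# final, complete line is kept, exactly as __response_to_json does.
content = (
    b'{"id": "abc123", "text": "Pa"}\n'
    b'{"id": "abc123", "text": "Paris", "parentMessageId": ""}'
)

text = content.decode("utf-8")
last_line = text.rsplit("\n", 1)[1]  # same split as __response_to_json
reply = json.loads(last_line)

print(reply["text"])  # -> Paris
print(reply["id"])    # -> abc123
```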
- -### Example: `usesless` - -```python -import usesless - -message_id = "" -while True: - prompt = input("Question: ") - if prompt == "!stop": - break - - req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id) - - print(f"Answer: {req['text']}") - message_id = req["id"] -``` diff --git a/unfinished/usesless/__init__.py b/unfinished/usesless/__init__.py deleted file mode 100644 index 6f9a47ef..00000000 --- a/unfinished/usesless/__init__.py +++ /dev/null @@ -1,51 +0,0 @@ -import requests -import json - - -class Completion: - headers = { - "authority": "ai.usesless.com", - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.5", - "cache-control": "no-cache", - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0", - } - - @staticmethod - def create( - systemMessage: str = "You are a helpful assistant", - prompt: str = "", - parentMessageId: str = "", - presence_penalty: float = 1, - temperature: float = 1, - model: str = "gpt-3.5-turbo", - ): - json_data = { - "openaiKey": "", - "prompt": prompt, - "options": { - "parentMessageId": parentMessageId, - "systemMessage": systemMessage, - "completionParams": { - "presence_penalty": presence_penalty, - "temperature": temperature, - "model": model, - }, - }, - } - - url = "https://ai.usesless.com/api/chat-process" - request = requests.post(url, headers=Completion.headers, json=json_data) - content = request.content - response = Completion.__response_to_json(content) - return response - - @classmethod - def __response_to_json(cls, text) -> dict: - text = str(text.decode("utf-8")) - split_text = text.rsplit("\n", 1)[1] - to_json = json.loads(split_text) - return to_json -- cgit v1.2.3 From 5867f14aa43f261e44861e613cb56fdac45d39e3 Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Sun, 30 Apr 2023 14:19:27 +0100 Subject: Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a2ec683c..6ae23fc0 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ ##### You may join our discord server for updates and support ; ) -- [Discord Link](https://discord.gg/gpt4free) +- [discord.gg/gpt4free](https://discord.gg/gpt4free) image -- cgit v1.2.3 From 08abfe0ebb24b9b6b83f97f7c00d6b6e88967f8e Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Sun, 30 Apr 2023 18:39:04 +0100 Subject: forefront takedown --- gpt4free/forefront/README.md | 16 ---- gpt4free/forefront/__init__.py | 193 ----------------------------------------- gpt4free/forefront/typing.py | 26 ------ 3 files changed, 235 deletions(-) delete mode 100644 gpt4free/forefront/README.md delete mode 100644 gpt4free/forefront/__init__.py delete mode 100644 gpt4free/forefront/typing.py diff --git a/gpt4free/forefront/README.md b/gpt4free/forefront/README.md deleted file mode 100644 index 3d0aac4d..00000000 --- a/gpt4free/forefront/README.md +++ /dev/null @@ -1,16 +0,0 @@ -### Example: `forefront` (use like openai pypi package) - -```python - -from gpt4free import forefront - -# create an account -token = forefront.Account.create(logging=False) -print(token) - -# get a response -for response in forefront.StreamingCompletion.create(token=token, - prompt='hello world', model='gpt-4'): - print(response.completion.choices[0].text, end='') -print("") -``` diff --git 
a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py deleted file mode 100644 index 7a4f4f28..00000000 --- a/gpt4free/forefront/__init__.py +++ /dev/null @@ -1,193 +0,0 @@ -from json import loads -from re import findall -from time import time, sleep -from typing import Generator, Optional -from uuid import uuid4 - -from fake_useragent import UserAgent -from requests import post -from pymailtm import MailTm, Message -from tls_client import Session - -from .typing import ForeFrontResponse - - -class Account: - @staticmethod - def create(proxy: Optional[str] = None, logging: bool = False): - proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False - - start = time() - - mail_client = MailTm().get_account() - mail_address = mail_client.address - - client = Session(client_identifier='chrome110') - client.proxies = proxies - client.headers = { - 'origin': 'https://accounts.forefront.ai', - 'user-agent': UserAgent().random, - } - - response = client.post( - 'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.38.4', - data={'email_address': mail_address}, - ) - - try: - trace_token = response.json()['response']['id'] - if logging: - print(trace_token) - except KeyError: - return 'Failed to create account!' - - response = client.post( - f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4', - data={ - 'strategy': 'email_link', - 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify' - }, - ) - - if logging: - print(response.text) - - if 'sign_up_attempt' not in response.text: - return 'Failed to create account!' - - while True: - sleep(1) - new_message: Message = mail_client.wait_for_message() - if logging: - print(new_message.data['id']) - - verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)[0] - - if verification_url: - break - - if logging: - print(verification_url) - - response = client.get(verification_url) - - response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4') - - token = response.json()['response']['sessions'][0]['last_active_token']['jwt'] - - with open('accounts.txt', 'a') as f: - f.write(f'{mail_address}:{token}\n') - - if logging: - print(time() - start) - - return token - - -class StreamingCompletion: - @staticmethod - def create( - token=None, - chat_id=None, - prompt='', - action_type='new', - default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default - model='gpt-4', - proxy=None - ) -> Generator[ForeFrontResponse, None, None]: - if not token: - raise Exception('Token is required!') - if not chat_id: - chat_id = str(uuid4()) - - proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None - - headers = { - 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com', - 'accept': '*/*', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'authorization': 'Bearer ' + token, - 'cache-control': 'no-cache', - 'content-type': 'application/json', - 'origin': 'https://chat.forefront.ai', - 'pragma': 'no-cache', - 'referer': 'https://chat.forefront.ai/', - 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': UserAgent().random, - } - - json_data = { - 'text': prompt, - 'action': action_type, - 
'parentId': chat_id, - 'workspaceId': chat_id, - 'messagePersona': default_persona, - 'model': model, - } - - for chunk in post( - 'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat', - headers=headers, - proxies=proxies, - json=json_data, - stream=True, - ).iter_lines(): - if b'finish_reason":null' in chunk: - data = loads(chunk.decode('utf-8').split('data: ')[1]) - token = data['choices'][0]['delta'].get('content') - - if token is not None: - yield ForeFrontResponse( - **{ - 'id': chat_id, - 'object': 'text_completion', - 'created': int(time()), - 'text': token, - 'model': model, - 'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}], - 'usage': { - 'prompt_tokens': len(prompt), - 'completion_tokens': len(token), - 'total_tokens': len(prompt) + len(token), - }, - } - ) - - -class Completion: - @staticmethod - def create( - token=None, - chat_id=None, - prompt='', - action_type='new', - default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default - model='gpt-4', - proxy=None - ) -> ForeFrontResponse: - text = '' - final_response = None - for response in StreamingCompletion.create( - token=token, - chat_id=chat_id, - prompt=prompt, - action_type=action_type, - default_persona=default_persona, - model=model, - proxy=proxy - ): - if response: - final_response = response - text += response.text - - if final_response: - final_response.text = text - else: - raise Exception('Unable to get the response, Please try again') - - return final_response diff --git a/gpt4free/forefront/typing.py b/gpt4free/forefront/typing.py deleted file mode 100644 index 23e90903..00000000 --- a/gpt4free/forefront/typing.py +++ /dev/null @@ -1,26 +0,0 @@ -from typing import Any, List - -from pydantic import BaseModel - - -class Choice(BaseModel): - text: str - index: int - logprobs: Any - finish_reason: str - - -class Usage(BaseModel): - prompt_tokens: int - completion_tokens: int - total_tokens: int - - -class ForeFrontResponse(BaseModel): - id: str - object: str - created: int - model: str - choices: List[Choice] - usage: Usage - text: str -- cgit v1.2.3 From f710441a6d95816e8998a5d1d2363347fb87b88c Mon Sep 17 00:00:00 2001 From: tr7zw Date: Sun, 30 Apr 2023 20:57:31 +0000 Subject: Fix docker-compose.yml The image/dockerfile lines are incorrectly indented, also the read_only flag prevents the UI from starting due to not being able to write to the /tmp folder. --- docker-compose.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index e8fcb0de..e8e7119b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,11 +4,9 @@ services: gpt4: build: context: . 
- dockerfile: Dockerfile - image: gpt4free:latest + dockerfile: Dockerfile + image: gpt4free:latest container_name: gpt4 ports: - 8501:8501 restart: unless-stopped - read_only: true - -- cgit v1.2.3 From b4aadbbac588edff8902fa802024779f51a5ae39 Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Sun, 30 Apr 2023 22:09:25 +0100 Subject: forefront --- gpt4free/forefront/README.md | 13 +++ gpt4free/forefront/__init__.py | 194 +++++++++++++++++++++++++++++++++++++++++ gpt4free/forefront/typing.py | 25 ++++++ 3 files changed, 232 insertions(+) create mode 100644 gpt4free/forefront/README.md create mode 100644 gpt4free/forefront/__init__.py create mode 100644 gpt4free/forefront/typing.py diff --git a/gpt4free/forefront/README.md b/gpt4free/forefront/README.md new file mode 100644 index 00000000..35ba9897 --- /dev/null +++ b/gpt4free/forefront/README.md @@ -0,0 +1,13 @@ +### Example: `forefront` (use like openai pypi package) + +```python +from gpt4free import forefront +# create an account +token = forefront.Account.create(logging=False) +print(token) +# get a response +for response in forefront.StreamingCompletion.create(token=token, + prompt='hello world', model='gpt-4'): + print(response.completion.choices[0].text, end='') +print("") +``` \ No newline at end of file diff --git a/gpt4free/forefront/__init__.py b/gpt4free/forefront/__init__.py new file mode 100644 index 00000000..969b33b8 --- /dev/null +++ b/gpt4free/forefront/__init__.py @@ -0,0 +1,194 @@ +from json import loads +from re import findall +from time import time, sleep +from typing import Generator, Optional +from uuid import uuid4 + +from fake_useragent import UserAgent +from requests import post +from pymailtm import MailTm, Message +from tls_client import Session + +from .typing import ForeFrontResponse + + +class Account: + @staticmethod + def create(proxy: Optional[str] = None, logging: bool = False): + proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False + + start = time() + + mail_client = MailTm().get_account() + mail_address = mail_client.address + + client = Session(client_identifier='chrome110') + client.proxies = proxies + client.headers = { + 'origin': 'https://accounts.forefront.ai', + 'user-agent': UserAgent().random, + } + + response = client.post( + 'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.38.4', + data={'email_address': mail_address}, + ) + + try: + trace_token = response.json()['response']['id'] + if logging: + print(trace_token) + except KeyError: + return 'Failed to create account!' + + response = client.post( + f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4', + data={ + 'strategy': 'email_link', + 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify' + }, + ) + + if logging: + print(response.text) + + if 'sign_up_attempt' not in response.text: + return 'Failed to create account!' 
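In the polling loop that follows, `Account.create` extracts the Clerk sign-up confirmation link from the mail.tm message with a regular expression. A standalone sketch of that extraction, run on a made-up email body with an invented token value:

```python
from re import findall

# Made-up email text; the real body comes from pymailtm's wait_for_message().
email_text = (
    "Welcome to Forefront!\n"
    "Confirm your address: https://clerk.forefront.ai/v1/verify?token=abc123XYZ\n"
    "If you did not sign up, you can ignore this message."
)

# Same pattern Account.create uses to locate the verification link.
matches = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', email_text)
verification_url = matches[0] if matches else None

print(verification_url)
# -> https://clerk.forefront.ai/v1/verify?token=abc123XYZ
```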
+ + while True: + sleep(1) + new_message: Message = mail_client.wait_for_message() + if logging: + print(new_message.data['id']) + + verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)[0] + + if verification_url: + break + + if logging: + print(verification_url) + + response = client.get(verification_url) + + response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4') + + token = response.json()['response']['sessions'][0]['last_active_token']['jwt'] + + with open('accounts.txt', 'a') as f: + f.write(f'{mail_address}:{token}\n') + + if logging: + print(time() - start) + + return token + + +class StreamingCompletion: + @staticmethod + def create( + token=None, + chat_id=None, + prompt='', + action_type='new', + default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default + model='gpt-4', + proxy=None + ) -> Generator[ForeFrontResponse, None, None]: + if not token: + raise Exception('Token is required!') + if not chat_id: + chat_id = str(uuid4()) + + proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy } if proxy else None + + headers = { + 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com', + 'accept': '*/*', + 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', + 'authorization': 'Bearer ' + token, + 'cache-control': 'no-cache', + 'content-type': 'application/json', + 'origin': 'https://chat.forefront.ai', + 'pragma': 'no-cache', + 'referer': 'https://chat.forefront.ai/', + 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"', + 'sec-ch-ua-mobile': '?0', + 'sec-ch-ua-platform': '"macOS"', + 'sec-fetch-dest': 'empty', + 'sec-fetch-mode': 'cors', + 'sec-fetch-site': 'cross-site', + 'user-agent': UserAgent().random, + } + + json_data = { + 'text': prompt, + 'action': action_type, + 'parentId': chat_id, + 'workspaceId': chat_id, + 'messagePersona': default_persona, + 'model': model, + } + + for chunk in post( + 'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat', + headers=headers, + proxies=proxies, + json=json_data, + stream=True, + ).iter_lines(): + if b'finish_reason":null' in chunk: + data = loads(chunk.decode('utf-8').split('data: ')[1]) + token = data['choices'][0]['delta'].get('content') + + if token is not None: + yield ForeFrontResponse( + **{ + 'id': chat_id, + 'object': 'text_completion', + 'created': int(time()), + 'text': token, + 'model': model, + 'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}], + 'usage': { + 'prompt_tokens': len(prompt), + 'completion_tokens': len(token), + 'total_tokens': len(prompt) + len(token), + }, + } + ) + + +class Completion: + @staticmethod + def create( + token=None, + chat_id=None, + prompt='', + action_type='new', + default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default + model='gpt-4', + proxy=None + ) -> ForeFrontResponse: + text = '' + final_response = None + for response in StreamingCompletion.create( + token=token, + chat_id=chat_id, + prompt=prompt, + action_type=action_type, + default_persona=default_persona, + model=model, + proxy=proxy + ): + if response: + final_response = response + text += response.text + + if final_response: + final_response.text = text + else: + raise Exception('Unable to get the response, Please try again') + + return final_response + \ No newline at end of file diff --git a/gpt4free/forefront/typing.py b/gpt4free/forefront/typing.py new file mode 
100644 index 00000000..a9025419 --- /dev/null +++ b/gpt4free/forefront/typing.py @@ -0,0 +1,25 @@ +from typing import Any, List +from pydantic import BaseModel + + +class Choice(BaseModel): + text: str + index: int + logprobs: Any + finish_reason: str + + +class Usage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class ForeFrontResponse(BaseModel): + id: str + object: str + created: int + model: str + choices: List[Choice] + usage: Usage + text: str \ No newline at end of file -- cgit v1.2.3 From 6e3cfe41c7fb01dc170aee7b7a10c10247dc8b48 Mon Sep 17 00:00:00 2001 From: Daniel Shemesh Date: Mon, 1 May 2023 01:18:57 +0300 Subject: Update README.md Improved overall design and organization --- README.md | 67 ++++++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/README.md b/README.md index 6ae23fc0..8582e414 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,25 @@ -##### You may join our discord server for updates and support ; ) - -- [discord.gg/gpt4free](https://discord.gg/gpt4free) - -image - -Just API's from some language model sites. - -## Legal Notice - -This repository uses third-party APIs and is _not_ associated with or endorsed by the API providers. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security. - -Please note the following: - -1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them. - -2. **Responsibility**: The author of this repository is _not_ responsible for any consequences arising from the use or misuse of this repository or the content provided by the third-party APIs and any damage or losses caused by users' actions. - -3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations. + + + + + + + +
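Rounding off the forefront re-add (commit b4aadbba): `StreamingCompletion.create` keeps only stream lines containing `finish_reason":null` and decodes the JSON after the `data: ` prefix. A sketch of that per-chunk handling on a fabricated chunk; the field values are invented, and the live lines come from `iter_lines()` on the chat-server response:

```python
from json import loads

# Fabricated example of a single streamed line (compact JSON, as the server sends it).
chunk = b'data: {"choices":[{"delta":{"content":"Hello"},"index":0,"finish_reason":null}]}'

if b'finish_reason":null' in chunk:
    data = loads(chunk.decode('utf-8').split('data: ')[1])
    token = data['choices'][0]['delta'].get('content')
    if token is not None:
        print(token, end='')  # -> Hello
```

The blocking `Completion.create` wrapper in the same file simply drains this generator and concatenates each `response.text` into a single `ForeFrontResponse`.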
+

gpt4free

+

Join our gpt4free Discord community! gpt4free Discord

+ image +

Just APIs from some language model sites.

+
+ + Star History Chart + +
+ + +# Open-Source Projects by xtekky + +
🎁 Projects ⭐ Stars 📚 Forks 🛎 Issues 📬 Pull requests
gpt4free Stars Forks Issues Pull Requests
ChatGPT-Clone Stars Forks Issues Pull Requests
## Table of Contents | Section | Description | Link | Status | @@ -28,9 +31,6 @@ Please note the following: | **Docker** | Instructions on how to run gpt4free in a Docker container | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#docker-instructions) | - | | **ChatGPT clone** | A ChatGPT clone with new features and scalability | [![Link to Website](https://img.shields.io/badge/Link-Visit%20Site-blue)](https://chat.chatbot.sex/chat) | - | | **How to install** | Instructions on how to install gpt4free | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#install) | - | -| **Legal Notice** | Legal notice or disclaimer | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#legal-notice) | - | -| **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - | -| **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - | | **Usage Examples** | | | | | `theb` | Example usage for theb (gpt-3.5) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/theb/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | | `forefront` | Example usage for forefront (gpt-4) | [![Link to File](https://img.shields.io/badge/Link-Go%20to%20File-blue)](gpt4free/forefront/README.md) | ![Active](https://img.shields.io/badge/Active-brightgreen) | || @@ -39,8 +39,12 @@ Please note the following: | **Try it Out** | | | | | Google Colab Jupyter Notebook | Example usage for gpt4free | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/DanielShemesh/gpt4free-colab/blob/main/gpt4free.ipynb) | - | | replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - | +| **Legal Notice** | Legal notice or disclaimer | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#legal-notice) | - | +| **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - | + + -## Todo +## To do list - [x] Add a GUI for the repo - [ ] Make a general package named `gpt4free`, instead of different folders @@ -82,6 +86,7 @@ install requirements with: pip3 install -r requirements.txt ``` + ## To start gpt4free GUI Move `streamlit_app.py` from `./gui` to the base folder @@ -121,6 +126,18 @@ docker-compose up -d > This site was developed by me and includes **gpt-4/3.5**, **internet access** and **gpt-jailbreak's** like DAN > run locally here: https://github.com/xtekky/chatgpt-clone +## Legal Notice + +This repository uses third-party APIs and is _not_ associated with or endorsed by the API providers. This project is intended **for educational purposes only**. This is just a little personal project. Sites may contact me to improve their security. + +Please note the following: + +1. **Disclaimer**: The APIs, services, and trademarks mentioned in this repository belong to their respective owners. This project is _not_ claiming any right over them. + +2. 
**Responsibility**: The author of this repository is _not_ responsible for any consequences arising from the use or misuse of this repository or the content provided by the third-party APIs and any damage or losses caused by users' actions. + +3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations. + ## Copyright: This program is licensed under the [GNU GPL v3](https://www.gnu.org/licenses/gpl-3.0.txt) @@ -146,7 +163,3 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . ``` - -## Star History - -[![Star History Chart](https://api.star-history.com/svg?repos=xtekky/gpt4free&type=Date)](https://star-history.com/#xtekky/gpt4free) -- cgit v1.2.3 From 408a169c410c95f652b42f8344d7b5c90b91693d Mon Sep 17 00:00:00 2001 From: Daniel Shemesh Date: Mon, 1 May 2023 01:29:27 +0300 Subject: Update README.md resized banner + moved star history to end --- README.md | 57 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 8582e414..c73828fa 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,39 @@ +gpt4free logo - - - - - - -
-

gpt4free

-

Join our gpt4free Discord community! gpt4free Discord

- image -

Just API's from some language model sites.

-
- - Star History Chart - -
+Just API's from some language model sites. +

Join our gpt4free Discord community! gpt4free Discord

# Open-Source Projects by xtekky -
🎁 Projects ⭐ Stars 📚 Forks 🛎 Issues 📬 Pull requests
gpt4free Stars Forks Issues Pull Requests
ChatGPT-Clone Stars Forks Issues Pull Requests
+ + + + + + + + + + + + + + + + + + + + + + + + + + +
🎁 Projects ⭐ Stars 📚 Forks 🛎 Issues 📬 Pull requests
gpt4free Stars Forks Issues Pull Requests
ChatGPT-Clone Stars Forks Issues Pull Requests
+ ## Table of Contents | Section | Description | Link | Status | @@ -41,7 +55,7 @@ | replit Example (feel free to fork this repl) | Example usage for gpt4free | [![](https://img.shields.io/badge/Open%20in-Replit-1A1E27?logo=replit)](https://replit.com/@gpt4free/gpt4free-webui) | - | | **Legal Notice** | Legal notice or disclaimer | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#legal-notice) | - | | **Copyright** | Copyright information | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#copyright) | - | - +| **Star History** | Star History | [![Link to Section](https://img.shields.io/badge/Link-Go%20to%20Section-blue)](#star-history) | - | ## To do list @@ -163,3 +177,10 @@ GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . ``` + + +## Star History + + + Star History Chart + -- cgit v1.2.3 From e47236b13711a36a2281c3e308e81d4d8401b927 Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Mon, 1 May 2023 01:30:46 +0100 Subject: Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c73828fa..249f7453 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ gpt4free logo Just API's from some language model sites. -

Join our gpt4free Discord community! gpt4free Discord

+

Join our [discord.gg/gpt4free](https://discord.com/invite/gpt4free) Discord community! gpt4free Discord

-# Open-Source Projects by xtekky +# Related gpt4free projects -- cgit v1.2.3 From f8f9882fe0fbe9c886d21c55c59e252a459ba534 Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Mon, 1 May 2023 01:31:59 +0100 Subject: Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 249f7453..656031b8 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ gpt4free logo Just API's from some language model sites. -

Join our [discord.gg/gpt4free](https://discord.com/invite/gpt4free) Discord community! gpt4free Discord

+

Join our discord.gg/gpt4free Discord community! gpt4free Discord

# Related gpt4free projects -- cgit v1.2.3 From 8b0cb0cbc1b00f201d8374c3e3b6e582c8827c91 Mon Sep 17 00:00:00 2001 From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com> Date: Mon, 1 May 2023 01:35:00 +0100 Subject: Update README.md --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 656031b8..9fd505f2 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,13 @@ Just API's from some language model sites. + + + + + + +
Issues Pull Requests
ChatGpt Discord Bot Stars Forks Issues Pull Requests
-- cgit v1.2.3