author | Raju Komati <komatiraju032@gmail.com> | 2023-04-27 22:57:06 +0200 |
---|---|---|
committer | Raju Komati <komatiraju032@gmail.com> | 2023-04-27 22:57:06 +0200 |
commit | b206a1eb6364e519741ceb89e3f3b63910b4e1ef (patch) | |
tree | a5b02f50f60f4e307fa719f053b88f5df478b4e3 /openai_rev | |
parent | added main module for accessing all services (diff) | |
Diffstat (limited to 'openai_rev')
-rw-r--r-- | openai_rev/__init__.py | 1 |
-rw-r--r-- | openai_rev/forefront/__init__.py | 18 |
-rw-r--r-- | openai_rev/openai_rev.py | 23 |
-rw-r--r-- | openai_rev/phind/README.md | 37 |
-rw-r--r-- | openai_rev/quora/__init__.py | 19 |
5 files changed, 29 insertions, 69 deletions
diff --git a/openai_rev/__init__.py b/openai_rev/__init__.py
index e69de29b..9076abe9 100644
--- a/openai_rev/__init__.py
+++ b/openai_rev/__init__.py
@@ -0,0 +1 @@
+from .openai_rev import Provider
diff --git a/openai_rev/forefront/__init__.py b/openai_rev/forefront/__init__.py
index bef10e9e..10202615 100644
--- a/openai_rev/forefront/__init__.py
+++ b/openai_rev/forefront/__init__.py
@@ -1,9 +1,9 @@
 from json import loads
 from re import match
 from time import time, sleep
+from typing import Generator, Optional
 from uuid import uuid4
 
-from altair.vegalite.v3 import Generator
 from fake_useragent import UserAgent
 from requests import post
 from tls_client import Session
@@ -14,7 +14,7 @@ from .models import ForeFrontResponse
 
 class Account:
     @staticmethod
-    def create(proxy=None, logging=False):
+    def create(proxy: Optional[str] = None, logging: bool = False):
         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
 
         start = time()
@@ -57,7 +57,8 @@ class Account:
         while True:
             sleep(1)
             for _ in mail_client.fetch_inbox():
-                print(mail_client.get_message_content(_['id']))
+                if logging:
+                    print(mail_client.get_message_content(_['id']))
                 mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)
 
             if mail_token:
@@ -166,7 +167,9 @@ class Completion:
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
         model='gpt-4',
     ) -> ForeFrontResponse:
+        text = ''
         final_response = None
+        res = list(StreamingCompletion.create(token=token, prompt=prompt))
         for response in StreamingCompletion.create(
             token=token,
             chat_id=chat_id,
@@ -175,6 +178,13 @@ class Completion:
             default_persona=default_persona,
             model=model,
         ):
-            final_response = response
+            if response:
+                final_response = response
+                text += response.text
+
+        if final_response:
+            final_response.text = text
+        else:
+            raise Exception('Unable to get the response, Please try again')
 
         return final_response
diff --git a/openai_rev/openai_rev.py b/openai_rev/openai_rev.py
index 6e1341c7..7d483c01 100644
--- a/openai_rev/openai_rev.py
+++ b/openai_rev/openai_rev.py
@@ -1,12 +1,14 @@
 from enum import Enum
 
-import quora
-import you
+from openai_rev import forefront
+from openai_rev import quora
+from openai_rev import you
 
 
 class Provider(Enum):
     You = 'you'
     Poe = 'poe'
+    ForeFront = 'fore_front'
 
 
 class Completion:
@@ -16,20 +18,17 @@ class Completion:
             return Completion.__poe_service(prompt, **kwargs)
         elif provider == Provider.You:
             return Completion.__you_service(prompt, **kwargs)
+        elif provider == Provider.ForeFront:
+            return Completion.__fore_front_service(prompt, **kwargs)
 
     @classmethod
     def __you_service(cls, prompt: str, **kwargs) -> str:
-        return you.Completion.create(prompt).text
+        return you.Completion.create(prompt, **kwargs).text
 
     @classmethod
     def __poe_service(cls, prompt: str, **kwargs) -> str:
-        return quora.Completion.create(prompt=prompt).text
+        return quora.Completion.create(prompt=prompt, **kwargs).text
 
-
-# usage You
-response = Completion.create(Provider.You, prompt='Write a poem on Lionel Messi')
-print(response)
-
-# usage Poe
-response = Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token='GKzCahZYGKhp76LfE197xw==')
-print(response)
+    @classmethod
+    def __fore_front_service(cls, prompt: str, **kwargs) -> str:
+        return forefront.Completion.create(prompt=prompt, **kwargs).text
diff --git a/openai_rev/phind/README.md b/openai_rev/phind/README.md
deleted file mode 100644
index 85288c06..00000000
--- a/openai_rev/phind/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-### Example: `phind` (use like openai pypi package) <a name="example-phind"></a>
-
-```python
-
-from openai_rev import phind
-
-# set cf_clearance cookie (needed again)
-phind.cf_clearance = 'xx.xx-1682166681-0-160'
-phind.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'  # same as the one from browser you got cf_clearance from
-
-prompt = 'who won the quatar world cup'
-
-# help needed: not getting newlines from the stream, please submit a PR if you know how to fix this
-# stream completion
-for result in phind.StreamingCompletion.create(
-    model='gpt-4',
-    prompt=prompt,
-    results=phind.Search.create(prompt, actualSearch=True),
-    # create search (set actualSearch to False to disable internet)
-    creative=False,
-    detailed=False,
-    codeContext=''):  # up to 3000 chars of code
-
-    print(result.completion.choices[0].text, end='', flush=True)
-
-# normal completion
-result = phind.Completion.create(
-    model='gpt-4',
-    prompt=prompt,
-    results=phind.Search.create(prompt, actualSearch=True),
-    # create search (set actualSearch to False to disable internet)
-    creative=False,
-    detailed=False,
-    codeContext='')  # up to 3000 chars of code
-
-print(result.completion.choices[0].text)
-```
diff --git a/openai_rev/quora/__init__.py b/openai_rev/quora/__init__.py
index f5d9e96e..5303f206 100644
--- a/openai_rev/quora/__init__.py
+++ b/openai_rev/quora/__init__.py
@@ -108,11 +108,6 @@ class Model:
         description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
         handle: str = None,
     ) -> ModelResponse:
-        models = {
-            'gpt-3.5-turbo': 'chinchilla',
-            'claude-instant-v1.0': 'a2',
-            'gpt-4': 'beaver',
-        }
         if not handle:
             handle = f'gptx{randint(1111111, 9999999)}'
 
@@ -148,7 +143,7 @@ class Model:
             obj={
                 'queryName': 'CreateBotMain_poeBotCreate_Mutation',
                 'variables': {
-                    'model': models[model],
+                    'model': MODELS[model],
                     'handle': handle,
                     'prompt': system_prompt,
                     'isPromptPublic': True,
@@ -337,15 +332,7 @@ class Completion:
         prompt: str = 'hello world',
         token: str = '',
     ) -> PoeResponse:
-        models = {
-            'sage': 'capybara',
-            'gpt-4': 'beaver',
-            'claude-v1.2': 'a2_2',
-            'claude-instant-v1.0': 'a2',
-            'gpt-3.5-turbo': 'chinchilla',
-        }
-
-        _model = models[model] if not custom_model else custom_model
+        _model = MODELS[model] if not custom_model else custom_model
 
         client = PoeClient(token)
 
@@ -359,7 +346,7 @@ class Completion:
                 'object': 'text_completion',
                 'created': chunk['creationTime'],
                 'model': _model,
-                'text': chunk['text_new'],
+                'text': chunk['text'],
                 'choices': [
                     {
                         'text': chunk['text'],
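The patch drops the inline `# usage You` / `# usage Poe` examples from the bottom of `openai_rev/openai_rev.py` without replacing them. Below is a minimal usage sketch of the updated dispatcher after this commit, including the new `ForeFront` provider; the prompts mirror the removed examples, the token values are placeholders, and the ForeFront call assumes a token obtained beforehand (e.g. via `forefront.Account.create()`), which this patch does not show.

```python
# Hypothetical usage sketch, not part of the commit itself.
from openai_rev.openai_rev import Completion, Provider

# You provider: no token required by the dispatcher.
print(Completion.create(Provider.You, prompt='Write a poem on Lionel Messi'))

# Poe provider: extra keyword arguments (such as the poe.com token) are now
# forwarded to quora.Completion.create by __poe_service.
print(Completion.create(Provider.Poe, prompt='Write a poem on Lionel Messi', token='<poe-token>'))

# New ForeFront provider: keyword arguments are forwarded to
# forefront.Completion.create, which expects a ForeFront account token
# (assumed here to come from forefront.Account.create()).
print(Completion.create(Provider.ForeFront, prompt='Write a poem on Lionel Messi', token='<forefront-token>'))
```

Note that the `MODELS[model]` lookups introduced in the `openai_rev/quora/__init__.py` hunks imply a module-level mapping, presumably consolidating the two inline `models` dicts removed here; that definition is elsewhere in the file and not part of this patch.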