From 6c2e3cc53cdb769f80d7fbb4df418cd4ab8aaabb Mon Sep 17 00:00:00 2001
From: abc <98614666+xtekky@users.noreply.github.com>
Date: Sat, 23 Sep 2023 11:16:19 +0100
Subject: ~ | improve Vercel & g4f.Completion.create

added `.Completion.create` class.

```py
response = g4f.Completion.create(
    model='text-davinci-003', prompt="Hello")

print(response)
```
---
 g4f/Provider/Vercel.py |  8 ++++++--
 g4f/__init__.py        | 27 +++++++++++++++++++++++++++
 2 files changed, 33 insertions(+), 2 deletions(-)

(limited to 'g4f')

diff --git a/g4f/Provider/Vercel.py b/g4f/Provider/Vercel.py
index df6a5df6..ca124fec 100644
--- a/g4f/Provider/Vercel.py
+++ b/g4f/Provider/Vercel.py
@@ -48,9 +48,11 @@ class Vercel(BaseProvider):
             'playgroundId': str(uuid.uuid4()),
             'chatIndex'   : 0} | model_info[model]['default_params']
 
-        server_error = True
-        while server_error:
+        retries = 0
+        max_retries = kwargs.get('max_retries', 20)
+
+        while server_error and not retries > max_retries:
             response = requests.post('https://sdk.vercel.ai/api/generate',
                 headers=headers, json=json_data, stream=True)
 
 
@@ -58,6 +60,8 @@
                 if token != b'Internal Server Error':
                     server_error = False
                     yield (token.decode())
+
+            retries += 1
 
 def AntiBotToken() -> str:
     headers = {
diff --git a/g4f/__init__.py b/g4f/__init__.py
index f3a887f6..c0f70d8a 100644
--- a/g4f/__init__.py
+++ b/g4f/__init__.py
@@ -68,3 +68,30 @@
             raise Exception(f"Provider: {provider.__name__} doesn't support create_async")
 
         return await provider.create_async(model.name, messages, **kwargs)
+
+class Completion:
+    @staticmethod
+    def create(
+        model    : Union[models.Model, str],
+        prompt   : str,
+        provider : Union[type[BaseProvider], None] = None,
+        stream   : bool = False, **kwargs) -> Union[CreateResult, str]:
+
+        allowed_models = [
+            'code-davinci-002',
+            'text-ada-001',
+            'text-babbage-001',
+            'text-curie-001',
+            'text-davinci-002',
+            'text-davinci-003'
+        ]
+
+        if model not in allowed_models:
+            raise Exception(f'ValueError: Can\'t use {model} with Completion.create()')
+
+        model, provider = get_model_and_provider(model, provider, stream)
+
+        result = provider.create_completion(model.name,
+            [{"role": "user", "content": prompt}], stream, **kwargs)
+
+        return result if stream else ''.join(result)
\ No newline at end of file
-- 
cgit v1.2.3