From f55f867a01b279992470d992fae55cd2e559a9ea Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Fri, 25 Oct 2024 19:43:55 +0300
Subject: feat(g4f/client/client.py): add system prompt support

---
 g4f/client/client.py | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

(limited to 'g4f/client/client.py')

diff --git a/g4f/client/client.py b/g4f/client/client.py
index 41238df5..2772f9bb 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -149,6 +149,7 @@ class Completions:
         self,
         messages: Messages,
         model: str,
+        system: str = None,  # Added system parameter
         provider: ProviderType = None,
         stream: bool = False,
         proxy: str = None,
@@ -161,6 +162,12 @@ class Completions:
         ignore_stream: bool = False,
         **kwargs
     ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
+        # If a system prompt is provided, prepend it to the messages
+        if system:
+            system_message = {"role": "system", "content": system}
+            messages = [system_message] + messages
+
+        # Existing implementation continues...
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
@@ -221,6 +228,7 @@ class Completions:
         self,
         messages: Messages,
         model: str,
+        system: str = None,  # Added system parameter
         provider: ProviderType = None,
         stream: bool = False,
         proxy: str = None,
@@ -233,6 +241,12 @@ class Completions:
         ignore_stream: bool = False,
         **kwargs
     ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
+        # If a system prompt is provided, prepend it to the messages
+        if system:
+            system_message = {"role": "system", "content": system}
+            messages = [system_message] + messages
+
+        # Existing implementation continues...
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
@@ -271,16 +285,18 @@ class Completions:
             **kwargs
         )

-        # Removed 'await' here since 'async_iter_response' returns an async generator
-        response = async_iter_response(response, stream, response_format, max_tokens, stop)
-        response = async_iter_append_model_and_provider(response)
-
+        # Handle streaming or non-streaming responses
         if stream:
+            response = async_iter_response(response, stream, response_format, max_tokens, stop)
+            response = async_iter_append_model_and_provider(response)
             return response
         else:
+            response = async_iter_response(response, stream, response_format, max_tokens, stop)
+            response = async_iter_append_model_and_provider(response)
             async for result in response:
                 return result

+
 class Chat:
     completions: Completions

@@ -401,6 +417,12 @@ class Image:
     def __repr__(self):
         return f"Image(url={self.url}, b64_json={'' if self.b64_json else None})"

+    def to_dict(self):
+        return {
+            "url": self.url,
+            "b64_json": self.b64_json
+        }
+
 class ImagesResponse:
     def __init__(self, data: list[Image]):
         self.data = data
--
cgit v1.2.3

From e79c8b01f58d21502c962f38c804bf81196f89fb Mon Sep 17 00:00:00 2001
From: kqlio67
Date: Tue, 29 Oct 2024 22:03:05 +0200
Subject: Update (docs/async_client.md docs/client.md docs/interference-api.md g4f/client/client.py)

---
 g4f/client/client.py | 31 +++++--------------------------
 1 file changed, 5 insertions(+), 26 deletions(-)

(limited to 'g4f/client/client.py')

diff --git a/g4f/client/client.py b/g4f/client/client.py
index 2772f9bb..44d99d60 100644
--- a/g4f/client/client.py
+++ b/g4f/client/client.py
@@ -149,7 +149,6 @@ class Completions:
         self,
         messages: Messages,
         model: str,
-        system: str = None,  # Added system parameter
         provider: ProviderType = None,
         stream: bool = False,
         proxy: str = None,
@@ -162,12 +161,6 @@ class Completions:
         ignore_stream: bool = False,
         **kwargs
     ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
-        # If a system prompt is provided, prepend it to the messages
-        if system:
-            system_message = {"role": "system", "content": system}
-            messages = [system_message] + messages
-
-        # Existing implementation continues...
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
@@ -228,7 +221,6 @@ class Completions:
         self,
         messages: Messages,
         model: str,
-        system: str = None,  # Added system parameter
         provider: ProviderType = None,
         stream: bool = False,
         proxy: str = None,
@@ -241,12 +233,6 @@ class Completions:
         ignore_stream: bool = False,
         **kwargs
     ) -> Union[ChatCompletion, AsyncIterator[ChatCompletionChunk]]:
-        # If a system prompt is provided, prepend it to the messages
-        if system:
-            system_message = {"role": "system", "content": system}
-            messages = [system_message] + messages
-
-        # Existing implementation continues...
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
@@ -285,18 +271,16 @@ class Completions:
             **kwargs
         )

-        # Handle streaming or non-streaming responses
+        # Removed 'await' here since 'async_iter_response' returns an async generator
+        response = async_iter_response(response, stream, response_format, max_tokens, stop)
+        response = async_iter_append_model_and_provider(response)
+
         if stream:
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            response = async_iter_append_model_and_provider(response)
             return response
         else:
-            response = async_iter_response(response, stream, response_format, max_tokens, stop)
-            response = async_iter_append_model_and_provider(response)
             async for result in response:
                 return result

-
 class Chat:
     completions: Completions

@@ -417,12 +401,6 @@ class Image:
     def __repr__(self):
         return f"Image(url={self.url}, b64_json={'' if self.b64_json else None})"

-    def to_dict(self):
-        return {
-            "url": self.url,
-            "b64_json": self.b64_json
-        }
-
 class ImagesResponse:
     def __init__(self, data: list[Image]):
         self.data = data
@@ -530,3 +508,4 @@ class Images:
     async def create_variation(self, image: Union[str, bytes], model: str = None, response_format: str = "url", **kwargs):
         # Existing implementation, adjust if you want to support b64_json here as well
         pass
+
--
cgit v1.2.3
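
For context, a minimal usage sketch of the client API these patches touch. The first commit would have accepted the system prompt as a dedicated `system` keyword argument; after the second commit reverts that change, the usual route is a system-role entry at the front of `messages`. The model name and prompt strings below are placeholders.

    # Minimal sketch, assuming g4f is installed and exposes the OpenAI-style client;
    # "gpt-4o-mini" is a placeholder model name.
    from g4f.client import Client

    client = Client()

    # With the `system` keyword reverted, the system prompt is sent as an
    # ordinary message at the front of the `messages` list.
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are a concise assistant."},
            {"role": "user", "content": "What does a system prompt do?"},
        ],
    )
    print(response.choices[0].message.content)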