From 3982f39424ea037aca1086d45c6f657b4bfc457c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E2=B2=98r=E1=B9=A8h=E0=B8=AA=E2=88=82ow?= <71973368+MrShadowDev@users.noreply.github.com>
Date: Mon, 23 Oct 2023 09:46:25 +0200
Subject: 'Refactored by Sourcery' (#1125)

Co-authored-by: Sourcery AI <>
---
 g4f/gui/server/backend.py | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index ef18a61b..2ba0ca8e 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -35,9 +35,7 @@ class Backend_Api:
         return 'ok', 200
 
     def models(self):
-        models = g4f._all_models
-
-        return models
+        return g4f._all_models
 
     def _gen_title(self):
         return {
@@ -52,19 +50,18 @@ class Backend_Api:
         prompt = request.json['meta']['content']['parts'][0]
         model = request.json['model']
         provider = request.json.get('provider').split('g4f.Provider.')[1]
-        
+
         messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
-        
+
         def stream():
-            if provider:
-                answer = g4f.ChatCompletion.create(model=model,
-                    provider=get_provider(provider), messages=messages, stream=True)
-            else:
-                answer = g4f.ChatCompletion.create(model=model,
-                    messages=messages, stream=True)
-
-            for token in answer:
-                yield token
+            yield from g4f.ChatCompletion.create(
+                model=model,
+                provider=get_provider(provider),
+                messages=messages,
+                stream=True,
+            ) if provider else g4f.ChatCompletion.create(
+                model=model, messages=messages, stream=True
+            )
 
         return self.app.response_class(stream(), mimetype='text/event-stream')
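
Note (not part of the patch): below is a minimal, self-contained sketch of the pattern this
refactor applies in stream(): "yield from" delegates to whichever iterable a conditional
expression selects, replacing the original if/else plus explicit for-loop. The fake_stream
helper is a hypothetical stand-in for g4f.ChatCompletion.create(..., stream=True); it is not
part of the g4f API.

from typing import Iterator, Optional

def fake_stream(label: str) -> Iterator[str]:
    # Hypothetical stand-in for a streaming ChatCompletion call; yields a few tokens.
    for token in (label + "-1", label + "-2", label + "-3"):
        yield token

def stream(provider: Optional[str]) -> Iterator[str]:
    # yield from (A if cond else B): the conditional picks one iterable, and every
    # token it produces is re-yielded by this generator, mirroring how the refactored
    # backend.py chooses between the two ChatCompletion.create calls.
    yield from fake_stream("via-" + provider) if provider else fake_stream("default")

if __name__ == "__main__":
    print(list(stream("Bing")))  # ['via-Bing-1', 'via-Bing-2', 'via-Bing-3']
    print(list(stream(None)))    # ['default-1', 'default-2', 'default-3']

The behaviour is identical to the pre-refactor if/else; the conditional-expression form just
keeps stream() to a single statement, at some cost in readability for reviewers who prefer an
explicit branch.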