author      abc <98614666+xtekky@users.noreply.github.com>  2024-03-11 19:26:34 +0100
committer   abc <98614666+xtekky@users.noreply.github.com>  2024-03-11 19:26:34 +0100
commit      b7342b1f130aa867eec17d973b0cab00b16a4507
tree        a0936e607ee1701ab3ae68f2dc47bf503c20bff7 /g4f/local/core/engine.py
parent      ~
Diffstat (limited to 'g4f/local/core/engine.py')
-rw-r--r--  g4f/local/core/engine.py  42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/g4f/local/core/engine.py b/g4f/local/core/engine.py
new file mode 100644
index 00000000..920ed9b4
--- /dev/null
+++ b/g4f/local/core/engine.py
@@ -0,0 +1,42 @@
+import os
+
+from gpt4all import GPT4All
+from .models import models
+
+class LocalProvider:
+    @staticmethod
+    def create_completion(model, messages, stream, **kwargs):
+        if model not in models:
+            raise ValueError(f"Model '{model}' not found / not yet implemented")
+
+        model = models[model]
+        model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../models/')
+        full_model_path = os.path.join(model_dir, model['path'])
+
+        if not os.path.isfile(full_model_path):
+            print(f"Model file '{full_model_path}' not found.")
+            download = input(f'Do you want to download {model["path"]} ? [y/n]')
+
+            if download in ['y', 'Y']:
+                GPT4All.download_model(model['path'], model_dir)
+            else:
+                raise ValueError(f"Model '{model['path']}' not found.")
+
+        model = GPT4All(model_name=model['path'],
+                        n_threads=8,
+                        verbose=False,
+                        allow_download=False,
+                        model_path=model_dir)
+
+        system_template = next((message['content'] for message in messages if message['role'] == 'system'),
+                               'A chat between a curious user and an artificial intelligence assistant.')
+
+        prompt_template = 'USER: {0}\nASSISTANT: '
+        conversation = '\n'.join(f"{msg['role'].upper()}: {msg['content']}" for msg in messages) + "\nASSISTANT: "
+
+        with model.chat_session(system_template, prompt_template):
+            if stream:
+                for token in model.generate(conversation, streaming=True):
+                    yield token
+            else:
+                yield model.generate(conversation)
\ No newline at end of file
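
For context, a minimal usage sketch of the new provider follows. It assumes g4f is importable and that 'mistral-7b' is a key in the models registry imported from .models; that key is hypothetical, since the registry's contents are not part of this commit. Note that create_completion is a generator in both modes: with stream=True it yields tokens one at a time, with stream=False it yields the full response as a single string.

    # Usage sketch -- assumptions: g4f is on the import path, and
    # 'mistral-7b' is a (hypothetical) key in g4f/local/core/models.py.
    from g4f.local.core.engine import LocalProvider

    messages = [
        {'role': 'system', 'content': 'You are a concise assistant.'},
        {'role': 'user', 'content': 'Name three uses for a local LLM.'},
    ]

    # With stream=True, iterate over tokens as gpt4all produces them.
    for token in LocalProvider.create_completion('mistral-7b', messages, stream=True):
        print(token, end='', flush=True)

If the model file is missing on first use, the provider prompts on stdin and downloads it via GPT4All.download_model before loading, so the first call may block on user input.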