From b35dfcd1b01c575b65e0299ef71d285dc8f41459 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Sun, 7 Apr 2024 10:36:13 +0200
Subject: Add local models to gui, Fix You Provider, add AsyncClient

---
 g4f/locals/__init__.py |  0
 g4f/locals/models.py   | 50 +++++++++++++++++++++++++++++
 g4f/locals/provider.py | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 122 insertions(+)
 create mode 100644 g4f/locals/__init__.py
 create mode 100644 g4f/locals/models.py
 create mode 100644 g4f/locals/provider.py

(limited to 'g4f/locals')

diff --git a/g4f/locals/__init__.py b/g4f/locals/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/g4f/locals/models.py b/g4f/locals/models.py
new file mode 100644
index 00000000..85777ef2
--- /dev/null
+++ b/g4f/locals/models.py
@@ -0,0 +1,50 @@
+
+import os
+import requests
+import json
+
+from ..requests.raise_for_status import raise_for_status
+
+def load_models():
+    response = requests.get("https://gpt4all.io/models/models3.json")
+    raise_for_status(response)
+    return format_models(response.json())
+
+def get_model_name(filename: str) -> str:
+    name = filename.split(".", 1)[0]
+    for replace in ["-v1_5", "-v1", "-q4_0", "_v01", "-v0", "-f16", "-gguf2", "-newbpe"]:
+        name = name.replace(replace, "")
+    return name
+
+def format_models(models: list) -> dict:
+    return {get_model_name(model["filename"]): {
+        "path": model["filename"],
+        "ram": model["ramrequired"],
+        "prompt": model["promptTemplate"] if "promptTemplate" in model else None,
+        "system": model["systemPrompt"] if "systemPrompt" in model else None,
+    } for model in models}
+
+def read_models(file_path: str):
+    with open(file_path, "rb") as f:
+        return json.load(f)
+
+def save_models(file_path: str, data):
+    with open(file_path, 'w') as f:
+        json.dump(data, f, indent=4)
+
+def get_model_dir() -> str:
+    local_dir = os.path.dirname(os.path.abspath(__file__))
+    project_dir = os.path.dirname(os.path.dirname(local_dir))
+    model_dir = os.path.join(project_dir, "models")
+    os.makedirs(model_dir, exist_ok=True)
+    return model_dir
+
+def get_models() -> dict[str, dict]:
+    model_dir = get_model_dir()
+    file_path = os.path.join(model_dir, "models.json")
+    if os.path.isfile(file_path):
+        return read_models(file_path)
+    else:
+        models = load_models()
+        save_models(file_path, models)
+        return models
\ No newline at end of file
diff --git a/g4f/locals/provider.py b/g4f/locals/provider.py
new file mode 100644
index 00000000..09dfd4fe
--- /dev/null
+++ b/g4f/locals/provider.py
@@ -0,0 +1,72 @@
+import os
+
+from gpt4all import GPT4All
+from .models import get_models
+from ..typing import Messages
+
+MODEL_LIST: dict[str, dict] = None
+
+def find_model_dir(model_file: str) -> str:
+    local_dir = os.path.dirname(os.path.abspath(__file__))
+    project_dir = os.path.dirname(os.path.dirname(local_dir))
+
+    new_model_dir = os.path.join(project_dir, "models")
+    new_model_file = os.path.join(new_model_dir, model_file)
+    if os.path.isfile(new_model_file):
+        return new_model_dir
+
+    old_model_dir = os.path.join(local_dir, "models")
+    old_model_file = os.path.join(old_model_dir, model_file)
+    if os.path.isfile(old_model_file):
+        return old_model_dir
+
+    working_dir = "./"
+    for root, dirs, files in os.walk(working_dir):
+        if model_file in files:
+            return root
+
+    return new_model_dir
+
+class LocalProvider:
+    @staticmethod
+    def create_completion(model: str, messages: Messages, stream: bool = False, **kwargs):
+        global MODEL_LIST
+        if MODEL_LIST is None:
+            MODEL_LIST = get_models()
+        if model not in MODEL_LIST:
+            raise ValueError(f'Model "{model}" not found / not yet implemented')
+
+        model = MODEL_LIST[model]
+        model_file = model["path"]
+        model_dir = find_model_dir(model_file)
+        if not os.path.isfile(os.path.join(model_dir, model_file)):
+            print(f'Model file "models/{model_file}" not found.')
+            download = input(f"Do you want to download {model_file}? [y/n]: ")
+            if download in ["y", "Y"]:
+                GPT4All.download_model(model_file, model_dir)
+            else:
+                raise ValueError(f'Model "{model_file}" not found.')
+
+        model = GPT4All(model_name=model_file,
+                        #n_threads=8,
+                        verbose=False,
+                        allow_download=False,
+                        model_path=model_dir)
+
+        system_message = "\n".join(message["content"] for message in messages if message["role"] == "system")
+        if not system_message:
+            system_message = "A chat between a curious user and an artificial intelligence assistant."
+
+        prompt_template = "USER: {0}\nASSISTANT: "
+        conversation = "\n".join(
+            f"{message['role'].upper()}: {message['content']}"
+            for message in messages
+            if message["role"] != "system"
+        ) + "\nASSISTANT: "
+
+        with model.chat_session(system_message, prompt_template):
+            if stream:
+                for token in model.generate(conversation, streaming=True):
+                    yield token
+            else:
+                yield model.generate(conversation)
\ No newline at end of file
--
cgit v1.2.3


From bc7fbf08ff6d5554ec1ab2ed3f9b420379ca3907 Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Sun, 7 Apr 2024 11:24:48 +0200
Subject: Update models.py

---
 g4f/locals/models.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'g4f/locals')

diff --git a/g4f/locals/models.py b/g4f/locals/models.py
index 85777ef2..f82f5448 100644
--- a/g4f/locals/models.py
+++ b/g4f/locals/models.py
@@ -1,3 +1,4 @@
+from __future__ import annotations
 
 import os
 import requests
--
cgit v1.2.3


From 91c396e32951b51196c3d946be2572ba832587ae Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Sun, 7 Apr 2024 11:25:14 +0200
Subject: Update provider.py

---
 g4f/locals/provider.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'g4f/locals')

diff --git a/g4f/locals/provider.py b/g4f/locals/provider.py
index 09dfd4fe..45041539 100644
--- a/g4f/locals/provider.py
+++ b/g4f/locals/provider.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import os
 
 from gpt4all import GPT4All
--
cgit v1.2.3
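
For a sense of what the model registry added in the first commit produces, here is a
minimal sketch using a hypothetical entry in the shape served by
https://gpt4all.io/models/models3.json; the field names ("filename", "ramrequired",
"promptTemplate", "systemPrompt") come from the code above, but the concrete values
are made up:

    from g4f.locals.models import format_models, get_model_name

    # Hypothetical models3.json entry; only the fields read by format_models() are shown.
    sample = [{
        "filename": "mistral-7b-openorca.gguf2.Q4_0.gguf",
        "ramrequired": "8",
        "promptTemplate": "USER: %1 ASSISTANT: ",
        "systemPrompt": "You are a helpful assistant.",
    }]

    print(get_model_name("mistral-7b-openorca.gguf2.Q4_0.gguf"))
    # -> "mistral-7b-openorca" (everything after the first "." is cut off,
    #    then version/quantization suffixes are stripped)
    print(format_models(sample))
    # -> {"mistral-7b-openorca": {"path": "mistral-7b-openorca.gguf2.Q4_0.gguf",
    #                             "ram": "8", "prompt": "USER: %1 ASSISTANT: ",
    #                             "system": "You are a helpful assistant."}}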
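
And a minimal sketch of driving the new LocalProvider end to end, assuming the
gpt4all package is installed and that "mistral-7b-openorca" is one of the keys
returned by get_models() (the model name is illustrative; if the model file is
missing locally, create_completion() prompts on stdin before downloading it):

    from g4f.locals.provider import LocalProvider

    messages = [
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ]

    # create_completion() is a generator: with stream=True it yields token by
    # token, with stream=False it yields the full response as a single string.
    for chunk in LocalProvider.create_completion("mistral-7b-openorca", messages, stream=True):
        print(chunk, end="", flush=True)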