summaryrefslogtreecommitdiffstats
path: root/g4f/locals
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--g4f/locals/__init__.py0
-rw-r--r--g4f/locals/models.py50
-rw-r--r--g4f/locals/provider.py72
3 files changed, 122 insertions, 0 deletions
diff --git a/g4f/locals/__init__.py b/g4f/locals/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/g4f/locals/__init__.py
diff --git a/g4f/locals/models.py b/g4f/locals/models.py
new file mode 100644
index 00000000..85777ef2
--- /dev/null
+++ b/g4f/locals/models.py
@@ -0,0 +1,50 @@
+
+import os
+import requests
+import json
+
+from ..requests.raise_for_status import raise_for_status
+
def load_models() -> dict:
    """Fetch the gpt4all model catalog and return it indexed by model name.

    Returns:
        Mapping of canonical model name to its metadata (path, ram,
        prompt/system templates), as built by format_models().

    Raises:
        Whatever raise_for_status() raises on a non-success HTTP response,
        and requests.Timeout if the server stalls.
    """
    # An explicit timeout prevents the process from hanging forever on a
    # stalled connection; the original call had none.
    response = requests.get("https://gpt4all.io/models/models3.json", timeout=30)
    raise_for_status(response)
    return format_models(response.json())
+
def get_model_name(filename: str) -> str:
    """Derive a canonical model name from a gpt4all model filename.

    Strips everything from the first "." onward, then removes known
    version/quantization suffix tokens from the remaining stem.
    """
    stem, _, _ = filename.partition(".")
    noise_tokens = ("-v1_5", "-v1", "-q4_0", "_v01", "-v0", "-f16", "-gguf2", "-newbpe")
    for token in noise_tokens:
        stem = stem.replace(token, "")
    return stem
+
def format_models(models: list) -> dict:
    """Index raw gpt4all catalog records by canonical model name.

    Each entry keeps the original filename as "path", the RAM requirement,
    and the optional prompt/system templates (None when absent).
    """
    formatted = {}
    for record in models:
        formatted[get_model_name(record["filename"])] = {
            "path": record["filename"],
            "ram": record["ramrequired"],
            "prompt": record.get("promptTemplate"),
            "system": record.get("systemPrompt"),
        }
    return formatted
+
def read_models(file_path: str) -> dict:
    """Load the cached models index from *file_path* as JSON."""
    # Binary mode is fine here: json.load detects the encoding itself.
    with open(file_path, "rb") as stream:
        return json.load(stream)
+
def save_models(file_path: str, data) -> None:
    """Write the models index to *file_path* as pretty-printed JSON."""
    with open(file_path, "w") as stream:
        json.dump(data, stream, indent=4)
+
def get_model_dir() -> str:
    """Return the project-level "models" directory, creating it if absent.

    The directory lives two levels above this package
    (<project>/models).

    Returns:
        Absolute path to the models directory (always a str).
    """
    local_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.dirname(os.path.dirname(local_dir))
    model_dir = os.path.join(project_dir, "models")
    # Fix: the original implicitly returned None when the directory did
    # not exist, which made get_models() crash in os.path.join. Create
    # the directory instead so callers always get a usable path.
    os.makedirs(model_dir, exist_ok=True)
    return model_dir
+
def get_models() -> dict[str, dict]:
    """Return the models index, fetching and caching it on first use.

    Reads <models dir>/models.json when present; otherwise downloads the
    catalog, writes the cache file, and returns the fresh index.
    """
    file_path = os.path.join(get_model_dir(), "models.json")
    if os.path.isfile(file_path):
        return read_models(file_path)
    models = load_models()
    save_models(file_path, models)
    return models
diff --git a/g4f/locals/provider.py b/g4f/locals/provider.py
new file mode 100644
index 00000000..09dfd4fe
--- /dev/null
+++ b/g4f/locals/provider.py
@@ -0,0 +1,72 @@
+import os
+
+from gpt4all import GPT4All
+from .models import get_models
+from ..typing import Messages
+
+MODEL_LIST: dict[str, dict] = None
+
def find_model_dir(model_file: str) -> str:
    """Locate the directory that contains *model_file*.

    Search order: the project-level "models" directory, then the legacy
    package-local "models" directory, then a walk of the current working
    directory. Falls back to the project-level directory when the file
    is not found anywhere.
    """
    package_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.dirname(os.path.dirname(package_dir))

    preferred_dir = os.path.join(project_dir, "models")
    if os.path.isfile(os.path.join(preferred_dir, model_file)):
        return preferred_dir

    legacy_dir = os.path.join(package_dir, "models")
    if os.path.isfile(os.path.join(legacy_dir, model_file)):
        return legacy_dir

    # Last resort: scan the working tree for a matching filename.
    for root, _dirs, files in os.walk("./"):
        if model_file in files:
            return root

    return preferred_dir
+
class LocalProvider:
    """Provider that runs gpt4all models stored on the local filesystem."""

    @staticmethod
    def create_completion(model: str, messages: Messages, stream: bool = False, **kwargs):
        """Generate a chat completion with a local gpt4all model.

        Args:
            model: Canonical model name (a key of the models index).
            messages: OpenAI-style dicts with "role" and "content" keys.
            stream: When True, yield tokens as they are generated;
                otherwise yield the full response once.

        Yields:
            Generated tokens (streaming) or the complete response text.

        Raises:
            ValueError: If the model name is unknown, or its file is
                missing and the user declines the interactive download.
        """
        global MODEL_LIST
        if MODEL_LIST is None:
            MODEL_LIST = get_models()
        if model not in MODEL_LIST:
            raise ValueError(f'Model "{model}" not found / not yet implemented')

        model = MODEL_LIST[model]
        model_file = model["path"]
        model_dir = find_model_dir(model_file)
        if not os.path.isfile(os.path.join(model_dir, model_file)):
            # Interactive fallback: offer to download the weights file.
            print(f'Model file "models/{model_file}" not found.')
            download = input(f"Do you want to download {model_file}? [y/n]: ")
            if download in ["y", "Y"]:
                GPT4All.download_model(model_file, model_dir)
            else:
                raise ValueError(f'Model "{model_file}" not found.')

        model = GPT4All(model_name=model_file,
                        verbose=False,
                        allow_download=False,
                        model_path=model_dir)

        system_message = "\n".join(message["content"] for message in messages if message["role"] == "system")
        # Fix: apply the default system prompt only when the caller supplied
        # none. The original condition was inverted ("if system_message:"),
        # which discarded every user-provided system prompt and left the
        # prompt empty when none was given.
        if not system_message:
            system_message = "A chat between a curious user and an artificial intelligence assistant."

        prompt_template = "USER: {0}\nASSISTANT: "
        conversation = "\n".join(
            f"{message['role'].upper()}: {message['content']}"
            for message in messages
            if message["role"] != "system"
        ) + "\nASSISTANT: "

        with model.chat_session(system_message, prompt_template):
            if stream:
                for token in model.generate(conversation, streaming=True):
                    yield token
            else:
                yield model.generate(conversation)