Diffstat (limited to 'g4f/local')
-rw-r--r--  g4f/local/__init__.py        |  21
-rw-r--r--  g4f/local/_engine.py         |  42
-rw-r--r--  g4f/local/_models.py         |  86
-rw-r--r--  g4f/local/models/model-here  |   1
4 files changed, 11 insertions(+), 139 deletions(-)
diff --git a/g4f/local/__init__.py b/g4f/local/__init__.py
index c9d3d74a..117cd48d 100644
--- a/g4f/local/__init__.py
+++ b/g4f/local/__init__.py
@@ -1,17 +1,19 @@
-from ..typing import Union, Iterator, Messages
-from ..stubs import ChatCompletion, ChatCompletionChunk
-from ._engine import LocalProvider
-from ._models import models
-from ..client import iter_response, filter_none, IterResponse
+from __future__ import annotations
+
+from ..typing import Union, Messages
+from ..locals.provider import LocalProvider
+from ..locals.models import get_models
+from ..client.client import iter_response, filter_none
+from ..client.types import IterResponse
class LocalClient():
def __init__(self, **kwargs) -> None:
self.chat: Chat = Chat(self)
-
+
@staticmethod
def list_models():
- return list(models.keys())
-
+ return list(get_models())
+
class Completions():
def __init__(self, client: LocalClient):
self.client: LocalClient = client
@@ -25,8 +27,7 @@ class Completions():
max_tokens: int = None,
stop: Union[list[str], str] = None,
**kwargs
- ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
-
+ ) -> IterResponse:
stop = [stop] if isinstance(stop, str) else stop
response = LocalProvider.create_completion(
model, messages, stream,
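The hunk above rewires g4f/local to the shared g4f.locals provider and model registry and to the client package's response helpers, so Completions.create now returns the IterResponse type from g4f.client.types for both streaming and non-streaming calls. A minimal usage sketch of the refactored client, assuming the Chat/Completions wiring elsewhere in this module and a locally available GPT4All model (the model name below is illustrative only):

from g4f.local import LocalClient

client = LocalClient()
print(client.list_models())  # names come from g4f.locals.models.get_models()

# Illustrative: requires the matching .gguf file to be present locally.
for chunk in client.chat.completions.create(
    model="mistral-7b-instruct",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=True,
):
    # each chunk is a ChatCompletionChunk-style object yielded by iter_response
    print(chunk)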
diff --git a/g4f/local/_engine.py b/g4f/local/_engine.py
deleted file mode 100644
index 917de16c..00000000
--- a/g4f/local/_engine.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import os
-
-from gpt4all import GPT4All
-from ._models import models
-
-class LocalProvider:
- @staticmethod
- def create_completion(model, messages, stream, **kwargs):
- if model not in models:
- raise ValueError(f"Model '{model}' not found / not yet implemented")
-
- model = models[model]
- model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models/')
- full_model_path = os.path.join(model_dir, model['path'])
-
- if not os.path.isfile(full_model_path):
- print(f"Model file '{full_model_path}' not found.")
- download = input(f'Do you want to download {model["path"]} ? [y/n]')
-
- if download in ['y', 'Y']:
- GPT4All.download_model(model['path'], model_dir)
- else:
- raise ValueError(f"Model '{model['path']}' not found.")
-
- model = GPT4All(model_name=model['path'],
- #n_threads=8,
- verbose=False,
- allow_download=False,
- model_path=model_dir)
-
- system_template = next((message['content'] for message in messages if message['role'] == 'system'),
- 'A chat between a curious user and an artificial intelligence assistant.')
-
- prompt_template = 'USER: {0}\nASSISTANT: '
- conversation = '\n'.join(f"{msg['role'].upper()}: {msg['content']}" for msg in messages) + "\nASSISTANT: "
-
- with model.chat_session(system_template, prompt_template):
- if stream:
- for token in model.generate(conversation, streaming=True):
- yield token
- else:
-                yield model.generate(conversation)
\ No newline at end of file
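The deleted _engine.py wrapped the gpt4all package directly: it looked the requested model up in the local models table, offered to download a missing .gguf file, then generated inside a chat_session, streaming tokens when requested. The same pattern as a standalone sketch, kept close to the removed code; the directory and model filename are placeholders, and gpt4all call signatures may vary between versions:

import os
from gpt4all import GPT4All

model_dir = os.path.expanduser("~/gpt4all-models")    # placeholder directory
model_file = "mistral-7b-instruct-v0.1.Q4_0.gguf"     # placeholder filename

# Fetch the weights on demand, as the removed engine offered to do.
if not os.path.isfile(os.path.join(model_dir, model_file)):
    GPT4All.download_model(model_file, model_dir)

model = GPT4All(model_name=model_file, model_path=model_dir,
                allow_download=False, verbose=False)

system_template = "A chat between a curious user and an artificial intelligence assistant."
prompt_template = "USER: {0}\nASSISTANT: "
conversation = "USER: Hello\nASSISTANT: "

with model.chat_session(system_template, prompt_template):
    # streaming=True yields tokens one at a time, mirroring the stream branch above.
    for token in model.generate(conversation, streaming=True):
        print(token, end="")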
diff --git a/g4f/local/_models.py b/g4f/local/_models.py
deleted file mode 100644
index ec36fe41..00000000
--- a/g4f/local/_models.py
+++ /dev/null
@@ -1,86 +0,0 @@
-models = {
- "mistral-7b": {
- "path": "mistral-7b-openorca.gguf2.Q4_0.gguf",
- "ram": "8",
- "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
- "system": "<|im_start|>system\nYou are MistralOrca, a large language model trained by Alignment Lab AI. For multi-step problems, write out your reasoning for each step.\n<|im_end|>"
- },
- "mistral-7b-instruct": {
- "path": "mistral-7b-instruct-v0.1.Q4_0.gguf",
- "ram": "8",
- "prompt": "[INST] %1 [/INST]",
- "system": None
- },
- "gpt4all-falcon": {
- "path": "gpt4all-falcon-newbpe-q4_0.gguf",
- "ram": "8",
- "prompt": "### Instruction:\n%1\n### Response:\n",
- "system": None
- },
- "orca-2": {
- "path": "orca-2-13b.Q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "wizardlm-13b": {
- "path": "wizardlm-13b-v1.2.Q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "nous-hermes-llama2": {
- "path": "nous-hermes-llama2-13b.Q4_0.gguf",
- "ram": "16",
- "prompt": "### Instruction:\n%1\n### Response:\n",
- "system": None
- },
- "gpt4all-13b-snoozy": {
- "path": "gpt4all-13b-snoozy-q4_0.gguf",
- "ram": "16",
- "prompt": None,
- "system": None
- },
- "mpt-7b-chat": {
- "path": "mpt-7b-chat-newbpe-q4_0.gguf",
- "ram": "8",
- "prompt": "<|im_start|>user\n%1<|im_end|>\n<|im_start|>assistant\n",
- "system": "<|im_start|>system\n- You are a helpful assistant chatbot trained by MosaicML.\n- You answer questions.\n- You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.\n- You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>"
- },
- "orca-mini-3b": {
- "path": "orca-mini-3b-gguf2-q4_0.gguf",
- "ram": "4",
- "prompt": "### User:\n%1\n### Response:\n",
- "system": "### System:\nYou are an AI assistant that follows instruction extremely well. Help as much as you can.\n\n"
- },
- "replit-code-3b": {
- "path": "replit-code-v1_5-3b-newbpe-q4_0.gguf",
- "ram": "4",
- "prompt": "%1",
- "system": None
- },
- "starcoder": {
- "path": "starcoder-newbpe-q4_0.gguf",
- "ram": "4",
- "prompt": "%1",
- "system": None
- },
- "rift-coder-7b": {
- "path": "rift-coder-v0-7b-q4_0.gguf",
- "ram": "8",
- "prompt": "%1",
- "system": None
- },
- "all-MiniLM-L6-v2": {
- "path": "all-MiniLM-L6-v2-f16.gguf",
- "ram": "1",
- "prompt": None,
- "system": None
- },
- "mistral-7b-german": {
- "path": "em_german_mistral_v01.Q4_0.gguf",
- "ram": "8",
- "prompt": "USER: %1 ASSISTANT: ",
- "system": "Du bist ein hilfreicher Assistent. "
- }
-}
\ No newline at end of file
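Each entry in the removed registry mapped a short model name to its GGUF filename, an approximate RAM requirement in GB, and optional prompt/system templates, with %1 marking where the user message is substituted. A small sketch of applying such a template (the entry below is copied from the table; note the removed engine itself hard-coded a USER/ASSISTANT template rather than reading these fields):

entry = {
    "path": "mistral-7b-instruct-v0.1.Q4_0.gguf",   # GGUF weights filename
    "ram": "8",                                     # approximate RAM needed, in GB
    "prompt": "[INST] %1 [/INST]",                  # %1 is the user message slot
    "system": None,                                 # no fixed system prompt
}

user_message = "How are you?"
prompt = entry["prompt"].replace("%1", user_message) if entry["prompt"] else user_message
print(prompt)  # [INST] How are you? [/INST]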
diff --git a/g4f/local/models/model-here b/g4f/local/models/model-here
deleted file mode 100644
index 945c9b46..00000000
--- a/g4f/local/models/model-here
+++ /dev/null
@@ -1 +0,0 @@
-.
\ No newline at end of file