summary refs log tree commit diff stats
path: root/g4f/Provider/local
diff options
context:
space:
mode:
Diffstat (limited to 'g4f/Provider/local')
-rw-r--r--g4f/Provider/local/Local.py43
-rw-r--r--g4f/Provider/local/Ollama.py40
-rw-r--r--g4f/Provider/local/__init__.py2
3 files changed, 85 insertions, 0 deletions
diff --git a/g4f/Provider/local/Local.py b/g4f/Provider/local/Local.py
new file mode 100644
index 00000000..4dc6e3f9
--- /dev/null
+++ b/g4f/Provider/local/Local.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+from ...locals.models import get_models
+try:
+ from ...locals.provider import LocalProvider
+ has_requirements = True
+except ImportError:
+ has_requirements = False
+
+from ...typing import Messages, CreateResult
+from ...providers.base_provider import AbstractProvider, ProviderModelMixin
+from ...errors import MissingRequirementsError
+
class Local(AbstractProvider, ProviderModelMixin):
    """Provider backed by locally installed GPT4All models.

    Requires the optional "gpt4all" package (``pip install -U g4f[local]``);
    availability is detected at import time via ``has_requirements``.
    """
    label = "GPT4All"
    working = True
    supports_message_history = True
    supports_system_message = True
    supports_stream = True

    @classmethod
    def get_models(cls):
        """Return the cached list of local model names, populating it on first call.

        Also sets ``cls.default_model`` to the first discovered model.
        """
        if not cls.models:
            cls.models = list(get_models())
            # Guard: with no local models installed, [0] would raise IndexError.
            if cls.models:
                cls.default_model = cls.models[0]
        return cls.models

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Delegate a (optionally streamed) completion to ``LocalProvider``.

        Raises:
            MissingRequirementsError: if the "gpt4all" package is not installed.
        """
        if not has_requirements:
            raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
        return LocalProvider.create_completion(
            cls.get_model(model),
            messages,
            stream,
            **kwargs
        )
diff --git a/g4f/Provider/local/Ollama.py b/g4f/Provider/local/Ollama.py
new file mode 100644
index 00000000..c503a46a
--- /dev/null
+++ b/g4f/Provider/local/Ollama.py
@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+import requests
+import os
+
+from ..needs_auth.Openai import Openai
+from ...typing import AsyncResult, Messages
+
class Ollama(Openai):
    """Provider for a locally running Ollama server.

    Model discovery uses Ollama's native ``/api/tags`` endpoint; completions go
    through the server's OpenAI-compatible ``/v1`` API via the ``Openai`` base.
    The server location is taken from the ``OLLAMA_HOST`` / ``OLLAMA_PORT``
    environment variables, defaulting to 127.0.0.1:11434.
    """
    label = "Ollama"
    url = "https://ollama.com"
    needs_auth = False
    working = True

    @classmethod
    def get_models(cls):
        """Fetch and cache the model names advertised by the local Ollama server."""
        if not cls.models:
            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
            port = os.getenv("OLLAMA_PORT", "11434")
            url = f"http://{host}:{port}/api/tags"
            # Fail fast instead of hanging forever on an unreachable server,
            # and surface HTTP errors instead of an opaque KeyError on "models".
            response = requests.get(url, timeout=10)
            response.raise_for_status()
            models = response.json()["models"]
            cls.models = [model["name"] for model in models]
            cls.default_model = cls.models[0]
        return cls.models

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion via the OpenAI-compatible ``/v1`` endpoint.

        ``api_base`` overrides the environment-derived server address.
        """
        if not api_base:
            # Same default host as get_models: previously this used "localhost",
            # which can resolve differently (e.g. to ::1) than "127.0.0.1".
            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
            port = os.getenv("OLLAMA_PORT", "11434")
            api_base = f"http://{host}:{port}/v1"
        return super().create_async_generator(
            model, messages, api_base=api_base, **kwargs
        )
diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py
new file mode 100644
index 00000000..05f6022e
--- /dev/null
+++ b/g4f/Provider/local/__init__.py
@@ -0,0 +1,2 @@
+from .Local import Local
+from .Ollama import Ollama