from __future__ import annotations

from ...locals.models import get_models

# The gpt4all backend is an optional extra: record availability instead of
# failing at import time, so a helpful error can be raised on first use.
try:
    from ...locals.provider import LocalProvider
    has_requirements = True
except ImportError:
    has_requirements = False

from ...typing import Messages, CreateResult
from ...providers.base_provider import AbstractProvider, ProviderModelMixin
from ...errors import MissingRequirementsError


class Local(AbstractProvider, ProviderModelMixin):
    """Provider that runs GPT4All models on the local machine."""

    label = "GPT4All"
    working = True
    supports_message_history = True
    supports_system_message = True
    supports_stream = True

    @classmethod
    def get_models(cls):
        """Return the cached list of locally available models, loading it once."""
        if cls.models:
            return cls.models
        cls.models = list(get_models())
        # First discovered model becomes the default.
        cls.default_model = cls.models[0]
        return cls.models

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Delegate a (optionally streamed) completion to the local GPT4All backend.

        Raises:
            MissingRequirementsError: if the optional gpt4all package is absent.
        """
        if not has_requirements:
            raise MissingRequirementsError('Install "gpt4all" package | pip install -U g4f[local]')
        resolved_model = cls.get_model(model)
        return LocalProvider.create_completion(resolved_model, messages, stream, **kwargs)
b/g4f/Provider/local/Ollama.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import requests +import os + +from ..needs_auth.Openai import Openai +from ...typing import AsyncResult, Messages + +class Ollama(Openai): + label = "Ollama" + url = "https://ollama.com" + needs_auth = False + working = True + + @classmethod + def get_models(cls): + if not cls.models: + host = os.getenv("OLLAMA_HOST", "127.0.0.1") + port = os.getenv("OLLAMA_PORT", "11434") + url = f"http://{host}:{port}/api/tags" + models = requests.get(url).json()["models"] + cls.models = [model["name"] for model in models] + cls.default_model = cls.models[0] + return cls.models + + @classmethod + def create_async_generator( + cls, + model: str, + messages: Messages, + api_base: str = None, + **kwargs + ) -> AsyncResult: + if not api_base: + host = os.getenv("OLLAMA_HOST", "localhost") + port = os.getenv("OLLAMA_PORT", "11434") + api_base: str = f"http://{host}:{port}/v1" + return super().create_async_generator( + model, messages, api_base=api_base, **kwargs + ) diff --git a/g4f/Provider/local/__init__.py b/g4f/Provider/local/__init__.py new file mode 100644 index 00000000..05f6022e --- /dev/null +++ b/g4f/Provider/local/__init__.py @@ -0,0 +1,2 @@ +from .Local import Local +from .Ollama import Ollama -- cgit v1.2.3