path: root/g4f/Provider/Ollama.py
from __future__ import annotations

import requests
import os

from .needs_auth.Openai import Openai
from ..typing import AsyncResult, Messages

class Ollama(Openai):
    """Provider for a locally running Ollama server.

    Ollama exposes an OpenAI-compatible API, so this class only overrides
    model discovery and the default API base URL of the Openai provider.
    """
    label = "Ollama"
    url = "https://ollama.com"
    needs_auth = False
    working = True

    @classmethod
    def get_models(cls):
        if not cls.models:
            # Ollama listens on 127.0.0.1:11434 by default; both values can be
            # overridden via the OLLAMA_HOST / OLLAMA_PORT environment variables.
            host = os.getenv("OLLAMA_HOST", "127.0.0.1")
            port = os.getenv("OLLAMA_PORT", "11434")
            url = f"http://{host}:{port}/api/tags"
            # /api/tags lists the locally installed models, e.g.
            # {"models": [{"name": "llama3:latest", ...}, ...]}
            models = requests.get(url).json()["models"]
            cls.models = [model["name"] for model in models]
            cls.default_model = cls.models[0]
        return cls.models

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_base: str = None,
        **kwargs
    ) -> AsyncResult:
        if not api_base:
            # Point the inherited OpenAI-compatible client at the local Ollama
            # server, which serves an OpenAI-compatible API under /v1.
            host = os.getenv("OLLAMA_HOST", "localhost")
            port = os.getenv("OLLAMA_PORT", "11434")
            api_base = f"http://{host}:{port}/v1"
        return super().create_async_generator(
            model, messages, api_base=api_base, **kwargs
        )
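
# Minimal usage sketch, not part of the provider itself. It assumes a local
# Ollama server is reachable on the default host/port and that the inherited
# Openai.create_async_generator yields response text chunks.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        # Query /api/tags for the locally installed models.
        print("Available models:", Ollama.get_models())
        # Stream a completion from the first available model.
        async for chunk in Ollama.create_async_generator(
            model=Ollama.default_model,
            messages=[{"role": "user", "content": "Hello!"}],
        ):
            print(chunk, end="", flush=True)

    asyncio.run(_demo())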