author     H Lohaus <hlohaus@users.noreply.github.com>  2024-01-23 20:08:41 +0100
committer  GitHub <noreply@github.com>                  2024-01-23 20:08:41 +0100
commit     2b140a32554c1e94d095c55599a2f93e86f957cf (patch)
tree       e2770d97f0242a0b99a3af68ea4fcf25227dfcc8 /g4f/Provider/Llama2.py
parent     ~ (diff)
parent     Add ProviderModelMixin for model selection (diff)
Diffstat (limited to 'g4f/Provider/Llama2.py')
-rw-r--r--  g4f/Provider/Llama2.py  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
index 17969621..d1f8e194 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama2.py
@@ -3,18 +3,24 @@ from __future__ import annotations
 from aiohttp import ClientSession
 
 from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-models = {
-    "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
-    "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
-    "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
-}
 
-class Llama2(AsyncGeneratorProvider):
+class Llama2(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.llama2.ai"
     working = True
     supports_message_history = True
+    default_model = "meta/llama-2-70b-chat"
+    models = [
+        "meta/llama-2-7b-chat",
+        "meta/llama-2-13b-chat",
+        "meta/llama-2-70b-chat",
+    ]
+    model_aliases = {
+        "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
+        "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
+        "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
+    }
 
     @classmethod
     async def create_async_generator(
@@ -24,10 +30,6 @@ class Llama2(AsyncGeneratorProvider):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        if not model:
-            model = "meta/llama-2-70b-chat"
-        elif model in models:
-            model = models[model]
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
             "Accept": "*/*",
@@ -48,7 +50,7 @@
         prompt = format_prompt(messages)
         data = {
             "prompt": prompt,
-            "model": model,
+            "model": cls.get_model(model),
             "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
             "temperature": kwargs.get("temperature", 0.75),
             "topP": kwargs.get("top_p", 0.9),