author | H Lohaus <hlohaus@users.noreply.github.com> | 2023-12-08 20:07:28 +0100
committer | GitHub <noreply@github.com> | 2023-12-08 20:07:28 +0100
commit | c3ccc4e81945cb0fdb62dd66071f2a57797f31b6 (patch)
tree | 6e6e3debbca03685dc0568b948b17df214a13825 /g4f
parent | Update README.md (diff)
Diffstat (limited to 'g4f')
-rw-r--r-- | g4f/Provider/Llama2.py | 16
1 file changed, 7 insertions, 9 deletions
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
index efe5bdc6..6d0c0a44 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama2.py
@@ -6,10 +6,9 @@ from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 
 models = {
-    "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama 2 7B", "version": "d24902e3fa9b698cc208b5e63136c4e26e828659a9f09827ca6ec5bb83014381", "shortened":"7B"},
-    "meta-llama/Llama-2-13b-chat-hf": {"name": "Llama 2 13B", "version": "9dff94b1bed5af738655d4a7cbcdcde2bd503aa85c94334fe1f42af7f3dd5ee3", "shortened":"13B"},
-    "meta-llama/Llama-2-70b-chat-hf": {"name": "Llama 2 70B", "version": "2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf", "shortened":"70B"},
-    "Llava": {"name": "Llava 13B", "version": "6bc1c7bb0d2a34e413301fee8f7cc728d2d4e75bfab186aa995f63292bda92fc", "shortened":"Llava"}
+    "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
+    "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
+    "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
 }
 
 class Llama2(AsyncGeneratorProvider):
@@ -26,10 +25,9 @@ class Llama2(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncResult:
         if not model:
-            model = "meta-llama/Llama-2-70b-chat-hf"
-        elif model not in models:
-            raise ValueError(f"Model are not supported: {model}")
-        version = models[model]["version"]
+            model = "meta/llama-2-70b-chat"
+        elif model in models:
+            model = models[model]
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
             "Accept": "*/*",
@@ -50,7 +48,7 @@ class Llama2(AsyncGeneratorProvider):
         prompt = format_prompt(messages)
         data = {
             "prompt": prompt,
-            "version": version,
+            "model": model,
             "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
             "temperature": kwargs.get("temperature", 0.75),
             "topP": kwargs.get("top_p", 0.9),
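For readers skimming the change: the provider drops the opaque per-model version hashes and instead maps Hugging Face-style model IDs to short backend names, sending the name under a "model" key in the request body rather than a hash under "version". The following minimal sketch isolates that resolution logic; the models mapping, default model, and payload keys are copied from the diff, while resolve_model, build_payload, and the demo call are illustrative names, not functions that exist in the provider.

# Sketch of the model-resolution logic introduced by this commit.
# Only the `models` dict and the payload keys come from the diff;
# the helper names below are hypothetical.

models = {
    "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
    "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
    "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
}

def resolve_model(model: str) -> str:
    # Default to the 70B chat model when none is given; map known
    # Hugging Face IDs to short names; pass unknown names through
    # unchanged (the new behaviour -- no ValueError is raised).
    if not model:
        return "meta/llama-2-70b-chat"
    return models.get(model, model)

def build_payload(model: str, prompt: str, **kwargs) -> dict:
    # The request body now carries the resolved short name under
    # "model" (previously a version hash under "version").
    return {
        "prompt": prompt,
        "model": resolve_model(model),
        "systemPrompt": kwargs.get("system_message", "You are a helpful assistant."),
        "temperature": kwargs.get("temperature", 0.75),
        "topP": kwargs.get("top_p", 0.9),
    }

print(build_payload("meta-llama/Llama-2-13b-chat-hf", "Hello!")["model"])
# -> meta/llama-2-13b-chat

Note the behavioural change this implies: before the commit an unrecognised model name raised ValueError; afterwards it is forwarded to the backend as-is, so model validation effectively moves server-side.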