summary | refs | log | tree | commit | diff | stats
path: root/g4f/Provider
diff options
context:
space:
mode:
author: kqlio67 <166700875+kqlio67@users.noreply.github.com> 2024-04-19 14:10:56 +0200
committer: GitHub <noreply@github.com> 2024-04-19 14:10:56 +0200
commit: 81cf5d7c771c4ff1a9d49db754ccef780f8b1d99 (patch)
tree: 4af3e5d0c72dc9c30d86a02653e345cf14399e30 /g4f/Provider
parent: Added Meta llama-3 support! (#1856) (diff)
download: gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar.gz
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar.bz2
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar.lz
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar.xz
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.tar.zst
gpt4free-81cf5d7c771c4ff1a9d49db754ccef780f8b1d99.zip
Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Llama.py           | 8
-rw-r--r--  g4f/Provider/PerplexityLabs.py  | 7
2 files changed, 6 insertions, 9 deletions
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
index 4d19866e..8f3e9ea2 100644
--- a/g4f/Provider/Llama.py
+++ b/g4f/Provider/Llama.py
@@ -16,12 +16,12 @@ class Llama(AsyncGeneratorProvider, ProviderModelMixin):
"meta/llama-2-7b-chat",
"meta/llama-2-13b-chat",
"meta/llama-2-70b-chat",
- "meta/llama-3-8b-chat",
- "meta/llama-3-70b-chat",
+ "meta/meta-llama-3-8b-instruct",
+ "meta/meta-llama-3-70b-instruct",
]
model_aliases = {
- "meta-llama/Meta-Llama-3-8b": "meta/llama-3-8b-chat",
- "meta-llama/Meta-Llama-3-70b": "meta/llama-3-70b-chat",
+ "meta-llama/Meta-Llama-3-8b-instruct": "meta/meta-llama-3-8b-instruct",
+ "meta-llama/Meta-Llama-3-70b-instruct": "meta/meta-llama-3-70b-instruct",
"meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
"meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
"meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index ab36d284..b6fec53c 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -15,10 +15,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
working = True
default_model = "mixtral-8x7b-instruct"
models = [
- "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
- "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",
- "gemma-2b-it", "gemma-7b-it"
- "mistral-medium", "related", "dbrx-instruct"
+ "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "dbrx-instruct", "claude-3-haiku-20240307", "llama-3-8b-instruct", "llama-3-70b-instruct", "codellama-70b-instruct", "mistral-7b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct", "mixtral-8x22b-instruct", "mistral-medium", "gemma-2b-it", "gemma-7b-it", "related"
]
model_aliases = {
"mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
@@ -93,4 +90,4 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
if data["final"]:
break
except:
- raise RuntimeError(f"Message: {message}") \ No newline at end of file
+ raise RuntimeError(f"Message: {message}")