author    kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 19:50:15 +0100
committer kqlio67 <kqlio67@users.noreply.github.com>  2024-11-12 19:50:15 +0100
commit    330d920e4efb8a590b531ca4a18fa2711eff3e18 (patch)
tree      09f44cd00bc1747fd00aaf48c08dca154c68a725 /g4f
parent    Update (g4f/models.py g4f/Provider/PerplexityLabs.py) (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/ReplicateHome.py | 32
-rw-r--r--  g4f/models.py                 | 18
2 files changed, 19 insertions, 31 deletions
diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py
index 7f443a7d..a7fc9b54 100644
--- a/g4f/Provider/ReplicateHome.py
+++ b/g4f/Provider/ReplicateHome.py
@@ -17,7 +17,13 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
     supports_system_message = True
     supports_message_history = True
-    default_model = 'meta/meta-llama-3-70b-instruct'
+    default_model = 'yorickvp/llava-13b'
+
+    image_models = [
+        'stability-ai/stable-diffusion-3',
+        'bytedance/sdxl-lightning-4step',
+        'playgroundai/playground-v2.5-1024px-aesthetic',
+    ]
     text_models = [
         'meta/meta-llama-3-70b-instruct',
@@ -26,35 +32,31 @@ class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin):
         'yorickvp/llava-13b',
     ]
-    image_models = [
-        'black-forest-labs/flux-schnell',
-        'stability-ai/stable-diffusion-3',
-        'bytedance/sdxl-lightning-4step',
-        'playgroundai/playground-v2.5-1024px-aesthetic',
-    ]
+
     models = text_models + image_models
     model_aliases = {
-        "flux-schnell": "black-forest-labs/flux-schnell",
+        # image_models
         "sd-3": "stability-ai/stable-diffusion-3",
         "sdxl": "bytedance/sdxl-lightning-4step",
         "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic",
-        "llama-3-70b": "meta/meta-llama-3-70b-instruct",
-        "mixtral-8x7b": "mistralai/mixtral-8x7b-instruct-v0.1",
+
+        # text_models
         "gemma-2b": "google-deepmind/gemma-2b-it",
         "llava-13b": "yorickvp/llava-13b",
     }
     model_versions = {
-        "meta/meta-llama-3-70b-instruct": "fbfb20b472b2f3bdd101412a9f70a0ed4fc0ced78a77ff00970ee7a2383c575d",
-        "mistralai/mixtral-8x7b-instruct-v0.1": "5d78bcd7a992c4b793465bcdcf551dc2ab9668d12bb7aa714557a21c1e77041c",
-        "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
-        "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
-        'black-forest-labs/flux-schnell': "f2ab8a5bfe79f02f0789a146cf5e73d2a4ff2684a98c2b303d1e1ff3814271db",
+        # image_models
         'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f",
         'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f",
         'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
+
+        # text_models
+        "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626",
+        "yorickvp/llava-13b": "80537f9eead1a5bfa72d5ac6ea6414379be41d4d4f6679fd776e9535d1eb58bb",
+
     }
     @classmethod
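After this change ReplicateHome defaults to the 'yorickvp/llava-13b' text model, declares the image models ahead of the text models, and drops the 'flux-schnell', 'llama-3-70b' and 'mixtral-8x7b' aliases from model_aliases. A minimal usage sketch of the surviving aliases, assuming the g4f client interface (g4f.client.Client with a provider argument) available at this revision:

from g4f.client import Client
from g4f.Provider import ReplicateHome

# Pin the provider so aliases are resolved by ReplicateHome itself.
client = Client(provider=ReplicateHome)

# "llava-13b" resolves to 'yorickvp/llava-13b', the new default_model.
response = client.chat.completions.create(
    model="llava-13b",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)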
diff --git a/g4f/models.py b/g4f/models.py
index ce2588e4..1cefae8b 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -164,12 +164,6 @@ llama_3_8b = Model(
     best_provider = IterListProvider([Cloudflare])
 )
-llama_3_70b = Model(
-    name = "llama-3-70b",
-    base_provider = "Meta Llama",
-    best_provider = IterListProvider([ReplicateHome])
-)
-
 # llama 3.1
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
@@ -212,7 +206,7 @@ mistral_7b = Model(
 mixtral_8x7b = Model(
     name = "mixtral-8x7b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([DDG, ReplicateHome])
+    best_provider = DDG
 )
 mistral_nemo = Model(
@@ -279,7 +273,7 @@ gemini = Model(
 gemma_2b = Model(
     name = 'gemma-2b',
     base_provider = 'Google',
-    best_provider = IterListProvider([ReplicateHome])
+    best_provider = ReplicateHome
 )
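With this hunk, gemma_2b's best_provider becomes the ReplicateHome class itself rather than a single-entry IterListProvider. A small sketch of what that looks like at runtime, assuming the module-level Model instances in g4f/models.py stay importable:

from g4f.models import gemma_2b
from g4f.Provider import ReplicateHome

# best_provider is now the provider class directly, not an IterListProvider wrapper.
assert gemma_2b.best_provider is ReplicateHome
print(gemma_2b.name, "->", gemma_2b.best_provider.__name__)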
@@ -583,12 +577,6 @@ flux_4o = Model(
 )
-flux_schnell = Model(
-    name = 'flux-schnell',
-    base_provider = 'Flux AI',
-    best_provider = ReplicateHome
-
-)
 ### Other ###
@@ -635,7 +623,6 @@ class ModelUtils:
         # llama-3
         'llama-3-8b': llama_3_8b,
-        'llama-3-70b': llama_3_70b,
         # llama-3.1
         'llama-3.1-8b': llama_3_1_8b,
@@ -802,7 +789,6 @@ class ModelUtils:
         'flux-disney': flux_disney,
         'flux-pixel': flux_pixel,
         'flux-4o': flux_4o,
-        'flux-schnell': flux_schnell,
         ### Other ###
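Because 'llama-3-70b' and 'flux-schnell' are removed from ModelUtils.convert as well as from the Model definitions, code that looks models up by name should tolerate missing keys. A short sketch, assuming ModelUtils.convert remains the name-to-Model registry:

from g4f.models import ModelUtils

# Keys removed in this commit are simply absent from the registry.
for name in ("llama-3-8b", "llama-3-70b", "gemma-2b", "flux-schnell"):
    model = ModelUtils.convert.get(name)
    if model is None:
        print(f"{name}: no longer registered")
    else:
        print(f"{name}: best_provider = {model.best_provider}")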