author     Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 15:11:17 +0200
committer  Commenter123321 <36051603+Commenter123321@users.noreply.github.com>  2023-10-10 15:11:17 +0200
commit     9239c5720002f31b67bdd1ee805e2cd9b528d085 (patch)
tree       d48901a7bcce0cdac262f189b67fb5178542e4ce
parent     fix provider thing in gui (diff)
Diffstat
-rw-r--r--  g4f/gui/client/html/index.html    6
-rw-r--r--  g4f/gui/server/backend.py         6
-rw-r--r--  g4f/gui/server/provider.py        7
-rw-r--r--  g4f/models.py                    21
4 files changed, 23 insertions, 17 deletions
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index bb472706..c15584b8 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -118,9 +118,13 @@
<div class="field">
<select name="model" id="model">
<option value="gpt-3.5-turbo" selected>gpt-3.5</option>
- <option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
+ <option value="gpt-3.5-turbo-16k-0613">gpt-3.5 16k fast</option>
+ <option value="gpt-4" selected>gpt-4</option>
+ <option value="gpt-4-0613">gpt-4 fast</option>
+ <option value="gpt-4-32k">gpt-4 32k</option>
+ <option value="gpt-4-32k-0613">gpt-4 32k fast</option>
</select>
</div>
<div class="field">
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index cf1f9428..a76ca12b 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -33,17 +33,17 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0]
model = request.json['model']
- provider = get_provider(request.json.get('provider'))
+ provider = request.json.get('provider').split("g4f.Provider.")[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
if provider:
answer = g4f.ChatCompletion.create(model=model,
- provider=g4f.Provider.ProviderUtils.convert.get(provider), messages=messages, stream=True)
+ provider=get_provider(provider), messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
- messages=messages, stream=True)
+ messages=messages, stream=True)
for token in answer:
yield token
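
The backend now receives the fully qualified provider name from the GUI (e.g. "g4f.Provider.DeepAi"), strips the "g4f.Provider." prefix, and only resolves it to a class inside stream() via get_provider. A minimal sketch of that flow outside Flask, with raw_provider standing in for the value request.json.get('provider') would carry (the model and message are illustrative):

import g4f
from g4f.gui.server.provider import get_provider

# Value as it would arrive from the GUI's request body (illustrative).
raw_provider = "g4f.Provider.DeepAi"

# Mirror backend.py: keep only the class name after the "g4f.Provider." prefix.
provider_name = raw_provider.split("g4f.Provider.")[1]   # -> "DeepAi"

# get_provider() maps the short name through g4f.Provider.ProviderUtils.convert
# and returns the provider class, or None if the name is unknown.
provider = get_provider(provider_name)

answer = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=provider,   # None lets g4f fall back to the model's best_provider
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for token in answer:
    print(token, end="")
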
diff --git a/g4f/gui/server/provider.py b/g4f/gui/server/provider.py
index 230b5f5f..11202d38 100644
--- a/g4f/gui/server/provider.py
+++ b/g4f/gui/server/provider.py
@@ -3,17 +3,12 @@ from g4f import BaseProvider
def get_provider(provider: str) -> BaseProvider | None:
-
if isinstance(provider, str):
print(provider)
if provider == 'g4f.Provider.Auto':
return None
- if provider in g4f.Provider.ProviderUtils.convert:
- return g4f.Provider.ProviderUtils.convert[provider]
-
- else:
- return None
+ return g4f.Provider.ProviderUtils.convert.get(provider)
else:
return None
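
With the explicit membership check gone, get_provider relies on dict.get for the lookup, so unknown names and non-string input both come back as None. A short usage sketch, assuming "DeepAi" is one of the keys in g4f.Provider.ProviderUtils.convert:

from g4f.gui.server.provider import get_provider

# 'g4f.Provider.Auto' still means "no explicit provider was chosen".
assert get_provider("g4f.Provider.Auto") is None

# Known names resolve through g4f.Provider.ProviderUtils.convert ...
print(get_provider("DeepAi"))          # the DeepAi provider class

# ... while unknown names and non-string input now fall through to None
# via .get() instead of the removed if/else branch.
print(get_provider("NoSuchProvider"))  # None
print(get_provider(None))              # None
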
diff --git a/g4f/models.py b/g4f/models.py
index ced8e290..9be8f459 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -65,14 +65,16 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
- DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
+ DeepAi, Aivvm, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
- best_provider = Bing
+ best_provider = RetryProvider([
+ Aivvm, Bing
+ ])
)
# Bard
@@ -165,27 +167,32 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
- base_provider = 'openai')
+ base_provider = 'openai',
+ best_provider = Aivvm
+)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
- best_provider=Aivvm
+ best_provider = Aivvm
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
- base_provider = 'openai'
+ base_provider = 'openai',
+ best_provider = Aivvm
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
- base_provider = 'openai'
+ base_provider = 'openai',
+ best_provider = Aivvm
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
- base_provider = 'openai'
+ base_provider = 'openai',
+ best_provider = Aivvm
)
text_ada_001 = Model(
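
The models above are consumed through their best_provider entries: when ChatCompletion.create is called without an explicit provider, g4f falls back to the model's best_provider, and a RetryProvider such as the new RetryProvider([Aivvm, Bing]) for gpt-4 retries the request against another provider in the list when one fails. A minimal, non-streaming sketch of that path (the prompt is illustrative):

import g4f

# No provider argument: gpt-4 resolves to its best_provider,
# i.e. RetryProvider([Aivvm, Bing]) after this change, so a failed
# attempt with one provider is retried with the other.
response = g4f.ChatCompletion.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)
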