summary | refs | log | tree | commit | diff | stats
path: root/g4f/Provider
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--  g4f/Provider/Providers/AItianhu.py      4
-rw-r--r--  g4f/Provider/Providers/Ails.py          2
-rw-r--r--  g4f/Provider/Providers/ChatgptLogin.py  2
-rw-r--r--  g4f/Provider/Providers/DfeHub.py        8
-rw-r--r--  g4f/Provider/Providers/EasyChat.py      8
-rw-r--r--  g4f/Provider/Providers/Lockchat.py      2
6 files changed, 13 insertions, 13 deletions
diff --git a/g4f/Provider/Providers/AItianhu.py b/g4f/Provider/Providers/AItianhu.py
index 5a151e1e..0bdaa09a 100644
--- a/g4f/Provider/Providers/AItianhu.py
+++ b/g4f/Provider/Providers/AItianhu.py
@@ -22,8 +22,8 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
"prompt": base,
"options": {},
"systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
- "temperature": 0.8,
- "top_p": 1
+ "temperature": kwargs.get("temperature", 0.8),
+ "top_p": kwargs.get("top_p", 1)
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
diff --git a/g4f/Provider/Providers/Ails.py b/g4f/Provider/Providers/Ails.py
index f6174f85..60d3603e 100644
--- a/g4f/Provider/Providers/Ails.py
+++ b/g4f/Provider/Providers/Ails.py
@@ -75,7 +75,7 @@ def _create_completion(model: str, messages: list, temperature: float = 0.6, str
json_data = json.dumps(separators=(',', ':'), obj={
'model': 'gpt-3.5-turbo',
- 'temperature': 0.6,
+ 'temperature': temperature,
'stream': True,
'messages': messages} | sig)
diff --git a/g4f/Provider/Providers/ChatgptLogin.py b/g4f/Provider/Providers/ChatgptLogin.py
index 3659ee17..0fdbab8e 100644
--- a/g4f/Provider/Providers/ChatgptLogin.py
+++ b/g4f/Provider/Providers/ChatgptLogin.py
@@ -75,7 +75,7 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
'userName': '<div class="mwai-name-text">User:</div>',
'aiName': '<div class="mwai-name-text">AI:</div>',
'model': 'gpt-3.5-turbo',
- 'temperature': 0.8,
+ 'temperature': kwargs.get('temperature', 0.8),
'maxTokens': 1024,
'maxResults': 1,
'apiKey': '',
diff --git a/g4f/Provider/Providers/DfeHub.py b/g4f/Provider/Providers/DfeHub.py
index afc9861a..e3ff8045 100644
--- a/g4f/Provider/Providers/DfeHub.py
+++ b/g4f/Provider/Providers/DfeHub.py
@@ -32,10 +32,10 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
json_data = {
'messages': messages,
'model': 'gpt-3.5-turbo',
- 'temperature': 0.5,
- 'presence_penalty': 0,
- 'frequency_penalty': 0,
- 'top_p': 1,
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
"stream": True,
}
response = requests.post('https://chat.dfehub.com/api/openai/v1/chat/completions',
diff --git a/g4f/Provider/Providers/EasyChat.py b/g4f/Provider/Providers/EasyChat.py
index a59ea072..909428fa 100644
--- a/g4f/Provider/Providers/EasyChat.py
+++ b/g4f/Provider/Providers/EasyChat.py
@@ -34,10 +34,10 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
'messages': messages,
'stream': True,
'model': model,
- 'temperature': 0.5,
- 'presence_penalty': 0,
- 'frequency_penalty': 0,
- 'top_p': 1,
+ 'temperature': kwargs.get('temperature', 0.5),
+ 'presence_penalty': kwargs.get('presence_penalty', 0),
+ 'frequency_penalty': kwargs.get('frequency_penalty', 0),
+ 'top_p': kwargs.get('top_p', 1),
}
response = requests.post('https://free.easychat.work/api/openai/v1/chat/completions',
diff --git a/g4f/Provider/Providers/Lockchat.py b/g4f/Provider/Providers/Lockchat.py
index 489356ce..dd1edb84 100644
--- a/g4f/Provider/Providers/Lockchat.py
+++ b/g4f/Provider/Providers/Lockchat.py
@@ -11,7 +11,7 @@ working = False
def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
payload = {
- "temperature": 0.7,
+ "temperature": temperature,
"messages": messages,
"model": model,
"stream": True,