Diffstat (limited to 'g4f/Provider')
-rw-r--r--  g4f/Provider/Acytoo.py    |  7
-rw-r--r--  g4f/Provider/Aichat.py    |  6
-rw-r--r--  g4f/Provider/EasyChat.py  | 39
-rw-r--r--  g4f/Provider/Equing.py    |  3
-rw-r--r--  g4f/Provider/H2o.py       |  2
5 files changed, 43 insertions, 14 deletions
diff --git a/g4f/Provider/Acytoo.py b/g4f/Provider/Acytoo.py
index 32c67c0c..2edd9efd 100644
--- a/g4f/Provider/Acytoo.py
+++ b/g4f/Provider/Acytoo.py
@@ -19,11 +19,12 @@ class Acytoo(BaseProvider):
**kwargs: Any,
) -> CreateResult:
headers = _create_header()
- payload = _create_payload(messages)
+ payload = _create_payload(messages, kwargs.get('temperature', 0.5))
url = "https://chat.acytoo.com/api/completions"
response = requests.post(url=url, headers=headers, json=payload)
response.raise_for_status()
+ response.encoding = "utf-8"
yield response.text
@@ -34,7 +35,7 @@ def _create_header():
}
-def _create_payload(messages: list[dict[str, str]]):
+def _create_payload(messages: list[dict[str, str]], temperature):
payload_messages = [
message | {"createdAt": int(time.time()) * 1000} for message in messages
]
@@ -42,6 +43,6 @@ def _create_payload(messages: list[dict[str, str]]):
"key": "",
"model": "gpt-3.5-turbo",
"messages": payload_messages,
- "temperature": 1,
+ "temperature": temperature,
"password": "",
}
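
The Acytoo hunks thread an optional temperature kwarg through to the request body instead of hard-coding 1. A minimal sketch of the patched payload builder plus a hypothetical call site (field names copied from the diff above; the endpoint itself is unchanged):

    import time

    def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5):
        # Each message is stamped with a creation time, as in the patched provider.
        payload_messages = [
            message | {"createdAt": int(time.time()) * 1000} for message in messages
        ]
        return {
            "key": "",
            "model": "gpt-3.5-turbo",
            "messages": payload_messages,
            "temperature": temperature,  # previously hard-coded to 1
            "password": "",
        }

    # A caller can now forward temperature through **kwargs:
    payload = _create_payload([{"role": "user", "content": "Hello"}], temperature=0.7)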
diff --git a/g4f/Provider/Aichat.py b/g4f/Provider/Aichat.py
index 6992d071..a1d90db7 100644
--- a/g4f/Provider/Aichat.py
+++ b/g4f/Provider/Aichat.py
@@ -40,9 +40,9 @@ class Aichat(BaseProvider):
json_data = {
"message": base,
- "temperature": 1,
+ "temperature": kwargs.get('temperature', 0.5),
"presence_penalty": 0,
- "top_p": 1,
+ "top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
@@ -52,4 +52,6 @@ class Aichat(BaseProvider):
json=json_data,
)
response.raise_for_status()
+ if not response.json()['response']:
+ raise Exception("Error Response: " + response.text)
yield response.json()["message"]
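
The Aichat hunks do two things: temperature and top_p now come from kwargs, and an empty "response" field is treated as an error. A hedged sketch of that logic, with build_body and check_response as hypothetical helper names:

    import requests

    def build_body(base: str, **kwargs) -> dict:
        # Defaults mirror the diff: temperature 0.5, top_p 1.
        return {
            "message": base,
            "temperature": kwargs.get("temperature", 0.5),
            "presence_penalty": 0,
            "top_p": kwargs.get("top_p", 1),
            "frequency_penalty": 0,
        }

    def check_response(response: requests.Response) -> str:
        response.raise_for_status()
        data = response.json()
        if not data.get("response"):
            # Use the raw text so the error message never concatenates a dict.
            raise Exception("Error Response: " + response.text)
        return data["message"]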
diff --git a/g4f/Provider/EasyChat.py b/g4f/Provider/EasyChat.py
index 59c46ffa..2a61346c 100644
--- a/g4f/Provider/EasyChat.py
+++ b/g4f/Provider/EasyChat.py
@@ -10,6 +10,7 @@ class EasyChat(BaseProvider):
url = "https://free.easychat.work"
supports_stream = True
supports_gpt_35_turbo = True
+ working = True
@staticmethod
def create_completion(
@@ -25,6 +26,7 @@ class EasyChat(BaseProvider):
"https://chat2.fastgpt.me",
"https://chat3.fastgpt.me",
"https://chat4.fastgpt.me",
+ "https://gxos1h1ddt.fastgpt.me"
]
server = active_servers[kwargs.get("active_server", 0)]
headers = {
@@ -34,9 +36,17 @@ class EasyChat(BaseProvider):
"content-type": "application/json",
"origin": f"{server}",
"referer": f"{server}/",
- "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
- "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"x-requested-with": "XMLHttpRequest",
+ 'plugins': '0',
+ 'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Windows"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+ 'usesearch': 'false',
+ 'x-requested-with': 'XMLHttpRequest'
}
json_data = {
@@ -57,14 +67,25 @@ class EasyChat(BaseProvider):
f"{server}/api/openai/v1/chat/completions",
headers=headers,
json=json_data,
+ stream=stream,
)
-
- response.raise_for_status()
- print(response.text)
- for chunk in response.iter_lines():
- if b"content" in chunk:
- data = json.loads(chunk.decode().split("data: ")[1])
- yield data["choices"][0]["delta"]["content"]
+ if response.status_code == 200:
+ if not stream:
+ json_data = response.json()
+ if "choices" in json_data:
+ yield json_data["choices"][0]["message"]["content"]
+ else:
+ raise Exception("No response from server")
+ else:
+ for chunk in response.iter_lines():
+ if b"content" in chunk:
+ splitData = chunk.decode().split("data: ")
+ if len(splitData) > 1:
+ yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
+ else:
+ raise Exception(f"Error {response.status_code} from server")
+
@classmethod
@property
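
The reworked EasyChat handler distinguishes a non-streamed JSON body from streamed SSE lines. A small, self-contained sketch of the streamed branch, run against a synthetic chunk rather than a live server:

    import json

    def parse_sse_chunks(chunks):
        # Each streamed chunk looks like b'data: {"choices": [{"delta": {"content": "..."}}]}'.
        for chunk in chunks:
            if b"content" in chunk:
                parts = chunk.decode().split("data: ")
                if len(parts) > 1:
                    yield json.loads(parts[1])["choices"][0]["delta"]["content"]

    print(list(parse_sse_chunks(
        [b'data: {"choices": [{"delta": {"content": "Hello"}}]}']
    )))  # ['Hello']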
diff --git a/g4f/Provider/Equing.py b/g4f/Provider/Equing.py
index bcf6cff9..90c865d9 100644
--- a/g4f/Provider/Equing.py
+++ b/g4f/Provider/Equing.py
@@ -53,6 +53,9 @@ class Equing(ABC):
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
+ if not stream:
+ yield response.json()["choices"][0]["message"]["content"]
+ return
for line in response.iter_content(chunk_size=1024):
if line:
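
Equing gains a non-streaming short-circuit: when stream is falsy, the whole completion is read from the JSON body instead of iterating chunks. A sketch of that extraction against a synthetic response body:

    def extract_message(body: dict) -> str:
        # Non-streaming path from the diff: the full text sits in choices[0].message.content.
        return body["choices"][0]["message"]["content"]

    sample = {"choices": [{"message": {"content": "Hello from the non-stream path"}}]}
    print(extract_message(sample))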
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
index c2492e59..f9b799bb 100644
--- a/g4f/Provider/H2o.py
+++ b/g4f/Provider/H2o.py
@@ -75,6 +75,8 @@ class H2o(BaseProvider):
headers=headers,
json=data,
)
+ response.raise_for_status()
+ response.encoding = "utf-8"
generated_text = response.text.replace("\n", "").split("data:")
generated_text = json.loads(generated_text[-1])
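
The H2o hunk adds raise_for_status and a utf-8 encoding hint ahead of the existing parsing, which strips newlines, splits on "data:" and decodes the last segment. A sketch against a synthetic payload, assuming the final segment carries a generated_text field:

    import json

    def parse_generated_text(text: str) -> str:
        # Mirror of the parsing below the patched lines: drop newlines, split on
        # "data:" and JSON-decode the final segment.
        segments = text.replace("\n", "").split("data:")
        return json.loads(segments[-1])["generated_text"]

    sample = 'data:{"token": {"text": "Hi"}}\ndata:{"generated_text": "Hi there"}'
    print(parse_generated_text(sample))  # Hi there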