path: root/testing
author	Tekky <98614666+xtekky@users.noreply.github.com>	2023-09-22 21:40:59 +0200
committer	GitHub <noreply@github.com>	2023-09-22 21:40:59 +0200
commit	ba287e89b55118965ff0e151e54636b1f50d3b38 (patch)
tree	dc69218fecae4971c90ae391ff6919c032b93540 /testing
parent	~ | gpt-3.5-turbo-0613 (diff)
parent	Add RetryProvider (diff)
Diffstat (limited to 'testing')
-rw-r--r--	testing/test_chat_completion.py	23
-rw-r--r--	testing/test_providers.py	22
2 files changed, 24 insertions(+), 21 deletions(-)
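
The second parent of this merge is titled "Add RetryProvider"; the provider class itself lives outside testing/, so it does not appear in this path-limited diff. Below is a minimal usage sketch of what such a wrapper is for, assuming it is exposed as g4f.Provider.RetryProvider and takes a list of provider classes to fall back through — both assumptions, neither confirmed by the hunks shown here:

import g4f

# Hypothetical sketch, not taken from this diff: wrap several providers so that a
# failing one is retried with the next. The constructor shape and the provider
# names listed here are assumptions.
retry = g4f.Provider.RetryProvider([g4f.Provider.GptGo, g4f.Provider.Aichat])

response = g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    provider=retry,
    messages=[{"role": "user", "content": "hello!"}],
)
print(response)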
diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py
index fbaa3169..d901e697 100644
--- a/testing/test_chat_completion.py
+++ b/testing/test_chat_completion.py
@@ -3,10 +3,23 @@ from pathlib import Path
 
 sys.path.append(str(Path(__file__).parent.parent))
 
-import g4f
+import g4f, asyncio
 
-response = g4f.ChatCompletion.create(
+print("create:", end=" ", flush=True)
+for response in g4f.ChatCompletion.create(
     model=g4f.models.gpt_35_turbo,
-    messages=[{"role": "user", "content": "hello, are you GPT 4?"}]
-)
-print(response)
\ No newline at end of file
+    provider=g4f.Provider.GptGo,
+    messages=[{"role": "user", "content": "hello!"}],
+):
+    print(response, end="", flush=True)
+print()
+
+async def run_async():
+    response = await g4f.ChatCompletion.create_async(
+        model=g4f.models.gpt_35_turbo,
+        provider=g4f.Provider.GptGo,
+        messages=[{"role": "user", "content": "hello!"}],
+    )
+    print("create_async:", response)
+
+asyncio.run(run_async())
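
Read as a whole, the hunk above rewrites testing/test_chat_completion.py into the file below (assembled from the diff; the two header lines import sys / from pathlib import Path are unchanged context inferred from the hunk heading, not shown as diff lines):

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

import g4f, asyncio

# Stream a completion through the blocking generator API.
print("create:", end=" ", flush=True)
for response in g4f.ChatCompletion.create(
    model=g4f.models.gpt_35_turbo,
    provider=g4f.Provider.GptGo,
    messages=[{"role": "user", "content": "hello!"}],
):
    print(response, end="", flush=True)
print()

# Exercise the asynchronous entry point as well.
async def run_async():
    response = await g4f.ChatCompletion.create_async(
        model=g4f.models.gpt_35_turbo,
        provider=g4f.Provider.GptGo,
        messages=[{"role": "user", "content": "hello!"}],
    )
    print("create_async:", response)

asyncio.run(run_async())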
diff --git a/testing/test_providers.py b/testing/test_providers.py
index be04e7a3..5240119b 100644
--- a/testing/test_providers.py
+++ b/testing/test_providers.py
@@ -1,6 +1,6 @@
 import sys
 from pathlib import Path
-from colorama import Fore
+from colorama import Fore, Style
 
 sys.path.append(str(Path(__file__).parent.parent))
 
@@ -8,10 +8,6 @@ from g4f import BaseProvider, models, Provider
 
 logging = False
 
-class Styles:
-    ENDC = "\033[0m"
-    BOLD = "\033[1m"
-    UNDERLINE = "\033[4m"
 
 def main():
     providers = get_providers()
@@ -29,11 +25,11 @@ def main():
 
     print()
     if failed_providers:
-        print(f"{Fore.RED + Styles.BOLD}Failed providers:{Styles.ENDC}")
+        print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
         for _provider in failed_providers:
             print(f"{Fore.RED}{_provider.__name__}")
     else:
-        print(f"{Fore.GREEN + Styles.BOLD}All providers are working")
+        print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
 
 
 def get_providers() -> list[type[BaseProvider]]:
@@ -45,21 +41,15 @@ def get_providers() -> list[type[BaseProvider]]:
         "AsyncProvider",
         "AsyncGeneratorProvider"
     ]
-    provider_names = [
-        provider_name
+    return [
+        getattr(Provider, provider_name)
         for provider_name in provider_names
         if not provider_name.startswith("__") and provider_name not in ignore_names
     ]
-    return [getattr(Provider, provider_name) for provider_name in provider_names]
 
 
 def create_response(_provider: type[BaseProvider]) -> str:
-    if _provider.supports_gpt_35_turbo:
-        model = models.gpt_35_turbo.name
-    elif _provider.supports_gpt_4:
-        model = models.gpt_4.name
-    else:
-        model = models.default.name
+    model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
     response = _provider.create_completion(
         model=model,
         messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],
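
After this change, provider discovery in testing/test_providers.py builds the provider list in a single comprehension instead of a two-step name filter. A reconstruction of the function from the hunk above is shown below; the first assignment and the earlier entries of ignore_names sit outside the hunk's context, so the dir(Provider) line and the omitted list entries are assumptions:

def get_providers() -> list[type[BaseProvider]]:
    provider_names = dir(Provider)  # assumption: this line is outside the hunk's context
    ignore_names = [
        # assumption: earlier entries of this list are not shown in the hunk
        "AsyncProvider",
        "AsyncGeneratorProvider"
    ]
    # Resolve each remaining name to the provider class it refers to, skipping
    # dunder attributes and the base/helper classes listed above.
    return [
        getattr(Provider, provider_name)
        for provider_name in provider_names
        if not provider_name.startswith("__") and provider_name not in ignore_names
    ]

create_response, immediately below it in the same file, now picks gpt-3.5-turbo when the provider advertises support for it and otherwise falls back to the default model, dropping the separate gpt-4 branch.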