Diffstat (limited to 'testing')
-rw-r--r--  testing/log_time.py              25
-rw-r--r--  testing/test_chat_completion.py   2
-rw-r--r--  testing/test_needs_auth.py       96
-rw-r--r--  testing/test_providers.py        63
4 files changed, 158 insertions, 28 deletions
diff --git a/testing/log_time.py b/testing/log_time.py
new file mode 100644
index 00000000..7d268128
--- /dev/null
+++ b/testing/log_time.py
@@ -0,0 +1,25 @@
+from time import time
+
+
+async def log_time_async(method: callable, **kwargs):
+ start = time()
+ result = await method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
+
+
+def log_time_yield(method: callable, **kwargs):
+ start = time()
+ result = yield from method(**kwargs)
+ yield f" {round(time() - start, 2)} secs"
+
+
+def log_time(method: callable, **kwargs):
+ start = time()
+ result = method(**kwargs)
+ secs = f"{round(time() - start, 2)} secs"
+ if result:
+ return " ".join([result, secs])
+ return secs
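For reference, a minimal sketch of how these three helpers are consumed; the callables below are stand-ins for the provider calls used in the new test scripts, and the sketch is assumed to run from the repository root so that the testing package is importable:

import asyncio

from testing.log_time import log_time, log_time_async, log_time_yield

def fast_sync() -> str:
    return "done"

async def fast_async() -> str:
    return "done"

def fast_stream():
    yield "chunk "
    yield "chunk"

# log_time calls the function and appends the elapsed seconds to its result
print(log_time(fast_sync))                      # e.g. "done 0.0 secs"

# log_time_async awaits a coroutine function the same way
print(asyncio.run(log_time_async(fast_async)))  # e.g. "done 0.0 secs"

# log_time_yield re-yields every chunk, then yields the timing as a final chunk
print("".join(log_time_yield(fast_stream)))     # e.g. "chunk chunk 0.0 secs"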
diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py
index d091d47b..32c069be 100644
--- a/testing/test_chat_completion.py
+++ b/testing/test_chat_completion.py
@@ -14,4 +14,4 @@ response = g4f.ChatCompletion.create(
active_server=5,
)
-print(response)
\ No newline at end of file
+print(response)
diff --git a/testing/test_needs_auth.py b/testing/test_needs_auth.py
new file mode 100644
index 00000000..3cef1c61
--- /dev/null
+++ b/testing/test_needs_auth.py
@@ -0,0 +1,96 @@
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+from testing.log_time import log_time, log_time_async, log_time_yield
+
+
+_providers = [
+ g4f.Provider.H2o,
+ g4f.Provider.You,
+ g4f.Provider.HuggingChat,
+ g4f.Provider.OpenAssistant,
+ g4f.Provider.Bing,
+ g4f.Provider.Bard
+]
+
+_instruct = "Hello, tell about you in one sentence."
+
+_example = """
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+"""
+
+print("Bing: ", end="")
+for response in log_time_yield(
+ g4f.ChatCompletion.create,
+ model=g4f.models.gpt_35_turbo,
+ messages=[{"role": "user", "content": _instruct}],
+ provider=g4f.Provider.Bing,
+ #cookies=g4f.get_cookies(".huggingface.co"),
+ #stream=True,
+ auth=True
+):
+ print(response, end="")
+print()
+print()
+
+
+async def run_async():
+ responses = [
+ log_time_async(
+ provider.create_async,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ )
+ for provider in _providers
+ ]
+ responses = await asyncio.gather(*responses)
+ for idx, provider in enumerate(_providers):
+ print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+print()
+
+
+def run_stream():
+ for provider in _providers:
+ print(f"{provider.__name__}: ", end="")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ ):
+ print(response, end="")
+ print()
+print("Stream Total:", log_time(run_stream))
+print()
+
+
+def create_no_stream():
+ for provider in _providers:
+ print(f"{provider.__name__}:", end=" ")
+ for response in log_time_yield(
+ provider.create_completion,
+ model=None,
+ messages=[{"role": "user", "content": _instruct}],
+ stream=False
+ ):
+ print(response, end="")
+ print()
+print("No Stream Total:", log_time(create_no_stream))
+print()
\ No newline at end of file
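The sample output embedded above is the point of the three runs: the async section starts every provider call at once through asyncio.gather, so its total is close to the slowest single provider, while the stream and no-stream sections run providers one after another and their totals approach the sum of the individual times. A toy sketch of that difference, with sleeps standing in for provider latency (durations roughly echo the sample output):

import asyncio
import time

async def fake_provider(delay: float) -> str:
    # stand-in for provider.create_async(...)
    await asyncio.sleep(delay)
    return f"reply after {delay} secs"

async def run_concurrent() -> list:
    # all calls are started together, so the total is about max(delays)
    return await asyncio.gather(*(fake_provider(d) for d in (2.0, 3.4, 4.1)))

start = time.time()
print(asyncio.run(run_concurrent()))
print("concurrent total:", round(time.time() - start, 2), "secs")  # ~4.1

start = time.time()
for d in (2.0, 3.4, 4.1):
    # one call at a time, so the total is about sum(delays)
    print(asyncio.run(fake_provider(d)))
print("sequential total:", round(time.time() - start, 2), "secs")  # ~9.5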
diff --git a/testing/test_providers.py b/testing/test_providers.py
index a5c6f87b..c4fcbc0c 100644
--- a/testing/test_providers.py
+++ b/testing/test_providers.py
@@ -1,67 +1,76 @@
import sys
from pathlib import Path
+from colorama import Fore
sys.path.append(str(Path(__file__).parent.parent))
-from g4f import BaseProvider, models, provider
+from g4f import BaseProvider, models, Provider
+logging = False
def main():
providers = get_providers()
- results: list[list[str | bool]] = []
+ failed_providers = []
for _provider in providers:
- print("start", _provider.__name__)
- actual_working = judge(_provider)
- expected_working = _provider.working
- match = actual_working == expected_working
+ if _provider.needs_auth:
+ continue
+ print("Provider:", _provider.__name__)
+ result = test(_provider)
+ print("Result:", result)
+ if _provider.working and not result:
+ failed_providers.append(_provider)
- results.append([_provider.__name__, expected_working, actual_working, match])
+ print()
- print("failed provider list")
- for result in results:
- if not result[3]:
- print(result)
+ if failed_providers:
+ print(f"{Fore.RED}Failed providers:\n")
+ for _provider in failed_providers:
+ print(f"{Fore.RED}{_provider.__name__}")
+ else:
+ print(f"{Fore.GREEN}All providers are working")
def get_providers() -> list[type[BaseProvider]]:
- provider_names = dir(provider)
+ provider_names = dir(Provider)
ignore_names = [
"base_provider",
- "BaseProvider",
+ "BaseProvider"
]
provider_names = [
provider_name
for provider_name in provider_names
if not provider_name.startswith("__") and provider_name not in ignore_names
]
- return [getattr(provider, provider_name) for provider_name in provider_names]
+ return [getattr(Provider, provider_name) for provider_name in sorted(provider_names)]
def create_response(_provider: type[BaseProvider]) -> str:
- model = (
- models.gpt_35_turbo.name
- if _provider is not provider.H2o
- else models.falcon_7b.name
- )
+ if _provider.supports_gpt_35_turbo:
+ model = models.gpt_35_turbo.name
+ elif _provider.supports_gpt_4:
+ model = models.gpt_4
+ elif hasattr(_provider, "model"):
+ model = _provider.model
+ else:
+ model = None
response = _provider.create_completion(
model=model,
- messages=[{"role": "user", "content": "Hello world!, plz yourself"}],
+ messages=[{"role": "user", "content": "Hello"}],
stream=False,
)
return "".join(response)
-
-def judge(_provider: type[BaseProvider]) -> bool:
- if _provider.needs_auth:
- return _provider.working
-
+
+def test(_provider: type[BaseProvider]) -> bool:
try:
response = create_response(_provider)
assert type(response) is str
- return len(response) > 1
+ assert len(response) > 0
+ return response
except Exception as e:
- print(e)
+ if logging:
+ print(e)
return False
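create_response now derives the model from the provider's own capability flags instead of special-casing H2o. A small sketch of that fallback order, using a hypothetical provider class (FakeProvider and its attributes are made up for illustration; only the g4f names already used in the diff are assumed, and the sketch is assumed to live next to the other scripts in testing/):

import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

from g4f import models

class FakeProvider:
    # hypothetical provider used only to exercise the fallback order
    supports_gpt_35_turbo = False
    supports_gpt_4 = False
    model = "falcon-7b"

def pick_model(_provider):
    # mirrors create_response: gpt-3.5-turbo, then gpt-4,
    # then a provider-specific model attribute, then None
    if _provider.supports_gpt_35_turbo:
        return models.gpt_35_turbo.name
    if _provider.supports_gpt_4:
        return models.gpt_4  # the diff passes the model object here, not .name
    if hasattr(_provider, "model"):
        return _provider.model
    return None

print(pick_model(FakeProvider))  # -> "falcon-7b"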