Diffstat (limited to 'testing')
 testing/log_time.py             |  25 --
 testing/test_async.py           |  35 --
 testing/test_chat_completion.py |  27 --
 testing/test_interference.py    |  27 --
 testing/test_needs_auth.py      |  96 --
 testing/test_providers.py       |  76 --
 6 files changed, 0 insertions(+), 286 deletions(-)
diff --git a/testing/log_time.py b/testing/log_time.py
deleted file mode 100644
index 376ab86d..00000000
--- a/testing/log_time.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from time import time
-
-
-async def log_time_async(method: callable, **kwargs):
-    start = time()
-    result = await method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
-
-
-def log_time_yield(method: callable, **kwargs):
-    start = time()
-    result = yield from method(**kwargs)
-    yield f" {round(time() - start, 2)} secs"
-
-
-def log_time(method: callable, **kwargs):
-    start = time()
-    result = method(**kwargs)
-    secs = f"{round(time() - start, 2)} secs"
-    if result:
-        return " ".join([result, secs])
-    return secs
\ No newline at end of file
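
The three deleted helpers share one pattern: run the wrapped callable, then append the elapsed seconds to whatever it produced. The generator variant re-yields each chunk and emits the timing as a final chunk (its result assignment is dead code). A minimal standalone sketch of the same pattern, using perf_counter() rather than time() since it is monotonic and intended for interval timing; illustrative only, not part of the repository:

    from time import perf_counter

    def log_time(method, **kwargs):
        # Time a plain call and append the elapsed seconds to its result.
        start = perf_counter()
        result = method(**kwargs)
        secs = f"{round(perf_counter() - start, 2)} secs"
        return f"{result} {secs}" if result else secs

    def log_time_yield(method, **kwargs):
        # Re-yield every chunk from a generator, then yield a timing suffix.
        start = perf_counter()
        yield from method(**kwargs)
        yield f" {round(perf_counter() - start, 2)} secs"
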
diff --git a/testing/test_async.py b/testing/test_async.py
deleted file mode 100644
index bef2c75f..00000000
--- a/testing/test_async.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import sys
-from pathlib import Path
-import asyncio
-
-sys.path.append(str(Path(__file__).parent.parent))
-
-import g4f
-from g4f.Provider import AsyncProvider
-from testing.test_providers import get_providers
-from testing.log_time import log_time_async
-
-async def create_async(provider):
-    model = g4f.models.gpt_35_turbo.name if provider.supports_gpt_35_turbo else g4f.models.default.name
-    try:
-        response = await log_time_async(
-            provider.create_async,
-            model=model,
-            messages=[{"role": "user", "content": "Hello Assistant!"}]
-        )
-        print(f"{provider.__name__}:", response)
-    except Exception as e:
-        return f"{provider.__name__}: {e.__class__.__name__}: {e}"
-
-async def run_async():
-    responses: list = [
-        create_async(_provider)
-        for _provider in get_providers()
-        if _provider.working and issubclass(_provider, AsyncProvider)
-    ]
-    responses = await asyncio.gather(*responses)
-    for error in responses:
-        if error:
-            print(error)
-
-print("Total:", asyncio.run(log_time_async(run_async)))
\ No newline at end of file
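
test_async.py was the concurrency smoke test: it built one create_async coroutine per working AsyncProvider and ran them all at once with asyncio.gather, so the total wall time tracks the slowest provider rather than the sum. A stripped-down sketch of that fan-out, with the g4f call replaced by a hypothetical check() coroutine:

    import asyncio

    async def check(name: str) -> str | None:
        # Stand-in for provider.create_async; returns an error string on failure.
        try:
            await asyncio.sleep(0.1)  # simulate network latency
            print(f"{name}: ok")
            return None
        except Exception as e:
            return f"{name}: {e.__class__.__name__}: {e}"

    async def run_all(names: list[str]) -> None:
        # gather() starts every check concurrently, returning results in input order.
        for error in await asyncio.gather(*(check(n) for n in names)):
            if error:
                print(error)

    asyncio.run(run_all(["ProviderA", "ProviderB", "ProviderC"]))
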
diff --git a/testing/test_chat_completion.py b/testing/test_chat_completion.py
deleted file mode 100644
index 7600e46b..00000000
--- a/testing/test_chat_completion.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import sys
-from pathlib import Path
-
-sys.path.append(str(Path(__file__).parent.parent))
-
-import g4f, asyncio
-
-print("create:", end=" ", flush=True)
-for response in g4f.ChatCompletion.create(
-    model=g4f.models.gpt_4_32k_0613,
-    provider=g4f.Provider.Aivvm,
-    messages=[{"role": "user", "content": "send a bunch of emojis. i want to test something"}],
-    temperature=0.0,
-    stream=True
-):
-    print(response, end="", flush=True)
-print()
-
-async def run_async():
-    response = await g4f.ChatCompletion.create_async(
-        model=g4f.models.gpt_35_turbo_16k_0613,
-        provider=g4f.Provider.Aivvm,
-        messages=[{"role": "user", "content": "hello!"}],
-    )
-    print("create_async:", response)
-
-# asyncio.run(run_async())
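
Both calls above pin g4f.Provider.Aivvm and dated model names, all of which can vanish from the library. A sketch of the same smoke test made less brittle by letting the library choose the provider (assuming a current g4f release keeps this documented create() interface):

    import g4f

    # With stream=True, create() yields the response as incremental text chunks.
    for chunk in g4f.ChatCompletion.create(
        model=g4f.models.default,
        messages=[{"role": "user", "content": "hello!"}],
        stream=True,
    ):
        print(chunk, end="", flush=True)
    print()
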
diff --git a/testing/test_interference.py b/testing/test_interference.py
deleted file mode 100644
index d8e85a6c..00000000
--- a/testing/test_interference.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# type: ignore
-import openai
-
-openai.api_key = ""
-openai.api_base = "http://localhost:1337"
-
-
-def main():
-    chat_completion = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": "write a poem about a tree"}],
-        stream=True,
-    )
-
-    if isinstance(chat_completion, dict):
-        # not stream
-        print(chat_completion.choices[0].message.content)
-    else:
-        # stream
-        for token in chat_completion:
-            content = token["choices"][0]["delta"].get("content")
-            if content != None:
-                print(content, end="", flush=True)
-
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
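
This file targets the pre-1.0 openai package (module-level api_base and openai.ChatCompletion), an interface removed in openai>=1.0. A sketch of the equivalent check with the 1.x client, assuming the local interference server exposes the usual /v1 routes:

    from openai import OpenAI

    # Route requests to the local interference server instead of api.openai.com.
    client = OpenAI(api_key="unused", base_url="http://localhost:1337/v1")

    stream = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "write a poem about a tree"}],
        stream=True,
    )
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content is not None:  # "is not None", unlike the original's "!= None"
            print(content, end="", flush=True)
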
diff --git a/testing/test_needs_auth.py b/testing/test_needs_auth.py
deleted file mode 100644
index 26630e23..00000000
--- a/testing/test_needs_auth.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import sys
-from pathlib import Path
-import asyncio
-
-sys.path.append(str(Path(__file__).parent.parent))
-
-import g4f
-from testing.log_time import log_time, log_time_async, log_time_yield
-
-
-_providers = [
-    g4f.Provider.H2o,
-    g4f.Provider.You,
-    g4f.Provider.HuggingChat,
-    g4f.Provider.OpenAssistant,
-    g4f.Provider.Bing,
-    g4f.Provider.Bard
-]
-
-_instruct = "Hello, are you GPT 4?."
-
-_example = """
-OpenaiChat: Hello! How can I assist you today? 2.0 secs
-Bard: Hello! How can I help you today? 3.44 secs
-Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
-Async Total: 4.25 secs
-
-OpenaiChat: Hello! How can I assist you today? 1.85 secs
-Bard: Hello! How can I help you today? 3.38 secs
-Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
-Stream Total: 11.37 secs
-
-OpenaiChat: Hello! How can I help you today? 3.28 secs
-Bard: Hello there! How can I help you today? 3.58 secs
-Bing: Hello! How can I help you today? 3.28 secs
-No Stream Total: 10.14 secs
-"""
-
-print("Bing: ", end="")
-for response in log_time_yield(
-    g4f.ChatCompletion.create,
-    model=g4f.models.default,
-    messages=[{"role": "user", "content": _instruct}],
-    provider=g4f.Provider.Bing,
-    #cookies=g4f.get_cookies(".huggingface.co"),
-    stream=True,
-    auth=True
-):
-    print(response, end="", flush=True)
-print()
-print()
-
-
-async def run_async():
-    responses = [
-        log_time_async(
-            provider.create_async,
-            model=None,
-            messages=[{"role": "user", "content": _instruct}],
-        )
-        for provider in _providers
-    ]
-    responses = await asyncio.gather(*responses)
-    for idx, provider in enumerate(_providers):
-        print(f"{provider.__name__}:", responses[idx])
-print("Async Total:", asyncio.run(log_time_async(run_async)))
-print()
-
-
-def run_stream():
-    for provider in _providers:
-        print(f"{provider.__name__}: ", end="")
-        for response in log_time_yield(
-            provider.create_completion,
-            model=None,
-            messages=[{"role": "user", "content": _instruct}],
-        ):
-            print(response, end="", flush=True)
-        print()
-print("Stream Total:", log_time(run_stream))
-print()
-
-
-def create_no_stream():
-    for provider in _providers:
-        print(f"{provider.__name__}:", end=" ")
-        for response in log_time_yield(
-            provider.create_completion,
-            model=None,
-            messages=[{"role": "user", "content": _instruct}],
-            stream=False
-        ):
-            print(response, end="")
-        print()
-print("No Stream Total:", log_time(create_no_stream))
-print()
\ No newline at end of file
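
The _example transcript embedded in the file shows what each mode measures: the sequential totals equal the per-provider sums (1.85 + 3.38 + 6.14 = 11.37 secs for Stream, 3.28 + 3.58 + 3.28 = 10.14 secs for No Stream), while the Async Total of 4.25 secs sits just above the slowest single provider (4.14 secs), because asyncio.gather overlaps the waits instead of serializing them.
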
diff --git a/testing/test_providers.py b/testing/test_providers.py
deleted file mode 100644
index cd82fe7c..00000000
--- a/testing/test_providers.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import sys
-from pathlib import Path
-from colorama import Fore, Style
-
-sys.path.append(str(Path(__file__).parent.parent))
-
-from g4f import BaseProvider, models, Provider
-
-logging = False
-
-
-def main():
-    providers = get_providers()
-    failed_providers = []
-
-    for _provider in providers:
-        if _provider.needs_auth:
-            continue
-        print("Provider:", _provider.__name__)
-        result = test(_provider)
-        print("Result:", result)
-        if _provider.working and not result:
-            failed_providers.append(_provider)
-
-    print()
-
-    if failed_providers:
-        print(f"{Fore.RED + Style.BRIGHT}Failed providers:{Style.RESET_ALL}")
-        for _provider in failed_providers:
-            print(f"{Fore.RED}{_provider.__name__}")
-    else:
-        print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
-
-
-def get_providers() -> list[type[BaseProvider]]:
-    provider_names = dir(Provider)
-    ignore_names = [
-        "annotations",
-        "base_provider",
-        "retry_provider",
-        "BaseProvider",
-        "AsyncProvider",
-        "AsyncGeneratorProvider",
-        "RetryProvider",
-    ]
-    return [
-        getattr(Provider, provider_name)
-        for provider_name in provider_names
-        if not provider_name.startswith("__") and provider_name not in ignore_names
-    ]
-
-
-def create_response(_provider: type[BaseProvider]) -> str:
-    model = models.gpt_35_turbo.name if _provider.supports_gpt_35_turbo else models.default.name
-    response = _provider.create_completion(
-        model=model,
-        messages=[{"role": "user", "content": "Hello, who are you? Answer in detail much as possible."}],
-        stream=False,
-    )
-    return "".join(response)
-
-
-def test(_provider: type[BaseProvider]) -> bool:
-    try:
-        response = create_response(_provider)
-        assert type(response) is str
-        assert len(response) > 0
-        return response
-    except Exception as e:
-        if logging:
-            print(e)
-        return False
-
-
-if __name__ == "__main__":
-    main()
\ No newline at end of file
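
Two details worth noting in this last file: test() is annotated -> bool but actually returns the response string on success (truthiness keeps main() working), and get_providers() discovers providers purely by filtering dir(Provider) against a name blocklist. A sketch of that discovery pattern with an added isinstance guard (the class filter is an addition, not in the original):

    def discover_classes(module, ignore_names: set[str]) -> list[type]:
        # Enumerate a module's attributes and keep the exported classes,
        # skipping dunders and an explicit blocklist of helper names.
        found = []
        for name in dir(module):
            if name.startswith("__") or name in ignore_names:
                continue
            obj = getattr(module, name)
            if isinstance(obj, type):  # drops submodules and constants
                found.append(obj)
        return found
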