summary refs log tree commit diff stats
path: root/etc
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--  etc/testing/_providers.py            3
-rw-r--r--  etc/testing/test_all.py             14
-rw-r--r--  etc/testing/test_chat_completion.py  4
-rw-r--r--  etc/tool/create_provider.py         51
-rw-r--r--  etc/tool/improve_code.py             4
-rw-r--r--  etc/unittest/__main__.py             4
-rw-r--r--  etc/unittest/async_client.py        56
-rw-r--r--  etc/unittest/client.py              38
8 files changed, 66 insertions(+), 108 deletions(-)
diff --git a/etc/testing/_providers.py b/etc/testing/_providers.py
index e2ef0cbe..0d75dd02 100644
--- a/etc/testing/_providers.py
+++ b/etc/testing/_providers.py
@@ -35,7 +35,6 @@ def get_providers() -> list[ProviderType]:
provider
for provider in __providers__
if provider.__name__ not in dir(Provider.deprecated)
- and provider.__name__ not in dir(Provider.unfinished)
and provider.url is not None
]
@@ -59,4 +58,4 @@ def test(provider: ProviderType) -> bool:
if __name__ == "__main__":
main()
- \ No newline at end of file
+
diff --git a/etc/testing/test_all.py b/etc/testing/test_all.py
index 73134e3f..6850627d 100644
--- a/etc/testing/test_all.py
+++ b/etc/testing/test_all.py
@@ -38,21 +38,11 @@ async def test(model: g4f.Model):
async def start_test():
models_to_test = [
- # GPT-3.5 4K Context
+ # GPT-3.5
g4f.models.gpt_35_turbo,
- g4f.models.gpt_35_turbo_0613,
- # GPT-3.5 16K Context
- g4f.models.gpt_35_turbo_16k,
- g4f.models.gpt_35_turbo_16k_0613,
-
- # GPT-4 8K Context
+ # GPT-4
g4f.models.gpt_4,
- g4f.models.gpt_4_0613,
-
- # GPT-4 32K Context
- g4f.models.gpt_4_32k,
- g4f.models.gpt_4_32k_0613,
]
models_working = []
diff --git a/etc/testing/test_chat_completion.py b/etc/testing/test_chat_completion.py
index 615c8be0..6b053b72 100644
--- a/etc/testing/test_chat_completion.py
+++ b/etc/testing/test_chat_completion.py
@@ -8,7 +8,7 @@ import g4f, asyncio
print("create:", end=" ", flush=True)
for response in g4f.ChatCompletion.create(
model=g4f.models.default,
- provider=g4f.Provider.Bing,
+ #provider=g4f.Provider.Bing,
messages=[{"role": "user", "content": "write a poem about a tree"}],
stream=True
):
@@ -18,7 +18,7 @@ print()
async def run_async():
response = await g4f.ChatCompletion.create_async(
model=g4f.models.default,
- provider=g4f.Provider.Bing,
+ #provider=g4f.Provider.Bing,
messages=[{"role": "user", "content": "hello!"}],
)
print("create_async:", response)
diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py
index ff04f961..7a9827a8 100644
--- a/etc/tool/create_provider.py
+++ b/etc/tool/create_provider.py
@@ -33,14 +33,35 @@ from __future__ import annotations
from aiohttp import ClientSession
from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-class ChatGpt(AsyncGeneratorProvider):
- url = "https://chat-gpt.com"
+class {name}(AsyncGeneratorProvider, ProviderModelMixin):
+ label = ""
+ url = "https://example.com"
+ api_endpoint = "https://example.com/api/completion"
working = True
- supports_gpt_35_turbo = True
+ needs_auth = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = ''
+ models = ['', '']
+
+ model_aliases = {
+ "alias1": "model1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
@classmethod
async def create_async_generator(
@@ -50,19 +71,21 @@ class ChatGpt(AsyncGeneratorProvider):
proxy: str = None,
**kwargs
) -> AsyncResult:
- headers = {
- "authority": "chat-gpt.com",
+ model = cls.get_model(model)
+
+ headers = {{
+ "authority": "example.com",
"accept": "application/json",
"origin": cls.url,
- "referer": f"{cls.url}/chat",
- }
+ "referer": f"{{cls.url}}/chat",
+ }}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
- data = {
+ data = {{
"prompt": prompt,
- "purpose": "",
- }
- async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ "model": model,
+ }}
+ async with session.post(f"{{cls.url}}/api/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
@@ -78,7 +101,7 @@ Create a provider from a cURL command. The command is:
{command}
```
A example for a provider:
-```py
+```python
{example}
```
The name for the provider class:
@@ -90,7 +113,7 @@ And replace "gpt-3.5-turbo" with `model`.
print("Create code...")
response = []
for chunk in g4f.ChatCompletion.create(
- model=g4f.models.gpt_35_long,
+ model=g4f.models.default,
messages=[{"role": "user", "content": prompt}],
timeout=300,
stream=True,
diff --git a/etc/tool/improve_code.py b/etc/tool/improve_code.py
index b2e36f86..8578b478 100644
--- a/etc/tool/improve_code.py
+++ b/etc/tool/improve_code.py
@@ -30,7 +30,7 @@ Don't remove license comments.
print("Create code...")
response = []
for chunk in g4f.ChatCompletion.create(
- model=g4f.models.gpt_35_long,
+ model=g4f.models.default,
messages=[{"role": "user", "content": prompt}],
timeout=300,
stream=True
@@ -42,4 +42,4 @@ response = "".join(response)
if code := read_code(response):
with open(path, "w") as file:
- file.write(code) \ No newline at end of file
+ file.write(code)
diff --git a/etc/unittest/__main__.py b/etc/unittest/__main__.py
index 351c2bb3..ee748917 100644
--- a/etc/unittest/__main__.py
+++ b/etc/unittest/__main__.py
@@ -4,8 +4,8 @@ from .backend import *
from .main import *
from .model import *
from .client import *
-from .async_client import *
+from .client import *
from .include import *
from .integration import *
-unittest.main() \ No newline at end of file
+unittest.main()
diff --git a/etc/unittest/async_client.py b/etc/unittest/async_client.py
deleted file mode 100644
index a49b90ed..00000000
--- a/etc/unittest/async_client.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import unittest
-
-from g4f.client import AsyncClient, ChatCompletion, ChatCompletionChunk
-from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock
-
-DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
-
-class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase):
-
- async def test_response(self):
- client = AsyncClient(provider=AsyncGeneratorProviderMock)
- response = await client.chat.completions.create(DEFAULT_MESSAGES, "")
- self.assertIsInstance(response, ChatCompletion)
- self.assertEqual("Mock", response.choices[0].message.content)
-
- async def test_pass_model(self):
- client = AsyncClient(provider=ModelProviderMock)
- response = await client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
- self.assertIsInstance(response, ChatCompletion)
- self.assertEqual("Hello", response.choices[0].message.content)
-
- async def test_max_tokens(self):
- client = AsyncClient(provider=YieldProviderMock)
- messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = await client.chat.completions.create(messages, "Hello", max_tokens=1)
- self.assertIsInstance(response, ChatCompletion)
- self.assertEqual("How ", response.choices[0].message.content)
- response = await client.chat.completions.create(messages, "Hello", max_tokens=2)
- self.assertIsInstance(response, ChatCompletion)
- self.assertEqual("How are ", response.choices[0].message.content)
-
- async def test_max_stream(self):
- client = AsyncClient(provider=YieldProviderMock)
- messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = client.chat.completions.create(messages, "Hello", stream=True)
- async for chunk in response:
- self.assertIsInstance(chunk, ChatCompletionChunk)
- if chunk.choices[0].delta.content is not None:
- self.assertIsInstance(chunk.choices[0].delta.content, str)
- messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]]
- response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
- response = [chunk async for chunk in response]
- self.assertEqual(len(response), 3)
- for chunk in response:
- if chunk.choices[0].delta.content is not None:
- self.assertEqual(chunk.choices[0].delta.content, "You ")
-
- async def test_stop(self):
- client = AsyncClient(provider=YieldProviderMock)
- messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = await client.chat.completions.create(messages, "Hello", stop=["and"])
- self.assertIsInstance(response, ChatCompletion)
- self.assertEqual("How are you?", response.choices[0].message.content)
-
-if __name__ == '__main__':
- unittest.main() \ No newline at end of file
diff --git a/etc/unittest/client.py b/etc/unittest/client.py
index ec8aa4b7..54e2091f 100644
--- a/etc/unittest/client.py
+++ b/etc/unittest/client.py
@@ -5,52 +5,54 @@ from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderM
DEFAULT_MESSAGES = [{'role': 'user', 'content': 'Hello'}]
-class TestPassModel(unittest.TestCase):
+class AsyncTestPassModel(unittest.IsolatedAsyncioTestCase):
- def test_response(self):
+ async def test_response(self):
client = Client(provider=AsyncGeneratorProviderMock)
- response = client.chat.completions.create(DEFAULT_MESSAGES, "")
+ response = await client.chat.completions.async_create(DEFAULT_MESSAGES, "")
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("Mock", response.choices[0].message.content)
- def test_pass_model(self):
+ async def test_pass_model(self):
client = Client(provider=ModelProviderMock)
- response = client.chat.completions.create(DEFAULT_MESSAGES, "Hello")
+ response = await client.chat.completions.async_create(DEFAULT_MESSAGES, "Hello")
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("Hello", response.choices[0].message.content)
- def test_max_tokens(self):
+ async def test_max_tokens(self):
client = Client(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = client.chat.completions.create(messages, "Hello", max_tokens=1)
+ response = await client.chat.completions.async_create(messages, "Hello", max_tokens=1)
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("How ", response.choices[0].message.content)
- response = client.chat.completions.create(messages, "Hello", max_tokens=2)
+ response = await client.chat.completions.async_create(messages, "Hello", max_tokens=2)
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("How are ", response.choices[0].message.content)
- def test_max_stream(self):
+ async def test_max_stream(self):
client = Client(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = client.chat.completions.create(messages, "Hello", stream=True)
- for chunk in response:
+ response = await client.chat.completions.async_create(messages, "Hello", stream=True)
+ async for chunk in response:
self.assertIsInstance(chunk, ChatCompletionChunk)
if chunk.choices[0].delta.content is not None:
self.assertIsInstance(chunk.choices[0].delta.content, str)
messages = [{'role': 'user', 'content': chunk} for chunk in ["You ", "You ", "Other", "?"]]
- response = client.chat.completions.create(messages, "Hello", stream=True, max_tokens=2)
- response = list(response)
- self.assertEqual(len(response), 3)
- for chunk in response:
+ response = await client.chat.completions.async_create(messages, "Hello", stream=True, max_tokens=2)
+ response_list = []
+ async for chunk in response:
+ response_list.append(chunk)
+ self.assertEqual(len(response_list), 3)
+ for chunk in response_list:
if chunk.choices[0].delta.content is not None:
self.assertEqual(chunk.choices[0].delta.content, "You ")
- def test_stop(self):
+ async def test_stop(self):
client = Client(provider=YieldProviderMock)
messages = [{'role': 'user', 'content': chunk} for chunk in ["How ", "are ", "you", "?"]]
- response = client.chat.completions.create(messages, "Hello", stop=["and"])
+ response = await client.chat.completions.async_create(messages, "Hello", stop=["and"])
self.assertIsInstance(response, ChatCompletion)
self.assertEqual("How are you?", response.choices[0].message.content)
if __name__ == '__main__':
- unittest.main() \ No newline at end of file
+ unittest.main()