Diffstat (limited to 'g4f/Provider/not_working')
 g4f/Provider/not_working/AI365VIP.py        |  69
 g4f/Provider/not_working/AIChatFree.py      |  76
 g4f/Provider/not_working/AiChatOnline.py    |  61
 g4f/Provider/not_working/AiChats.py         | 105
 g4f/Provider/not_working/AmigoChat.py       | 189
 g4f/Provider/not_working/Aura.py            |  49
 g4f/Provider/not_working/Chatgpt4Online.py  |  78
 g4f/Provider/not_working/Chatgpt4o.py       |  88
 g4f/Provider/not_working/ChatgptFree.py     | 106
 g4f/Provider/not_working/FlowGpt.py         | 101
 g4f/Provider/not_working/FreeNetfly.py      | 105
 g4f/Provider/not_working/GPROChat.py        |  67
 g4f/Provider/not_working/Koala.py           |  79
 g4f/Provider/not_working/MyShell.py         |  76
 g4f/Provider/not_working/__init__.py        |  14
 15 files changed, 1263 insertions(+), 0 deletions(-)
diff --git a/g4f/Provider/not_working/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py
new file mode 100644
index 00000000..a4bac0e2
--- /dev/null
+++ b/g4f/Provider/not_working/AI365VIP.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-16k',
+ 'gpt-4o',
+ ]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/en",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-full-version": '"127.0.6533.119"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-model": '""',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-platform-version": '"4.19.276"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ }
+ async with ClientSession(headers=headers) as session:
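+            # The payload mirrors the site's web UI: a model descriptor plus
+            # the conversation flattened into a single user message. The
+            # name/maxLength/tokenLimit fields appear to be fixed UI defaults
+            # rather than per-model values.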
+ data = {
+ "model": {
+ "id": model,
+ "name": "GPT-3.5",
+ "maxLength": 3000,
+ "tokenLimit": 2048
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "key": "",
+ "prompt": "You are a helpful assistant.",
+ "temperature": 1
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
new file mode 100644
index 00000000..a4f80d47
--- /dev/null
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://aichatfree.info"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
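+            # Requests are authenticated with a sha256 signature over the
+            # millisecond timestamp and the latest message text (see
+            # generate_signature below).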
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(timestamp: int, text: str, secret: str = "") -> str:
+    message = f"{timestamp}:{text}:{secret}"
+    return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/not_working/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
new file mode 100644
index 00000000..ccfc691e
--- /dev/null
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+ site_url = "https://aichatonline.org"
+ url = "https://aichatonlineorg.erweima.ai"
+ api_endpoint = "/aichatonline/api/chat/gpt"
+ working = False
+ default_model = 'gpt-4o-mini'
+
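+    # The API expects a per-visitor "UniqueId" token, fetched from the
+    # getUniqueId endpoint and echoed back as a request header.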
+ @classmethod
+ async def grab_token(
+ cls,
+ session: ClientSession,
+ proxy: str
+ ):
+ async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+ response.raise_for_status()
+ return (await response.json())['data']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/chatgpt/chat/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "aichatonline.org",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "conversationId": get_random_string(),
+ "prompt": format_prompt(messages),
+ }
+ headers['UniqueId'] = await cls.grab_token(session, proxy)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+                try:
+                    yield json.loads(chunk)['data']['message']
+                except (json.JSONDecodeError, KeyError):
+                    continue
\ No newline at end of file
diff --git a/g4f/Provider/not_working/AiChats.py b/g4f/Provider/not_working/AiChats.py
new file mode 100644
index 00000000..51a85c91
--- /dev/null
+++ b/g4f/Provider/not_working/AiChats.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import base64
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
+
+class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://ai-chats.org"
+ api_endpoint = "https://ai-chats.org/chat/send2/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4'
+ models = ['gpt-4', 'dalle']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ 'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
+ }
+
+ async with ClientSession(headers=headers) as session:
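+            # 'dalle' requests return JSON with image URLs; chat requests
+            # return an SSE stream that is reassembled into one message below.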
+ if model == 'dalle':
+ prompt = messages[-1]['content'] if messages else ""
+ else:
+ prompt = format_prompt(messages)
+
+ data = {
+ "type": "image" if model == 'dalle' else "chat",
+ "messagesHistory": [
+ {
+ "from": "you",
+ "content": prompt
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ if model == 'dalle':
+ response_json = await response.json()
+
+ if 'data' in response_json and response_json['data']:
+ image_url = response_json['data'][0].get('url')
+ if image_url:
+ async with session.get(image_url) as img_response:
+ img_response.raise_for_status()
+ image_data = await img_response.read()
+
+ base64_image = base64.b64encode(image_data).decode('utf-8')
+ base64_url = f"data:image/png;base64,{base64_image}"
+ yield ImageResponse(base64_url, prompt)
+ else:
+ yield f"Error: No image URL found in the response. Full response: {response_json}"
+ else:
+ yield f"Error: Unexpected response format. Full response: {response_json}"
+ else:
+ full_response = await response.text()
+ message = ""
+ for line in full_response.split('\n'):
+ if line.startswith('data: ') and line != 'data: ':
+ message += line[6:]
+
+ message = message.strip()
+ yield message
+ except Exception as e:
+ yield f"Error occurred: {str(e)}"
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+        result = ""
+        async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+            if isinstance(response, ImageResponse):
+                return response.images[0]
+            result = response
+        return result
diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py
new file mode 100644
index 00000000..274a5e14
--- /dev/null
+++ b/g4f/Provider/not_working/AmigoChat.py
@@ -0,0 +1,189 @@
+from __future__ import annotations
+
+import json
+import uuid
+from aiohttp import ClientSession, ClientTimeout
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...image import ImageResponse
+
+class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://amigochat.io/chat/"
+ chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
+ image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4o-mini'
+
+ chat_models = [
+ 'gpt-4o',
+ default_model,
+ 'o1-preview',
+ 'o1-mini',
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo',
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
+ 'claude-3-sonnet-20240229',
+ 'gemini-1.5-pro',
+ ]
+
+ image_models = [
+ 'flux-pro/v1.1',
+ 'flux-realism',
+ 'flux-pro',
+ 'dalle-e-3',
+ ]
+
+ models = [*chat_models, *image_models]
+
+ model_aliases = {
+ "o1": "o1-preview",
+ "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+ "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+ "claude-3.5-sonnet": "claude-3-sonnet-20240229",
+ "gemini-pro": "gemini-1.5-pro",
+
+ "flux-pro": "flux-pro/v1.1",
+ "dalle-3": "dalle-e-3",
+ }
+
+ persona_ids = {
+ 'gpt-4o': "gpt",
+ 'gpt-4o-mini': "amigo",
+ 'o1-preview': "openai-o-one",
+ 'o1-mini': "openai-o-one-mini",
+ 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': "llama-three-point-one",
+ 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': "llama-3-2",
+ 'claude-3-sonnet-20240229': "claude",
+ 'gemini-1.5-pro': "gemini-1-5-pro",
+ 'flux-pro/v1.1': "flux-1-1-pro",
+ 'flux-realism': "flux-realism",
+ 'flux-pro': "flux-pro",
+ 'dalle-e-3': "dalle-three",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ def get_personaId(cls, model: str) -> str:
+ return cls.persona_ids[model]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ device_uuid = str(uuid.uuid4())
+ max_retries = 3
+ retry_count = 0
+
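+        # On each failed attempt a fresh device UUID is generated before
+        # retrying, since the API appears to track usage per device id.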
+ while retry_count < max_retries:
+ try:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "authorization": "Bearer",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "x-device-language": "en-US",
+ "x-device-platform": "web",
+ "x-device-uuid": device_uuid,
+ "x-device-version": "1.0.32"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model in cls.chat_models:
+ # Chat completion
+ data = {
+ "messages": [{"role": m["role"], "content": m["content"]} for m in messages],
+ "model": model,
+ "personaId": cls.get_personaId(model),
+ "frequency_penalty": 0,
+ "max_tokens": 4000,
+ "presence_penalty": 0,
+ "stream": stream,
+ "temperature": 0.5,
+ "top_p": 0.95
+ }
+
+ timeout = ClientTimeout(total=300) # 5 minutes timeout
+ async with session.post(cls.chat_api_endpoint, json=data, proxy=proxy, timeout=timeout) as response:
+ if response.status not in (200, 201):
+ error_text = await response.text()
+ raise Exception(f"Error {response.status}: {error_text}")
+
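+                            # OpenAI-compatible SSE stream: every event line
+                            # is "data: {json}", terminated by "data: [DONE]".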
+ async for line in response.content:
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ if line == 'data: [DONE]':
+ break
+ try:
+ chunk = json.loads(line[6:]) # Remove 'data: ' prefix
+ if 'choices' in chunk and len(chunk['choices']) > 0:
+ choice = chunk['choices'][0]
+ if 'delta' in choice:
+ content = choice['delta'].get('content')
+ elif 'text' in choice:
+ content = choice['text']
+ else:
+ content = None
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ pass
+ else:
+ # Image generation
+ prompt = messages[-1]['content']
+ data = {
+ "prompt": prompt,
+ "model": model,
+ "personaId": cls.get_personaId(model)
+ }
+ async with session.post(cls.image_api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ response_data = await response.json()
+
+ if "data" in response_data:
+ image_urls = []
+ for item in response_data["data"]:
+ if "url" in item:
+ image_url = item["url"]
+ image_urls.append(image_url)
+ if image_urls:
+ yield ImageResponse(image_urls, prompt)
+ else:
+ yield None
+
+ break
+
+            except Exception:
+                retry_count += 1
+                if retry_count >= max_retries:
+                    raise
+ device_uuid = str(uuid.uuid4())
diff --git a/g4f/Provider/not_working/Aura.py b/g4f/Provider/not_working/Aura.py
new file mode 100644
index 00000000..e841d909
--- /dev/null
+++ b/g4f/Provider/not_working/Aura.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
+
+class Aura(AsyncGeneratorProvider):
+ url = "https://openchat.team"
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.5,
+ max_tokens: int = 8192,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> AsyncResult:
+ args = get_args_from_browser(cls.url, webdriver, proxy)
+ async with ClientSession(**args) as session:
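+            # System messages are pulled out and joined into the "prompt"
+            # field; the remaining messages are sent as the chat history.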
+ new_messages = []
+ system_message = []
+ for message in messages:
+ if message["role"] == "system":
+ system_message.append(message["content"])
+ else:
+ new_messages.append(message)
+ data = {
+ "model": {
+ "id": "openchat_3.6",
+ "name": "OpenChat 3.6 (latest)",
+ "maxLength": 24576,
+ "tokenLimit": max_tokens
+ },
+ "messages": new_messages,
+ "key": "",
+ "prompt": "\n".join(system_message),
+ "temperature": temperature
+ }
+ async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+                yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/not_working/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py
new file mode 100644
index 00000000..b0552e45
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4Online.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Chatgpt4Online(AsyncGeneratorProvider):
+ url = "https://chatgpt4online.org"
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = False
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+    @classmethod
+    async def get_nonce(cls, headers: dict) -> str:
+        # The WordPress plugin issues a REST nonce per session; it is sent
+        # back later as the "x-wp-nonce" header.
+        async with ClientSession(headers=headers) as session:
+            async with session.post(f"{cls.url}/wp-json/mwai/v1/start_session") as response:
+                return (await response.json())["restNonce"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ headers['x-wp-nonce'] = await cls.get_nonce(headers)
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
+ }
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
+
diff --git a/g4f/Provider/not_working/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
new file mode 100644
index 00000000..ba264d40
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import re
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [
+ 'gpt-4o-mini-2024-07-18',
+ ]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
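+            # The WordPress AJAX endpoint needs a nonce and post id; both are
+            # scraped from the landing page once and cached on the class.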
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
diff --git a/g4f/Provider/not_working/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
new file mode 100644
index 00000000..6b3877b1
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import re
+import json
+import asyncio
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatgptfree.ai"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [default_model]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator[str, None]:
+ headers = {
+ 'authority': 'chatgptfree.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgptfree.ai',
+ 'referer': 'https://chatgptfree.ai/chat/',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+            if not cls._nonce:
+                async with session.get(f"{cls.url}/") as response:
+                    await raise_for_status(response)
+                    response_text = await response.text()
+
+                    result = re.search(r'data-post-id="([0-9]+)"', response_text)
+                    if not result:
+                        raise RuntimeError("No post id found")
+                    cls._post_id = result.group(1)
+
+                    result = re.search(r'data-nonce="(.*?)"', response_text)
+                    if result:
+                        cls._nonce = result.group(1)
+                    else:
+                        raise RuntimeError("No nonce found")
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ buffer = ""
+ async for line in response.iter_lines():
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ data = line[6:]
+ if data == '[DONE]':
+ break
+ try:
+ json_data = json.loads(data)
+ content = json_data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ continue
+ elif line:
+ buffer += line
+
+ if buffer:
+ try:
+ json_response = json.loads(buffer)
+ if 'data' in json_response:
+ yield json_response['data']
+ except json.JSONDecodeError:
+ print(f"Failed to decode final JSON. Buffer content: {buffer}")
diff --git a/g4f/Provider/not_working/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
new file mode 100644
index 00000000..b7d8537a
--- /dev/null
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import json
+import time
+import hashlib
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
+
+class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://flowgpt.com/chat"
+ working = False
+ supports_message_history = True
+ supports_system_message = True
+ default_model = "gpt-3.5-turbo"
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-3.5-long",
+ "gpt-4-turbo",
+ "google-gemini",
+ "claude-instant",
+ "claude-v1",
+ "claude-v2",
+ "llama2-13b",
+ "mythalion-13b",
+ "pygmalion-13b",
+ "chronos-hermes-13b",
+ "Mixtral-8x7B",
+ "Dolphin-2.6-8x7B",
+ ]
+ model_aliases = {
+ "gemini": "google-gemini",
+ "gemini-pro": "google-gemini"
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
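+        # Anonymous requests are signed: md5("{timestamp}-{nonce}-{auth}") is
+        # sent as x-signature alongside the raw timestamp and nonce headers.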
+ timestamp = str(int(time.time()))
+ auth = "Bearer null"
+ nonce = get_random_hex()
+ data = f"{timestamp}-{nonce}-{auth}"
+ signature = hashlib.md5(data.encode()).hexdigest()
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": "https://flowgpt.com/",
+ "Content-Type": "application/json",
+ "Origin": "https://flowgpt.com",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-site",
+ "TE": "trailers",
+ "Authorization": auth,
+ "x-flow-device-id": f"f-{get_random_string(19)}",
+ "x-nonce": nonce,
+ "x-signature": signature,
+ "x-timestamp": timestamp
+ }
+ async with ClientSession(headers=headers) as session:
+ history = [message for message in messages[:-1] if message["role"] != "system"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+                system_message = "You are a helpful assistant. Follow the user's instructions carefully."
+ data = {
+ "model": model,
+ "nsfw": False,
+ "question": messages[-1]["content"],
+ "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
+ "system": system_message,
+ "temperature": temperature,
+ "promptId": f"model-{model}",
+ "documentIds": [],
+ "chatFileDocumentIds": [],
+ "generateImage": False,
+ "generateAudio": False
+ }
+ async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content:
+ if chunk.strip():
+ message = json.loads(chunk)
+ if "event" not in message:
+ continue
+ if message["event"] == "text":
+ yield message["data"]
diff --git a/g4f/Provider/not_working/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
new file mode 100644
index 00000000..8362019c
--- /dev/null
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 5
+ retry_delay = 2
+
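+        # Transient network errors are retried with exponential backoff,
+        # doubling the delay (starting at 2s) between attempts.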
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
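+        # SSE events end with a blank line; buffer lines until a complete
+        # event ("\n\n") arrives, then parse each "data: " payload as JSON.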
+ async for line in response.content:
+ buffer += line.decode('utf-8')
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
new file mode 100644
index 00000000..52c7f947
--- /dev/null
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+import hashlib
+import time
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "GPROChat"
+ url = "https://gprochat.com"
+ api_endpoint = "https://gprochat.com/api/generate"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
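+    # Each request is signed with sha256("{timestamp}:{message}:{secret}")
+    # using the fixed key below, mirroring AIChatFree's scheme.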
+ @staticmethod
+ def generate_signature(timestamp: int, message: str) -> str:
+ secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
+ hash_input = f"{timestamp}:{message}:{secret_key}"
+ signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+ return signature
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = int(time.time() * 1000)
+ prompt = format_prompt(messages)
+ sign = cls.generate_signature(timestamp, prompt)
+
+ headers = {
+ "accept": "*/*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "content-type": "text/plain;charset=UTF-8"
+ }
+
+ data = {
+ "messages": [{"role": "user", "parts": [{"text": prompt}]}],
+ "time": timestamp,
+ "pass": None,
+ "sign": sign
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/Koala.py b/g4f/Provider/not_working/Koala.py
new file mode 100644
index 00000000..d6230da7
--- /dev/null
+++ b/g4f/Provider/not_working/Koala.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import json
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
+
+class Koala(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://koala.sh/chat"
+ api_endpoint = "https://koala.sh/api/gpt/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4o-mini'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
+ if not model:
+ model = "gpt-4o-mini"
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}",
+ "Flag-Real-Time-Data": "false",
+ "Visitor-ID": get_random_string(20),
+ "Origin": "https://koala.sh",
+ "Alt-Used": "koala.sh",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers",
+ }
+
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
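+            # Koala takes the newest user message as "input"; earlier user
+            # and assistant turns are sent as separate history lists.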
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
+ if system_messages:
+ input_text += f" {system_messages}"
+
+ data = {
+ "input": input_text,
+ "inputHistory": [
+ message["content"]
+ for message in messages[:-1]
+ if message["role"] == "user"
+ ],
+ "outputHistory": [
+ message["content"]
+ for message in messages
+ if message["role"] == "assistant"
+ ],
+ "model": model,
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
diff --git a/g4f/Provider/not_working/MyShell.py b/g4f/Provider/not_working/MyShell.py
new file mode 100644
index 00000000..02e182d4
--- /dev/null
+++ b/g4f/Provider/not_working/MyShell.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time, json
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
+
+class MyShell(AbstractProvider):
+ url = "https://app.myshell.ai/chat"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+ bypass_cloudflare(driver, cls.url, timeout)
+
+ # Send request with message
+ data = {
+ "botId": "4738",
+ "conversation_scenario": 3,
+ "message": format_prompt(messages),
+ "messageType": 1
+ }
+ script = """
+response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
+ "headers": {
+ "accept": "application/json",
+ "content-type": "application/json",
+ "myshell-service-name": "organics-api",
+ "visitor-id": localStorage.getItem("mix_visitorId")
+ },
+ "body": '{body}',
+ "method": "POST"
+})
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+ driver.execute_script(script.replace("{body}", json.dumps(data)))
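+            # Poll the reader stored on window: each call drains one chunk of
+            # the SSE stream; a null result signals the stream has finished.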
+ script = """
+chunk = await window._reader.read();
+if (chunk.done) {
+ return null;
+}
+content = '';
+chunk.value.split('\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ try {
+ const data = JSON.parse(line.substring('data: '.length));
+ if ('content' in data) {
+ content += data['content'];
+ }
+ } catch(e) {}
+ }
+});
+return content;
+"""
+            while True:
+                chunk = driver.execute_script(script)
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    # A null return means the reader reported done
+                    break
+                else:
+                    # Empty string: no new data yet, keep polling
+                    time.sleep(0.1)
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 00000000..a6edf5f8
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,14 @@
+from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .AmigoChat import AmigoChat
+from .Aura import Aura
+from .Chatgpt4Online import Chatgpt4Online
+from .Chatgpt4o import Chatgpt4o
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell