From 6ef282de3a3245acbfecd08ae48dba85ff91d031 Mon Sep 17 00:00:00 2001 From: H Lohaus Date: Tue, 12 Mar 2024 02:06:06 +0100 Subject: Remove all not working provider (#1679) Fix many providers Add selenium-wire to requirements --- g4f/Provider/ChatForAi.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) (limited to 'g4f/Provider/ChatForAi.py') diff --git a/g4f/Provider/ChatForAi.py b/g4f/Provider/ChatForAi.py index afab034b..5aa728a1 100644 --- a/g4f/Provider/ChatForAi.py +++ b/g4f/Provider/ChatForAi.py @@ -2,15 +2,17 @@ from __future__ import annotations import time import hashlib +import uuid from ..typing import AsyncResult, Messages from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider +from ..errors import RateLimitError +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin - -class ChatForAi(AsyncGeneratorProvider): +class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chatforai.store" working = True + default_model = "gpt-3.5-turbo" supports_message_history = True supports_gpt_35_turbo = True @@ -21,36 +23,39 @@ class ChatForAi(AsyncGeneratorProvider): messages: Messages, proxy: str = None, timeout: int = 120, + temperature: float = 0.7, + top_p: float = 1, **kwargs ) -> AsyncResult: + model = cls.get_model(model) headers = { "Content-Type": "text/plain;charset=UTF-8", "Origin": cls.url, "Referer": f"{cls.url}/?r=b", } - async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session: - prompt = messages[-1]["content"] + async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session: timestamp = int(time.time() * 1e3) - conversation_id = f"id_{timestamp-123}" + conversation_id = str(uuid.uuid4()) data = { "conversationId": conversation_id, "conversationType": "chat_continuous", "botId": "chat_continuous", "globalSettings":{ "baseUrl": 
"https://api.openai.com", - "model": model if model else "gpt-3.5-turbo", + "model": model, "messageHistorySize": 5, - "temperature": 0.7, - "top_p": 1, + "temperature": temperature, + "top_p": top_p, **kwargs }, - "botSettings": {}, - "prompt": prompt, + "prompt": "", "messages": messages, "timestamp": timestamp, - "sign": generate_signature(timestamp, prompt, conversation_id) + "sign": generate_signature(timestamp, "", conversation_id) } async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: + if response.status == 429: + raise RateLimitError("Rate limit reached") response.raise_for_status() async for chunk in response.iter_content(): if b"https://chatforai.store" in chunk: @@ -59,5 +64,5 @@ class ChatForAi(AsyncGeneratorProvider): def generate_signature(timestamp: int, message: str, id: str): - buffer = f"{timestamp}:{id}:{message}:7YN8z6d6" + buffer = f"{id}:{timestamp}:{message}:h496Jd6b" return hashlib.sha256(buffer.encode()).hexdigest() -- cgit v1.2.3