from __future__ import annotations

import time
import hashlib
import random
from typing import AsyncGenerator, Optional, Dict, Any

from ..typing import Messages
from ..requests import StreamSession, raise_for_status
from .base_provider import AsyncGeneratorProvider
from ..errors import RateLimitError

# Constants
DOMAINS = [
    "https://s.aifree.site",
    "https://v.aifree.site"
]
# Upstream error string meaning "today's quota for the current region has been exhausted"
RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"


class FreeGpt(AsyncGeneratorProvider):
    url: str = "https://freegptsnav.aifree.site"
    working: bool = True
    supports_message_history: bool = True
    supports_system_message: bool = True
    supports_gpt_35_turbo: bool = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: Optional[str] = None,
        timeout: int = 120,
        **kwargs: Any
    ) -> AsyncGenerator[str, None]:
        prompt = messages[-1]["content"]
        timestamp = int(time.time())
        data = cls._build_request_data(messages, prompt, timestamp)

        # Spread requests across the available upstream domains.
        domain = random.choice(DOMAINS)

        async with StreamSession(
            impersonate="chrome",
            timeout=timeout,
            proxies={"all": proxy} if proxy else None
        ) as session:
            async with session.post(f"{domain}/api/generate", json=data) as response:
                await raise_for_status(response)
                async for chunk in response.iter_content():
                    chunk_decoded = chunk.decode(errors="ignore")
                    # The upstream API reports quota exhaustion as a plain-text message.
                    if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
                        raise RateLimitError("Rate limit reached")
                    yield chunk_decoded

    @staticmethod
    def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
        return {
            "messages": messages,
            "time": timestamp,
            "pass": None,
            "sign": generate_signature(timestamp, prompt, secret)
        }


def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    # SHA-256 over "timestamp:message:secret", sent as the "sign" field of the request payload.
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()
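

# Minimal usage sketch (assumption: this module is imported from its parent
# package; the model name and message shape below are illustrative, not part
# of this file's API):
#
#     import asyncio
#
#     async def demo() -> None:
#         messages = [{"role": "user", "content": "Hello"}]
#         async for chunk in FreeGpt.create_async_generator("gpt-3.5-turbo", messages):
#             print(chunk, end="", flush=True)
#
#     asyncio.run(demo())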