-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  53
-rw-r--r--  g4f/Provider/openai/proofofwork.py     39
-rw-r--r--  g4f/Provider/you/har_file.py           15
-rw-r--r--  g4f/api/__init__.py                    40
-rw-r--r--  g4f/cli.py                              2
5 files changed, 127 insertions(+), 22 deletions(-)
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 515230f0..056a3702 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -24,15 +24,31 @@ except ImportError:
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...webdriver import get_browser
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
-from ...requests import DEFAULT_HEADERS, get_args_from_browser, raise_for_status
+from ...requests import get_args_from_browser, raise_for_status
from ...requests.aiohttp import StreamSession
from ...image import to_image, to_bytes, ImageResponse, ImageRequest
from ...errors import MissingAuthError, ResponseError
from ...providers.conversation import BaseConversation
from ..helper import format_cookies
from ..openai.har_file import getArkoseAndAccessToken, NoValidHarFileError
+from ..openai.proofofwork import generate_proof_token
from ... import debug
+DEFAULT_HEADERS = {
+    "accept": "*/*",
+    "accept-encoding": "gzip, deflate, br, zstd",
+    "accept-language": "en-US,en;q=0.5",
+    "referer": "https://chat.openai.com/",
+    "sec-ch-ua": "\"Brave\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
+    "sec-ch-ua-mobile": "?0",
+    "sec-ch-ua-platform": "\"Windows\"",
+    "sec-fetch-dest": "empty",
+    "sec-fetch-mode": "cors",
+    "sec-fetch-site": "same-origin",
+    "sec-gpc": "1",
+    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
+}
+
+
class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"""A class for creating and managing conversations with OpenAI chat service"""
@@ -355,11 +371,21 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._set_api_key(api_key)
if cls.default_model is None and (not cls.needs_auth or cls._api_key is not None):
+ if cls._api_key is None:
+ cls._create_request_args(cookies)
+ async with session.get(
+ f"{cls.url}/",
+ headers=DEFAULT_HEADERS
+ ) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response)
try:
if not model:
cls.default_model = cls.get_model(await cls.get_default_model(session, cls._headers))
else:
cls.default_model = cls.get_model(model)
+ except MissingAuthError:
+ pass
except Exception as e:
api_key = cls._api_key = None
cls._create_request_args()
@@ -395,9 +421,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
blob = data["arkose"]["dx"]
need_arkose = data["arkose"]["required"]
chat_token = data["token"]
-
- if debug.logging:
- print(f'Arkose: {need_arkose} Turnstile: {data["turnstile"]["required"]}')
+ proofofwork = ""
+ if "proofofwork" in data:
+ proofofwork = generate_proof_token(**data["proofofwork"], user_agent=cls._headers["user-agent"])
if need_arkose and arkose_token is None:
arkose_token, api_key, cookies, headers = await getArkoseAndAccessToken(proxy)
@@ -405,6 +431,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._set_api_key(api_key)
if arkose_token is None:
raise MissingAuthError("No arkose token found in .har file")
+
+ if debug.logging:
+ print(
+ 'Arkose:', False if not need_arkose else arkose_token[:12]+"...",
+ 'Turnstile:', data["turnstile"]["required"],
+ 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
+ )
try:
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
@@ -439,12 +472,14 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
headers = {
- "Accept": "text/event-stream",
- "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
+ "accept": "text/event-stream",
+ "Openai-Sentinel-Chat-Requirements-Token": chat_token,
**cls._headers
}
if need_arkose:
- headers["OpenAI-Sentinel-Arkose-Token"] = arkose_token
+ headers["Openai-Sentinel-Arkose-Token"] = arkose_token
+ if proofofwork is not None:
+ headers["Openai-Sentinel-Proof-Token"] = proofofwork
async with session.post(
f"{cls.url}/backend-anon/conversation" if cls._api_key is None else
f"{cls.url}/backend-api/conversation",
@@ -671,8 +706,6 @@ this.fetch = async (url, options) => {
return {
**DEFAULT_HEADERS,
"content-type": "application/json",
- "oai-device-id": str(uuid.uuid4()),
- "oai-language": "en-US",
}
@classmethod
@@ -698,6 +731,8 @@ this.fetch = async (url, options) => {
@classmethod
def _update_cookie_header(cls):
cls._headers["cookie"] = format_cookies(cls._cookies)
+ if "oai-did" in cls._cookies:
+ cls._headers["oai-device-id"] = cls._cookies["oai-did"]
class Conversation(BaseConversation):
"""
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
new file mode 100644
index 00000000..e44ef6f7
--- /dev/null
+++ b/g4f/Provider/openai/proofofwork.py
@@ -0,0 +1,39 @@
+import random
+import hashlib
+import json
+import base64
+from datetime import datetime, timedelta, timezone
+
+def generate_proof_token(required: bool, seed: str, difficulty: str, user_agent: str):
+    if not required:
+        return
+
+    cores = [8, 12, 16, 24]
+    screens = [3000, 4000, 6000]
+
+    core = random.choice(cores)
+    screen = random.choice(screens)
+
+    # Get current UTC time
+    now_utc = datetime.now(timezone.utc)
+    # Convert UTC time to Eastern Time
+    now_et = now_utc.astimezone(timezone(timedelta(hours=-5)))
+
+    parse_time = now_et.strftime('%a, %d %b %Y %H:%M:%S GMT')
+
+    config = [core + screen, parse_time, 4294705152, 0, user_agent]
+
+    diff_len = len(difficulty) // 2
+
+    for i in range(100000):
+        config[3] = i
+        json_data = json.dumps(config)
+        base = base64.b64encode(json_data.encode()).decode()
+        hash_value = hashlib.sha3_512((seed + base).encode()).digest()
+
+        if hash_value.hex()[:diff_len] <= difficulty:
+            result = "gAAAAAB" + base
+            return result
+
+    fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
+    return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base
diff --git a/g4f/Provider/you/har_file.py b/g4f/Provider/you/har_file.py
index 791db7f1..8089d686 100644
--- a/g4f/Provider/you/har_file.py
+++ b/g4f/Provider/you/har_file.py
@@ -85,20 +85,21 @@ async def get_telemetry_ids(proxy: str = None) -> list:
from nodriver import start
except ImportError:
raise MissingRequirementsError('Add .har file from you.com or install "nodriver" package | pip install -U nodriver')
+ page = None
try:
browser = await start()
- tab = browser.main_tab
- await browser.get("https://you.com")
+ page = await browser.get("https://you.com")
- while not await tab.evaluate('"GetTelemetryID" in this'):
- await tab.sleep(1)
+ while not await page.evaluate('"GetTelemetryID" in this'):
+ await page.sleep(1)
async def get_telemetry_id():
- return await tab.evaluate(
+ return await page.evaluate(
f'this.GetTelemetryID("{public_token}", "{telemetry_url}");',
await_promise=True
)
- return [await get_telemetry_id() for _ in range(1)]
+ return [await get_telemetry_id()]
finally:
-        await tab.close()
\ No newline at end of file
+        if page is not None:
+            await page.close()
\ No newline at end of file
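
The you.com change follows nodriver's API, where browser.get() returns the tab it navigated to: the returned page is polled until the site exposes GetTelemetryID, and the finally block only closes the page when navigation actually succeeded. A minimal sketch of the same pattern, with the polled global and URL left as placeholders:

from nodriver import start

async def read_global_when_ready(url: str, expression: str):
    """Open a page, wait for a JS global to appear, then evaluate an expression."""
    browser = await start()
    page = None
    try:
        page = await browser.get(url)
        # Poll until the page-side helper exists (placeholder global name).
        while not await page.evaluate('"SomeHelper" in this'):
            await page.sleep(1)
        # await_promise resolves the returned promise before handing it back.
        return await page.evaluate(expression, await_promise=True)
    finally:
        if page is not None:
            await page.close()
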
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index aaac1827..d379653f 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -3,11 +3,14 @@ from __future__ import annotations
import logging
import json
import uvicorn
+import secrets
from fastapi import FastAPI, Response, Request
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from fastapi.exceptions import RequestValidationError
-from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY
+from fastapi.security import APIKeyHeader
+from starlette.exceptions import HTTPException
+from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY, HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from typing import Union, Optional
@@ -17,10 +20,11 @@ import g4f.debug
from g4f.client import AsyncClient
from g4f.typing import Messages
-def create_app() -> FastAPI:
+def create_app(g4f_api_key:str = None):
app = FastAPI()
- api = Api(app)
+ api = Api(app, g4f_api_key=g4f_api_key)
api.register_routes()
+ api.register_authorization()
api.register_validation_exception_handler()
return app
@@ -43,9 +47,32 @@ def set_list_ignored_providers(ignored: list[str]):
list_ignored_providers = ignored
class Api:
- def __init__(self, app: FastAPI) -> None:
+ def __init__(self, app: FastAPI, g4f_api_key=None) -> None:
self.app = app
self.client = AsyncClient()
+ self.g4f_api_key = g4f_api_key
+ self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key")
+
+ def register_authorization(self):
+ @self.app.middleware("http")
+ async def authorization(request: Request, call_next):
+ if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions"]:
+ try:
+ user_g4f_api_key = await self.get_g4f_api_key(request)
+ except HTTPException as e:
+ if e.status_code == 403:
+ return JSONResponse(
+ status_code=HTTP_401_UNAUTHORIZED,
+ content=jsonable_encoder({"detail": "G4F API key required"}),
+ )
+ if not secrets.compare_digest(self.g4f_api_key, user_g4f_api_key):
+ return JSONResponse(
+ status_code=HTTP_403_FORBIDDEN,
+ content=jsonable_encoder({"detail": "Invalid G4F API key"}),
+ )
+
+ response = await call_next(request)
+ return response
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
@@ -153,7 +180,8 @@ def run_api(
bind: str = None,
debug: bool = False,
workers: int = None,
- use_colors: bool = None
+ use_colors: bool = None,
+ g4f_api_key: str = None
) -> None:
print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else ""))
if use_colors is None:
@@ -162,4 +190,4 @@ def run_api(
host, port = bind.split(":")
if debug:
g4f.debug.logging = True
-    uvicorn.run("g4f.api:create_app", host=host, port=int(port), workers=workers, use_colors=use_colors, factory=True)#
\ No newline at end of file
+    uvicorn.run(create_app(g4f_api_key), host=host, port=int(port), workers=workers, use_colors=use_colors)
\ No newline at end of file
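
With a key configured, the middleware above guards /v1/chat/completions and /v1/completions: a missing "g4f-api-key" header yields 401, and a mismatching one yields 403 (checked with secrets.compare_digest). A client-side sketch, assuming the server runs on the default local port and "my-secret-key" was passed at startup:

import requests

response = requests.post(
    "http://localhost:1337/v1/chat/completions",  # host/port are placeholders
    headers={"g4f-api-key": "my-secret-key"},     # must match the configured key
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(response.status_code)
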
diff --git a/g4f/cli.py b/g4f/cli.py
index 86fc2dbb..9037a6f1 100644
--- a/g4f/cli.py
+++ b/g4f/cli.py
@@ -16,6 +16,7 @@ def main():
api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.")
api_parser.add_argument("--disable-colors", action="store_true", help="Don't use colors.")
api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files.")
+ api_parser.add_argument("--g4f-api-key", type=str, default=None, help="Sets an authentication key for your API.")
api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider for provider in Provider.__map__],
default=[], help="List of providers to ignore when processing request.")
subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
@@ -42,6 +43,7 @@ def run_api_args(args):
bind=args.bind,
debug=args.debug,
workers=args.workers,
+ g4f_api_key=args.g4f_api_key,
use_colors=not args.disable_colors
)
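
The CLI change only forwards the new --g4f-api-key flag into run_api(), so the programmatic equivalent is a one-liner (the bind address and key below are placeholders):

from g4f.api import run_api

# Equivalent to starting the api subcommand with --g4f-api-key set.
run_api(bind="0.0.0.0:1337", g4f_api_key="my-secret-key")
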