Diffstat (limited to 'g4f/Provider')
-rw-r--r--   g4f/Provider/Bing.py                     4
-rw-r--r--   g4f/Provider/needs_auth/OpenaiChat.py   38
2 files changed, 28 insertions(+), 14 deletions(-)
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index 726faa2b..b790a6d2 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -71,7 +71,7 @@ class Conversation():
 async def create_conversation(session: ClientSession, tone: str, image: str = None, proxy: str = None) -> Conversation:
     url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1199.4'
-    async with await session.get(url, proxy=proxy) as response:
+    async with session.get(url, proxy=proxy) as response:
         data = await response.json()
         conversationId = data.get('conversationId')
@@ -115,7 +115,7 @@ async def create_conversation(session: ClientSession, tone: str, image: str = No
headers["content-type"] = f'multipart/form-data; boundary={boundary}'
headers["referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
headers["origin"] = 'https://www.bing.com'
- async with await session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
+ async with session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as image_upload_response:
if image_upload_response.status != 200:
raise Exception("Failed to upload image.")
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index fb81face..bd44628f 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 import uuid, json, time, os
 import tempfile, shutil, asyncio
+import sys, subprocess
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import get_browser, get_cookies, format_prompt, get_event_loop
@@ -144,11 +145,6 @@ class OpenaiChat(AsyncGeneratorProvider):
return f"g4f.provider.{cls.__name__} supports: ({param})"
async def get_arkose_token(proxy: str = None) -> str:
- node = shutil.which("node")
- if not node:
- if debug.logging:
- print('OpenaiChat: "node" not found')
- return
dir = os.path.dirname(os.path.dirname(__file__))
include = f'{dir}/npm/node_modules/funcaptcha'
config = {
@@ -174,14 +170,32 @@ fun.getToken(config).then(token => {
     tmp.write(source.encode())
     tmp.close()
     try:
-        p = await asyncio.create_subprocess_exec(
-            node, tmp.name,
-            stderr=asyncio.subprocess.PIPE,
-            stdout=asyncio.subprocess.PIPE
+        return await exec_js(tmp.name)
+    finally:
+        os.unlink(tmp.name)
+
+async def exec_js(file: str) -> str:
+    node = shutil.which("node")
+    if not node:
+        if debug.logging:
+            print('OpenaiChat: "node" not found')
+        return
+    if sys.platform == 'win32':
+        p = subprocess.Popen(
+            [node, file],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
         )
-        stdout, stderr = await p.communicate()
+        stdout, stderr = p.communicate()
         if p.returncode == 0:
             return stdout.decode()
         raise RuntimeError(f"Exec Error: {stderr.decode()}")
-    finally:
-        os.unlink(tmp.name)
\ No newline at end of file
+    p = await asyncio.create_subprocess_exec(
+        node, file,
+        stderr=asyncio.subprocess.PIPE,
+        stdout=asyncio.subprocess.PIPE
+    )
+    stdout, stderr = await p.communicate()
+    if p.returncode == 0:
+        return stdout.decode()
+    raise RuntimeError(f"Exec Error: {stderr.decode()}")
\ No newline at end of file
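
The OpenaiChat.py hunks move the Node invocation out of get_arkose_token into a separate exec_js helper and add a Windows-only branch that falls back to the blocking subprocess.Popen, presumably because asyncio's subprocess transports are unavailable on the selector-based event loop Windows setups sometimes use (create_subprocess_exec raises NotImplementedError there). A self-contained sketch of the same cross-platform pattern; the function name, error handling, and example script path below are illustrative, not part of the commit:

    import asyncio, shutil, subprocess, sys

    async def run_node_script(path: str) -> str:
        node = shutil.which("node")
        if not node:
            raise FileNotFoundError('"node" executable not found on PATH')
        if sys.platform == 'win32':
            # Blocking fallback: selector-based Windows loops lack subprocess support
            p = subprocess.Popen([node, path],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            stdout, stderr = p.communicate()
        else:
            p = await asyncio.create_subprocess_exec(
                node, path,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            stdout, stderr = await p.communicate()
        if p.returncode != 0:
            raise RuntimeError(f"Exec Error: {stderr.decode()}")
        return stdout.decode()

    # Hypothetical usage:
    # print(asyncio.run(run_node_script("arkose.js")))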