summaryrefslogtreecommitdiffstats
path: root/testing
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--testing/binghuan/BingHuan.py49
-rw-r--r--testing/binghuan/README.md7
-rw-r--r--testing/binghuan/helpers/binghuan.py221
-rw-r--r--testing/binghuan/testing.py31
4 files changed, 308 insertions, 0 deletions
diff --git a/testing/binghuan/BingHuan.py b/testing/binghuan/BingHuan.py
new file mode 100644
index 00000000..8c859c08
--- /dev/null
+++ b/testing/binghuan/BingHuan.py
@@ -0,0 +1,49 @@
+import os,sys
+import json
+import subprocess
+# from ...typing import sha256, Dict, get_type_hints
+
# Provider metadata consumed by the g4f loader / ChatCompletion wrapper below.
url = 'https://b.ai-huan.xyz'  # BingHuan mirror of the Bing chat endpoint
model = ['gpt-3.5-turbo', 'gpt-4']  # ids mapped to Bing option-sets in helpers/binghuan.py
supports_stream = True
needs_auth = False  # the mirror requires no _U cookie / token
+
def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    """Spawn ``helpers/binghuan.py`` as a subprocess and yield its stdout
    line by line.

    Args:
        model: one of the ids in ``model`` ('gpt-3.5-turbo' or 'gpt-4').
        messages: chat history as [{'role': ..., 'content': ...}, ...].
        stream: accepted for interface compatibility; output is always
            yielded incrementally either way.
        **kwargs: ignored here (absorbed for interface compatibility).

    Yields:
        str: decoded chunks of the helper's output.
    """
    path = os.path.dirname(os.path.realpath(__file__))
    config = json.dumps({
        'messages': messages,
        'model': model}, separators=(',', ':'))
    # BUGFIX: use the running interpreter rather than whatever 'python'
    # resolves to on PATH (may be missing or a different version).
    cmd = [sys.executable, f'{path}/helpers/binghuan.py', config]

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    for line in iter(p.stdout.readline, b''):
        # BUGFIX: the helper emits UTF-8 (it encodes with .encode('utf-8') on
        # fallback); decoding as cp1252 garbled emoji/multibyte output — the
        # "emoji encoding" problem noted in the README. errors='replace'
        # keeps the stream flowing on any stray invalid byte.
        yield line.decode('utf-8', errors='replace')
+
+
+
+# params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+# '(%s)' % ', '.join(
+# [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
+
+
# Temporary stand-in for the real g4f ChatCompletion class.
class ChatCompletion:
    @staticmethod
    def create(model: str, messages: list, provider: None or str, stream: bool = False, auth: str = False, **kwargs):
        """Dispatch a chat request to ``_create_completion``.

        Returns the raw generator when ``stream`` is truthy, otherwise the
        fully joined response string. Exits the process (status 1) on a
        missing-auth condition or an unsupported keyword argument.
        """
        kwargs['auth'] = auth

        # Guard: provider demands auth but none was supplied.
        if provider and needs_auth and not auth:
            print(
                f'ValueError: {provider} requires authentication (use auth="cookie or token or jwt ..." param)', file=sys.stderr)
            sys.exit(1)

        try:
            completion = _create_completion(model, messages, stream, **kwargs)
            if stream:
                return completion
            return ''.join(completion)
        except TypeError as e:
            print(e)
            # The offending keyword is the first single-quoted token in
            # the TypeError message.
            arg: str = str(e).split("'")[1]
            print(
                f"ValueError: {provider} does not support '{arg}' argument", file=sys.stderr)
            sys.exit(1)
diff --git a/testing/binghuan/README.md b/testing/binghuan/README.md
new file mode 100644
index 00000000..642f1fee
--- /dev/null
+++ b/testing/binghuan/README.md
@@ -0,0 +1,7 @@
+https://github.com/xtekky/gpt4free/issues/40#issuecomment-1630946450
+The chat flow closely mirrors the real Bing process (create a conversation, listen to a websocket, and more),
+so I just reused the Bing Provider code from the https://gitler.moe/g4f/gpt4free/ version, replaced the API endpoint and some conversation styles, and it works fine.
+
+However, Bing does not really support multi-turn/continuous conversation (using the prompt template from the original Provider: def convert(messages) : https://github.com/xtekky/gpt4free/blob/e594500c4e7a8443e9b3f4af755c72f42dae83f0/g4f/Provider/Providers/Bing.py#L322)
+
+I also have a problem with emoji encoding; I don't know how to fix that yet.
diff --git a/testing/binghuan/helpers/binghuan.py b/testing/binghuan/helpers/binghuan.py
new file mode 100644
index 00000000..203bbe45
--- /dev/null
+++ b/testing/binghuan/helpers/binghuan.py
@@ -0,0 +1,221 @@
+# Original Code From : https://gitler.moe/g4f/gpt4free
+# https://gitler.moe/g4f/gpt4free/src/branch/main/g4f/Provider/Providers/helpers/bing.py
+import sys
+import ssl
+import uuid
+import json
+import time
+import random
+import asyncio
+import certifi
+# import requests
+from curl_cffi import requests
+import websockets
+import browser_cookie3
+
# CLI contract: argv[1] is a compact JSON blob {'messages': [...], 'model': str}
# produced by BingHuan._create_completion in the parent process.
config = json.loads(sys.argv[1])

# TLS context backed by certifi's CA bundle, used for the websocket connect below.
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(certifi.where())
+
+
+
# Bing "optionsSets" feature-flag bundles, keyed by the g4f model name.
# NOTE(review): these mirror Bing's conversation styles — 'gpt-4' carries the
# Precise flags (h3precise/clgalileo) and 'gpt-3.5-turbo' the Creative flags
# (h3imaginative); 'balanced' is kept from the original Bing provider but is
# not one of the model ids exposed in BingHuan.py — confirm it is reachable.
conversationstyles = {
    'gpt-4': [ #'precise'
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "h3precise",
        "rcsprtsalwlst",
        "dv3sugg",
        "autosave",
        "clgalileo",
        "gencontentv3"
    ],
    'balanced': [
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "harmonyv3",
        "rcsprtsalwlst",
        "dv3sugg",
        "autosave"
    ],
    'gpt-3.5-turbo': [ #'precise'
        "nlu_direct_response_filter",
        "deepleo",
        "disable_emoji_spoken_text",
        "responsible_ai_policy_235",
        "enablemm",
        "h3imaginative",
        "rcsprtsalwlst",
        "dv3sugg",
        "autosave",
        "gencontentv3"
    ]
}
+
def format(msg: dict) -> str:
    """Serialize *msg* as JSON and append the ASCII record separator
    (0x1e) used as the SignalR websocket frame terminator."""
    payload = json.dumps(msg)
    return f'{payload}\x1e'
+
def get_token():
    """Return the Bing auth cookie; this endpoint needs none, so return None.

    The original implementation scraped the Edge browser's ``_U`` cookie via
    browser_cookie3, but an unconditional bare ``return`` placed before it
    made that lookup unreachable dead code (with a bare ``except`` to boot).
    Authentication is no longer required (see AsyncCompletion.create), so the
    dead path is removed and the no-auth behavior is made explicit.
    """
    return None
+
class AsyncCompletion:
    async def create(
            prompt : str = None,
            optionSets : list = None,
            token : str = None): # No auth required anymore; kept for compatibility
        """Async generator streaming response text from the BingHuan
        (Bing-compatible) chat endpoint.

        Args:
            prompt: full prompt text (single message or flattened history).
            optionSets: Bing "optionsSets" feature flags selecting the style.
            token: unused; retained so existing callers keep working.

        Yields:
            str: the newly generated portion of the reply on each update.

        Raises:
            Exception: if a conversation could not be created after 5 tries.
        """
        conversationId = None
        clientId = None
        conversationSignature = None

        # Retry conversation creation a few times; the mirror is flaky.
        # BUGFIX: the original loop had no `break`, so it re-created the
        # conversation on every one of the 5 iterations even after success,
        # and could fall through with `create` assigned but the JSON fields
        # unparsed (NameError further down).
        for _ in range(5):
            try:
                create = requests.get('https://b.ai-huan.xyz/turing/conversation/create',
                    headers = {
                        'host': 'b.ai-huan.xyz',
                        'accept-encoding': 'gzip, deflate, br',
                        'connection': 'keep-alive',
                        'authority': 'b.ai-huan.xyz',
                        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
                        'accept-language': 'en-US,en;q=0.9',
                        'cache-control': 'max-age=0',
                        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
                        'sec-ch-ua-arch': '"x86"',
                        'sec-ch-ua-bitness': '"64"',
                        'sec-ch-ua-full-version': '"110.0.1587.69"',
                        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                        'sec-ch-ua-mobile': '?0',
                        'sec-ch-ua-model': '""',
                        'sec-ch-ua-platform': '"Windows"',
                        'sec-ch-ua-platform-version': '"15.0.0"',
                        'sec-fetch-dest': 'document',
                        'sec-fetch-mode': 'navigate',
                        'sec-fetch-site': 'none',
                        'sec-fetch-user': '?1',
                        'upgrade-insecure-requests': '1',
                        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
                        'x-edge-shopping-flag': '1',
                        # Random datacenter-looking source IP to dodge rate limiting.
                        'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
                    }
                )

                conversationId = create.json()['conversationId']
                clientId = create.json()['clientId']
                conversationSignature = create.json()['conversationSignature']
                break
            except Exception:
                time.sleep(0.5)

        # BUGFIX: check the parsed fields, not `create` — `create` could be
        # non-None even when every attempt failed during JSON parsing.
        if conversationId is None:
            raise Exception('Failed to create conversation.')

        wss = await websockets.connect('wss://sydney.vcanbb.chat/sydney/ChatHub', max_size = None, ssl = ssl_context,
            extra_headers = {
                'accept': 'application/json',
                'accept-language': 'en-US,en;q=0.9',
                'content-type': 'application/json',
                'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
                'sec-ch-ua-arch': '"x86"',
                'sec-ch-ua-bitness': '"64"',
                'sec-ch-ua-full-version': '"109.0.1518.78"',
                'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-model': "",
                'sec-ch-ua-platform': '"Windows"',
                'sec-ch-ua-platform-version': '"15.0.0"',
                'sec-fetch-dest': 'empty',
                'sec-fetch-mode': 'cors',
                'sec-fetch-site': 'same-origin',
                'x-ms-client-request-id': str(uuid.uuid4()),
                'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
                'Referer': 'https://b.ai-huan.xyz/search?q=Bing+AI&showconv=1&FORM=hpcodx',
                'Referrer-Policy': 'origin-when-cross-origin',
                'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
            }
        )

        # SignalR handshake: select the JSON protocol, then discard the ack frame.
        await wss.send(format({'protocol': 'json', 'version': 1}))
        await wss.recv()

        struct = {
            'arguments': [
                {
                    'source': 'cib',
                    'optionsSets': optionSets,
                    'isStartOfSession': True,
                    'message': {
                        'author': 'user',
                        'inputMethod': 'Keyboard',
                        'text': prompt,
                        'messageType': 'Chat'
                    },
                    'conversationSignature': conversationSignature,
                    'participant': {
                        'id': clientId
                    },
                    'conversationId': conversationId
                }
            ],
            'invocationId': '0',
            'target': 'chat',
            'type': 4
        }

        await wss.send(format(struct))

        base_string = ''

        final = False
        while not final:
            # Each websocket payload is one or more '\x1e'-terminated JSON frames.
            objects = str(await wss.recv()).split('\x1e')
            for obj in objects:
                if obj is None or obj == '':
                    continue

                response = json.loads(obj)
                # type 1 = streaming update carrying the full text so far.
                if response.get('type') == 1 and response['arguments'][0].get('messages',):
                    response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')

                    # Yield only the delta since the previous update.
                    # NOTE(review): .replace() strips every occurrence of the
                    # previous text, not just the leading prefix; kept as-is to
                    # preserve behavior, but response_text[len(base_string):]
                    # would be the safer delta.
                    yield (response_text.replace(base_string, ''))
                    base_string = response_text

                # type 2 = final frame, reply complete.
                elif response.get('type') == 2:
                    final = True

        await wss.close()
+
# NOTE: Bing does not really track multi-turn state, so the whole history is
# flattened into one markdown-style prompt (see the original Bing provider).
def convert(messages):
    """Collapse a chat history into a single prompt string, one
    "[role](#message)" section per message."""
    sections = (
        "[%s](#message)\n%s\n\n" % (msg['role'], msg['content'])
        for msg in messages
    )
    return "".join(sections)
+
async def run(optionSets, messages):
    """Stream the model's reply for *messages* to stdout.

    Single-message histories are sent verbatim; longer histories are
    flattened through convert(). Output is printed incrementally so the
    parent process (BingHuan._create_completion) can relay it as it arrives.
    """
    prompt = messages[-1]['content']
    if len(messages) > 1:
        prompt = convert(messages)
    async for value in AsyncCompletion.create(prompt=prompt, optionSets=optionSets):
        try:
            print(value, flush=True, end='')
        except UnicodeEncodeError:
            # stdout's codec (e.g. cp1252 on Windows) can't represent the
            # character (emoji). BUGFIX: write raw UTF-8 bytes to the
            # underlying binary stream instead of print(value.encode(...)),
            # which emitted the b'...' repr into the response text.
            sys.stdout.buffer.write(value.encode('utf-8'))
            sys.stdout.flush()
+
# Entry point: map the requested model to its Bing option-set and stream the reply.
optionSet = conversationstyles[config['model']]
asyncio.run(run(optionSet, config['messages']))
diff --git a/testing/binghuan/testing.py b/testing/binghuan/testing.py
new file mode 100644
index 00000000..2db0b427
--- /dev/null
+++ b/testing/binghuan/testing.py
@@ -0,0 +1,31 @@
# Manual smoke tests for the BingHuan provider — these hit the live endpoint.
from BingHuan import ChatCompletion

# Test 1: single-turn request on the gpt-3.5-turbo option-set.
response = ChatCompletion.create(model="gpt-3.5-turbo",
                            provider="BingHuan",
                            stream=False,
                            messages=[{'role': 'user', 'content': 'who are you?'}])

print(response)

# Test 2
# this prompt will return emoji in end of response
# (exercises the emoji-encoding path noted in the README)
response = ChatCompletion.create(model="gpt-3.5-turbo",
                            provider="BingHuan",
                            stream=False,
                            messages=[{'role': 'user', 'content': 'what you can do?'}])

print(response)


# Test 3: multi-turn history, flattened via helpers convert() before sending.
response = ChatCompletion.create(model="gpt-4",
                            provider="BingHuan",
                            stream=False,
                            messages=[
                                {'role': 'user', 'content': 'now your name is Bob'},
                                {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
                                {'role': 'user', 'content': 'what your name again?'},
                            ])

print(response)