path: root/g4f/api/__init__.py
author     H Lohaus <hlohaus@users.noreply.github.com>  2024-01-02 01:10:31 +0100
committer  GitHub <noreply@github.com>                  2024-01-02 01:10:31 +0100
commit     b1b8ed40a4e8c7c3490b1c6b7cf6b55d0776f366 (patch)
tree       6cd09fb2eb4c144e28a82759a2a9a2fa7f30d311 /g4f/api/__init__.py
parent     Merge pull request #1414 from hlohaus/lia (diff)
parent     Fix markdown replace (diff)
Diffstat (limited to 'g4f/api/__init__.py')
-rw-r--r--  g4f/api/__init__.py  50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 8369d70f..15e8b3a5 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,23 +1,25 @@
import ast
import logging
-
-from fastapi import FastAPI, Response, Request
-from fastapi.responses import StreamingResponse
-from typing import List, Union, Any, Dict, AnyStr
-#from ._tokenizer import tokenize
-from .. import BaseProvider
-
import time
import json
import random
import string
import uvicorn
import nest_asyncio
+
+from fastapi import FastAPI, Response, Request
+from fastapi.responses import StreamingResponse
+from typing import List, Union, Any, Dict, AnyStr
+#from ._tokenizer import tokenize
+
import g4f
+from .. import debug
+
+debug.logging = True
class Api:
def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
- list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None:
+ list_ignored_providers: List[str] = None) -> None:
self.engine = engine
self.debug = debug
self.sentry = sentry
@@ -75,7 +77,10 @@ class Api:
}
# item contains byte keys, and dict.get suppresses error
- item_data.update({key.decode('utf-8') if isinstance(key, bytes) else key: str(value) for key, value in (item or {}).items()})
+ item_data.update({
+ key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
+ for key, value in (item or {}).items()
+ })
# messages is str, need dict
if isinstance(item_data.get('messages'), str):
item_data['messages'] = ast.literal_eval(item_data.get('messages'))
@@ -96,7 +101,12 @@ class Api:
)
except Exception as e:
logging.exception(e)
- return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+ content = json.dumps({
+ "error": {"message": f"An error occurred while generating the response:\n{e}"},
+ "model": model,
+ "provider": g4f.get_last_provider(True)
+ })
+ return Response(content=content, status_code=500, media_type="application/json")
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
completion_timestamp = int(time.time())
@@ -109,6 +119,7 @@ class Api:
'object': 'chat.completion',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -136,6 +147,7 @@ class Api:
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -147,16 +159,14 @@ class Api:
}
],
}
-
- content = json.dumps(completion_data, separators=(',', ':'))
- yield f'data: {content}\n\n'
+ yield f'data: {json.dumps(completion_data)}\n\n'
time.sleep(0.03)
-
end_completion_data = {
'id': f'chatcmpl-{completion_id}',
'object': 'chat.completion.chunk',
'created': completion_timestamp,
'model': model,
+ 'provider': g4f.get_last_provider(True),
'choices': [
{
'index': 0,
@@ -165,15 +175,17 @@ class Api:
}
],
}
-
- content = json.dumps(end_completion_data, separators=(',', ':'))
- yield f'data: {content}\n\n'
+ yield f'data: {json.dumps(end_completion_data)}\n\n'
except GeneratorExit:
pass
except Exception as e:
logging.exception(e)
- content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
- yield f'data: {content}\n\n'
+ content = json.dumps({
+ "error": {"message": f"An error occurred while generating the response:\n{e}"},
+ "model": model,
+ "provider": g4f.get_last_provider(True),
+ })
+ yield f'data: {content}'
return StreamingResponse(streaming(), media_type="text/event-stream")
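
For context, the new response shape can be exercised with a short client. The following is a minimal sketch, not part of the commit; it assumes the API is served locally on port 1337 and exposes an OpenAI-compatible /v1/chat/completions route (neither is shown in this diff), and that the requests package is installed.

# Minimal streaming client sketch (illustrative only; URL, port, and model name are assumptions).
import json
import requests

response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",
        "stream": True,
        "messages": [{"role": "user", "content": "Hello"}],
    },
    stream=True,
)

for raw_line in response.iter_lines():
    if not raw_line.startswith(b"data: "):
        continue  # skip keep-alive blanks and anything that is not an SSE data line
    chunk = json.loads(raw_line[len(b"data: "):])
    if "error" in chunk:
        # The patch adds 'model' and 'provider' fields next to the error message.
        print("error from", chunk.get("provider"), chunk["error"]["message"])
        break
    # Each streamed chunk now also carries the provider that produced it.
    print(chunk.get("provider"), chunk["choices"][0]["delta"].get("content", ""), sep=": ")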