diff options
 README.md              | 9 +++++++++
 g4f/Provider/Llama2.py | 4 ++--
 2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -37,6 +37,7 @@ docker pull hlohaus789/g4f
   - [Install using pypi](#install-using-pypi)
   + [Docker for Developers](#docker-for-developers)
 - [💡 Usage](#-usage)
+  * [The Web UI](#the-web-ui)
   * [The `g4f` Package](#the-g4f-package)
     + [ChatCompletion](#chatcompletion)
       - [Completion](#completion)
@@ -182,6 +183,14 @@ docker-compose down
 
 ## 💡 Usage
 
+### The Web UI
+
+To use it in the web interface, type the following codes in the command line.
+```python3
+from g4f.gui import run_gui
+run_gui()
+```
+
 ### The `g4f` Package
 
 #### ChatCompletion
diff --git a/g4f/Provider/Llama2.py b/g4f/Provider/Llama2.py
index 6d0c0a44..17969621 100644
--- a/g4f/Provider/Llama2.py
+++ b/g4f/Provider/Llama2.py
@@ -62,7 +62,7 @@ class Llama2(AsyncGeneratorProvider):
                     if not started:
                         chunk = chunk.lstrip()
                         started = True
-                    yield chunk.decode()
+                    yield chunk.decode(errors="ignore")
 
 def format_prompt(messages: Messages):
     messages = [
@@ -71,4 +71,4 @@ def format_prompt(messages: Messages):
         else message["content"]
         for message in messages
     ]
-    return "\n".join(messages) + "\n"
\ No newline at end of file
+    return "\n".join(messages) + "\n"