author     H Lohaus <hlohaus@users.noreply.github.com>   2024-03-16 18:22:26 +0100
committer  GitHub <noreply@github.com>                   2024-03-16 18:22:26 +0100
commit     fb2061da48525edab9cd993205bb5e30c386aa1a (patch)
tree       1e740bd6955dfd27b9a4d773df07234ed9e5c75e /g4f/gui
parent     Merge pull request #1694 from ComRSMaster/main (diff)
parent     Add conversation support for Bing (diff)
Diffstat
-rw-r--r--  g4f/gui/client/index.html (renamed from g4f/gui/client/html/index.html) | 36
-rw-r--r--  g4f/gui/client/static/css/style.css (renamed from g4f/gui/client/css/style.css) | 18
-rw-r--r--  g4f/gui/client/static/img/android-chrome-192x192.png (renamed from g4f/gui/client/img/android-chrome-192x192.png) | bin 8908 -> 8908 bytes
-rw-r--r--  g4f/gui/client/static/img/android-chrome-512x512.png (renamed from g4f/gui/client/img/android-chrome-512x512.png) | bin 17626 -> 17626 bytes
-rw-r--r--  g4f/gui/client/static/img/apple-touch-icon.png (renamed from g4f/gui/client/img/apple-touch-icon.png) | bin 7984 -> 7984 bytes
-rw-r--r--  g4f/gui/client/static/img/favicon-16x16.png (renamed from g4f/gui/client/img/favicon-16x16.png) | bin 499 -> 499 bytes
-rw-r--r--  g4f/gui/client/static/img/favicon-32x32.png (renamed from g4f/gui/client/img/favicon-32x32.png) | bin 1041 -> 1041 bytes
-rw-r--r--  g4f/gui/client/static/img/gpt.png (renamed from g4f/gui/client/img/gpt.png) | bin 2885 -> 2885 bytes
-rw-r--r--  g4f/gui/client/static/img/site.webmanifest (renamed from g4f/gui/client/img/site.webmanifest) | 0
-rw-r--r--  g4f/gui/client/static/img/user.png (renamed from g4f/gui/client/img/user.png) | bin 17004 -> 17004 bytes
-rw-r--r--  g4f/gui/client/static/js/chat.v1.js (renamed from g4f/gui/client/js/chat.v1.js) | 508
-rw-r--r--  g4f/gui/client/static/js/highlight.min.js (renamed from g4f/gui/client/js/highlight.min.js) | 0
-rw-r--r--  g4f/gui/client/static/js/highlightjs-copy.min.js (renamed from g4f/gui/client/js/highlightjs-copy.min.js) | 0
-rw-r--r--  g4f/gui/client/static/js/icons.js (renamed from g4f/gui/client/js/icons.js) | 0
-rw-r--r--  g4f/gui/server/api.py | 185
-rw-r--r--  g4f/gui/server/app.py | 8
-rw-r--r--  g4f/gui/server/backend.py | 163
-rw-r--r--  g4f/gui/server/website.py | 20
-rw-r--r--  g4f/gui/webview.py | 35
-rw-r--r--  g4f/gui/webview.spec | 45
20 files changed, 624 insertions(+), 394 deletions(-)
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/index.html
index 46a9c541..9ce6b66a 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/index.html
@@ -1,5 +1,5 @@
<!DOCTYPE html>
-<html lang="en">
+<html lang="en" data-framework="javascript">
<head>
<meta charset="UTF-8">
@@ -10,14 +10,14 @@
<meta property="og:image" content="https://openai.com/content/images/2022/11/ChatGPT.jpg">
<meta property="og:description" content="A conversational AI system that listens, learns, and challenges">
<meta property="og:url" content="https://g4f.ai">
- <link rel="stylesheet" href="/assets/css/style.css">
- <link rel="apple-touch-icon" sizes="180x180" href="/assets/img/apple-touch-icon.png">
- <link rel="icon" type="image/png" sizes="32x32" href="/assets/img/favicon-32x32.png">
- <link rel="icon" type="image/png" sizes="16x16" href="/assets/img/favicon-16x16.png">
- <link rel="manifest" href="/assets/img/site.webmanifest">
- <script src="/assets/js/icons.js"></script>
- <script src="/assets/js/highlightjs-copy.min.js"></script>
- <script src="/assets/js/chat.v1.js" defer></script>
+ <link rel="stylesheet" href="/static/css/style.css">
+ <link rel="apple-touch-icon" sizes="180x180" href="/static/img/apple-touch-icon.png">
+ <link rel="icon" type="image/png" sizes="32x32" href="/static/img/favicon-32x32.png">
+ <link rel="icon" type="image/png" sizes="16x16" href="/static/img/favicon-16x16.png">
+ <link rel="manifest" href="/static/img/site.webmanifest">
+ <script src="/static/js/icons.js"></script>
+ <script src="/static/js/highlightjs-copy.min.js"></script>
+ <script src="/static/js/chat.v1.js" defer></script>
<script src="https://cdn.jsdelivr.net/npm/markdown-it@13.0.1/dist/markdown-it.min.js"></script>
<link rel="stylesheet"
href="//cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.7.0/build/styles/base16/dracula.min.css">
@@ -38,8 +38,8 @@
</script>
<script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
<script>
- const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
- const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';
+ const user_image = '<img src="/static/img/user.png" alt="your avatar">';
+ const gpt_image = '<img src="/static/img/gpt.png" alt="your avatar">';
</script>
<style>
.hljs {
@@ -74,8 +74,8 @@
background: #8b3dff;
}
</style>
- <script src="/assets/js/highlight.min.js"></script>
- <script>window.conversation_id = `{{chat_id}}`</script>
+ <script src="/static/js/highlight.min.js"></script>
+ <script>window.conversation_id = "{{chat_id}}"</script>
<title>g4f - gui</title>
</head>
@@ -94,11 +94,10 @@
<i class="fa-regular fa-trash"></i>
<span>Clear Conversations</span>
</button>
- <div class="info">
- <i class="fa-brands fa-telegram"></i>
- <span class="convo-title">tele ~ <a href="https://t.me/g4f_official">@g4f_official</a>
- </span>
- </div>
+ <button onclick="save_storage()">
+ <i class="fa-solid fa-download"></i>
+ <a href="" onclick="return false;">Export Conversations</a>
+ </button>
<div class="info">
<i class="fa-brands fa-github"></i>
<span class="convo-title">github ~ <a href="https://github.com/xtekky/gpt4free">@gpt4free</a>
@@ -161,6 +160,7 @@
<option value="gemini-pro">gemini-pro</option>
<option value="">----</option>
</select>
+ <select name="model2" id="model2" class="hidden"></select>
</div>
<div class="field">
<select name="jailbreak" id="jailbreak" style="display: none;">
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/static/css/style.css
index 17f3e4b3..ba3f1187 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -15,7 +15,7 @@
margin: auto;
display: flex;
flex-direction: column;
- gap: 16px;
+ gap: var(--inner-gap);
max-width: 200px;
padding: var(--section-gap);
overflow: none;
@@ -106,6 +106,10 @@ body {
border: 1px solid var(--blur-border);
}
+.hidden {
+ display: none;
+}
+
.conversations {
max-width: 260px;
padding: var(--section-gap);
@@ -179,7 +183,8 @@ body {
.conversations {
display: flex;
flex-direction: column;
- gap: 16px;
+ gap: var(--inner-gap);
+ padding: var(--inner-gap);
}
.conversations .title {
@@ -569,7 +574,7 @@ label[for="camera"] {
height: fit-content;
display: flex;
align-items: center;
- gap: 16px;
+ gap: var(--inner-gap);
}
.field .about {
@@ -653,10 +658,15 @@ select {
font-size: 14px;
}
+.bottom_buttons button a {
+ color: var(--colour-3);
+ font-weight: 500;
+}
+
.conversations .top {
display: flex;
flex-direction: column;
- gap: 16px;
+ gap: var(--inner-gap);
overflow: auto;
}
diff --git a/g4f/gui/client/img/android-chrome-192x192.png b/g4f/gui/client/static/img/android-chrome-192x192.png
index 3c32aceb..3c32aceb 100644
--- a/g4f/gui/client/img/android-chrome-192x192.png
+++ b/g4f/gui/client/static/img/android-chrome-192x192.png
Binary files differ
diff --git a/g4f/gui/client/img/android-chrome-512x512.png b/g4f/gui/client/static/img/android-chrome-512x512.png
index ae601c93..ae601c93 100644
--- a/g4f/gui/client/img/android-chrome-512x512.png
+++ b/g4f/gui/client/static/img/android-chrome-512x512.png
Binary files differ
diff --git a/g4f/gui/client/img/apple-touch-icon.png b/g4f/gui/client/static/img/apple-touch-icon.png
index 1143d19a..1143d19a 100644
--- a/g4f/gui/client/img/apple-touch-icon.png
+++ b/g4f/gui/client/static/img/apple-touch-icon.png
Binary files differ
diff --git a/g4f/gui/client/img/favicon-16x16.png b/g4f/gui/client/static/img/favicon-16x16.png
index 6e934fb8..6e934fb8 100644
--- a/g4f/gui/client/img/favicon-16x16.png
+++ b/g4f/gui/client/static/img/favicon-16x16.png
Binary files differ
diff --git a/g4f/gui/client/img/favicon-32x32.png b/g4f/gui/client/static/img/favicon-32x32.png
index efc095b5..efc095b5 100644
--- a/g4f/gui/client/img/favicon-32x32.png
+++ b/g4f/gui/client/static/img/favicon-32x32.png
Binary files differ
diff --git a/g4f/gui/client/img/gpt.png b/g4f/gui/client/static/img/gpt.png
index 60e24da0..60e24da0 100644
--- a/g4f/gui/client/img/gpt.png
+++ b/g4f/gui/client/static/img/gpt.png
Binary files differ
diff --git a/g4f/gui/client/img/site.webmanifest b/g4f/gui/client/static/img/site.webmanifest
index f8eab4d7..f8eab4d7 100644
--- a/g4f/gui/client/img/site.webmanifest
+++ b/g4f/gui/client/static/img/site.webmanifest
diff --git a/g4f/gui/client/img/user.png b/g4f/gui/client/static/img/user.png
index d1908e1d..d1908e1d 100644
--- a/g4f/gui/client/img/user.png
+++ b/g4f/gui/client/static/img/user.png
Binary files differ
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 8774fbf1..5440fc4a 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1,5 +1,4 @@
const colorThemes = document.querySelectorAll('[name="theme"]');
-const markdown = window.markdownit();
const message_box = document.getElementById(`messages`);
const messageInput = document.getElementById(`message-input`);
const box_conversations = document.querySelector(`.top`);
@@ -12,12 +11,15 @@ const imageInput = document.getElementById("image");
const cameraInput = document.getElementById("camera");
const fileInput = document.getElementById("file");
const inputCount = document.getElementById("input-count")
+const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model");
+const modelProvider = document.getElementById("model2");
const systemPrompt = document.getElementById("systemPrompt")
+const jailbreak = document.getElementById("jailbreak");
let prompt_lock = false;
-hljs.addPlugin(new CopyButtonPlugin());
+const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"];
messageInput.addEventListener("blur", () => {
window.scrollTo(0, 0);
@@ -34,15 +36,17 @@ appStorage = window.localStorage || {
length: 0
}
+const markdown = window.markdownit();
const markdown_render = (content) => {
return markdown.render(content
- .replaceAll(/<!--.+-->/gm, "")
+ .replaceAll(/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, "")
.replaceAll(/<img data-prompt="[^>]+">/gm, "")
)
.replaceAll("<a href=", '<a target="_blank" href=')
.replaceAll('<code>', '<code class="language-plaintext">')
}
+hljs.addPlugin(new CopyButtonPlugin());
let typesetPromise = Promise.resolve();
const highlight = (container) => {
container.querySelectorAll('code:not(.hljs').forEach((el) => {
@@ -90,48 +94,48 @@ const handle_ask = async () => {
window.scrollTo(0, 0);
message = messageInput.value
- if (message.length > 0) {
- messageInput.value = "";
- prompt_lock = true;
- count_input()
- await add_conversation(window.conversation_id, message);
- if ("text" in fileInput.dataset) {
- message += '\n```' + fileInput.dataset.type + '\n';
- message += fileInput.dataset.text;
- message += '\n```'
- }
- let message_index = await add_message(window.conversation_id, "user", message);
- window.token = message_id();
-
- if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
- const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
- if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
- else delete imageInput.dataset.src
-
- model = modelSelect.options[modelSelect.selectedIndex].value
- message_box.innerHTML += `
- <div class="message" data-index="${message_index}">
- <div class="user">
- ${user_image}
- <i class="fa-solid fa-xmark"></i>
- <i class="fa-regular fa-phone-arrow-up-right"></i>
- </div>
- <div class="content" id="user_${token}">
- <div class="content_inner">
- ${markdown_render(message)}
- ${imageInput.dataset.src
- ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
- : ''
- }
- </div>
- <div class="count">${count_words_and_tokens(message, model)}</div>
+ if (message.length <= 0) {
+ return;
+ }
+ messageInput.value = "";
+ prompt_lock = true;
+ count_input()
+ await add_conversation(window.conversation_id, message);
+
+ if ("text" in fileInput.dataset) {
+ message += '\n```' + fileInput.dataset.type + '\n';
+ message += fileInput.dataset.text;
+ message += '\n```'
+ }
+ let message_index = await add_message(window.conversation_id, "user", message);
+ window.token = message_id();
+
+ if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
+ const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
+ if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
+ else delete imageInput.dataset.src
+
+ message_box.innerHTML += `
+ <div class="message" data-index="${message_index}">
+ <div class="user">
+ ${user_image}
+ <i class="fa-solid fa-xmark"></i>
+ <i class="fa-regular fa-phone-arrow-up-right"></i>
+ </div>
+ <div class="content" id="user_${token}">
+ <div class="content_inner">
+ ${markdown_render(message)}
+ ${imageInput.dataset.src
+ ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
+ : ''
+ }
</div>
+ <div class="count">${count_words_and_tokens(message, get_selected_model())}</div>
</div>
- `;
- await register_remove_message();
- highlight(message_box);
- await ask_gpt();
- }
+ </div>
+ `;
+ highlight(message_box);
+ await ask_gpt();
};
const remove_cancel_button = async () => {
@@ -143,7 +147,7 @@ const remove_cancel_button = async () => {
}, 300);
};
-const prepare_messages = (messages, filter_last_message = true) => {
+const prepare_messages = (messages, filter_last_message=true) => {
// Removes none user messages at end
if (filter_last_message) {
let last_message;
@@ -193,20 +197,54 @@ const prepare_messages = (messages, filter_last_message = true) => {
return new_messages;
}
+async function add_message_chunk(message) {
+ if (message.type == "conversation") {
+ console.info("Conversation used:", message.conversation)
+ } else if (message.type == "provider") {
+ window.provider_result = message.provider;
+ window.content.querySelector('.provider').innerHTML = `
+ <a href="${message.provider.url}" target="_blank">
+ ${message.provider.name}
+ </a>
+ ${message.provider.model ? ' with ' + message.provider.model : ''}
+ `
+ } else if (message.type == "message") {
+        console.error(message.message)
+ return;
+ } else if (message.type == "error") {
+ console.error(message.error);
+        window.content_inner.innerHTML += `<p><strong>An error occurred:</strong> ${message.error}</p>`;
+ } else if (message.type == "content") {
+ window.text += message.content;
+ html = markdown_render(window.text);
+ let lastElement, lastIndex = null;
+ for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
+ const index = html.lastIndexOf(element)
+ if (index - element.length > lastIndex) {
+ lastElement = element;
+ lastIndex = index;
+ }
+ }
+ if (lastIndex) {
+ html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
+ }
+ window.content_inner.innerHTML = html;
+ window.content_count.innerText = count_words_and_tokens(text, window.provider_result?.model);
+ highlight(window.content_inner);
+ }
+
+ window.scrollTo(0, 0);
+ if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
+ message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+ }
+}
+
const ask_gpt = async () => {
regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id);
total_messages = messages.length;
-
messages = prepare_messages(messages);
- window.scrollTo(0, 0);
- window.controller = new AbortController();
-
- jailbreak = document.getElementById("jailbreak");
- provider = document.getElementById("provider");
- window.text = '';
-
stop_generating.classList.remove(`stop_generating-hidden`);
message_box.scrollTop = message_box.scrollHeight;
@@ -229,103 +267,31 @@ const ask_gpt = async () => {
</div>
</div>
`;
- content = document.getElementById(`gpt_${window.token}`);
- content_inner = content.querySelector('.content_inner');
- content_count = content.querySelector('.count');
+
+ window.controller = new AbortController();
+ window.text = "";
+ window.error = null;
+ window.provider_result = null;
+
+ window.content = document.getElementById(`gpt_${window.token}`);
+ window.content_inner = content.querySelector('.content_inner');
+ window.content_count = content.querySelector('.count');
message_box.scrollTop = message_box.scrollHeight;
window.scrollTo(0, 0);
-
- error = provider_result = null;
try {
- let body = JSON.stringify({
+ const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
+ const file = input && input.files.length > 0 ? input.files[0] : null;
+ await api("conversation", {
id: window.token,
conversation_id: window.conversation_id,
- model: modelSelect.options[modelSelect.selectedIndex].value,
- jailbreak: jailbreak.options[jailbreak.selectedIndex].value,
- web_search: document.getElementById(`switch`).checked,
- provider: provider.options[provider.selectedIndex].value,
- patch_provider: document.getElementById('patch')?.checked,
+ model: get_selected_model(),
+ jailbreak: jailbreak?.options[jailbreak.selectedIndex].value,
+ web_search: document.getElementById("switch").checked,
+ provider: providerSelect.options[providerSelect.selectedIndex].value,
+ patch_provider: document.getElementById("patch")?.checked,
messages: messages
- });
- const headers = {
- accept: 'text/event-stream'
- }
- const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
- if (input && input.files.length > 0) {
- const formData = new FormData();
- formData.append('image', input.files[0]);
- formData.append('json', body);
- body = formData;
- } else {
- headers['content-type'] = 'application/json';
- }
-
- const response = await fetch(`/backend-api/v2/conversation`, {
- method: 'POST',
- signal: window.controller.signal,
- headers: headers,
- body: body
- });
- const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
- let buffer = ""
- while (true) {
- const { value, done } = await reader.read();
- if (done) break;
- for (const line of value.split("\n")) {
- if (!line) {
- continue;
- }
- let message;
- try {
- message = JSON.parse(buffer + line);
- buffer = "";
- } catch {
- buffer += line
- continue;
- }
- if (message.type == "content") {
- text += message.content;
- } else if (message.type == "provider") {
- provider_result = message.provider
- content.querySelector('.provider').innerHTML = `
- <a href="${provider_result.url}" target="_blank">
- ${provider_result.name}
- </a>
- ${provider_result.model ? ' with ' + provider_result.model : ''}
- `
- } else if (message.type == "error") {
- error = message.error;
- } else if (messag.type == "message") {
- console.error(messag.message)
- }
- }
- if (error) {
- console.error(error);
- content_inner.innerHTML += `<p><strong>An error occured:</strong> ${error}</p>`;
- } else {
- html = markdown_render(text);
- let lastElement, lastIndex = null;
- for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
- const index = html.lastIndexOf(element)
- if (index - element.length > lastIndex) {
- lastElement = element;
- lastIndex = index;
- }
- }
- if (lastIndex) {
- html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
- }
- content_inner.innerHTML = html;
- content_count.innerText = count_words_and_tokens(text, provider_result?.model);
- highlight(content_inner);
- }
-
- window.scrollTo(0, 0);
- if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
- message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
- }
- }
+ }, file);
if (!error) {
html = markdown_render(text);
content_inner.innerHTML = html;
@@ -350,7 +316,7 @@ const ask_gpt = async () => {
await add_message(window.conversation_id, "assistant", text, provider_result);
await load_conversation(window.conversation_id);
} else {
- let cursorDiv = document.getElementById(`cursor`);
+ let cursorDiv = document.getElementById("cursor");
if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
}
window.scrollTo(0, 0);
@@ -439,7 +405,7 @@ const new_conversation = async () => {
say_hello();
};
-const load_conversation = async (conversation_id, scroll = true) => {
+const load_conversation = async (conversation_id, scroll=true) => {
let conversation = await get_conversation(conversation_id);
let messages = conversation?.items || [];
@@ -454,7 +420,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
last_model = item.provider?.model;
let next_i = parseInt(i) + 1;
let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
-
let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
let provider = provider_link ? `
<div class="provider">
@@ -491,7 +456,6 @@ const load_conversation = async (conversation_id, scroll = true) => {
}
message_box.innerHTML = elements;
-
register_remove_message();
highlight(message_box);
@@ -543,7 +507,9 @@ async function add_conversation(conversation_id, content) {
}
async function save_system_message() {
- if (!window.conversation_id) return;
+ if (!window.conversation_id) {
+ return;
+ }
const conversation = await get_conversation(window.conversation_id);
conversation.system = systemPrompt?.value;
await save_conversation(window.conversation_id, conversation);
@@ -580,7 +546,6 @@ const remove_message = async (conversation_id, index) => {
const add_message = async (conversation_id, role, content, provider) => {
const conversation = await get_conversation(conversation_id);
-
conversation.items.push({
role: role,
content: content,
@@ -662,15 +627,14 @@ sidebar_button.addEventListener("click", (event) => {
sidebar.classList.add("shown");
sidebar_button.classList.add("rotated");
}
-
window.scrollTo(0, 0);
});
-const register_settings_localstorage = async () => {
- for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) {
+const register_settings_storage = async () => {
+ options.forEach((id) => {
element = document.getElementById(id);
if (!element) {
- continue;
+ return;
}
element.addEventListener('change', async (event) => {
switch (event.target.type) {
@@ -684,14 +648,14 @@ const register_settings_localstorage = async () => {
console.warn("Unresolved element type");
}
});
- }
+ });
}
-const load_settings_localstorage = async () => {
- for (id of ["switch", "model", "jailbreak", "patch", "provider", "history"]) {
+const load_settings_storage = async () => {
+ options.forEach((id) => {
element = document.getElementById(id);
- if (!element || !(value = appStorage.getItem(element.id))) {
- continue;
+ if (!element || !(value = appStorage.getItem(id))) {
+ return;
}
if (value) {
switch (element.type) {
@@ -705,7 +669,7 @@ const load_settings_localstorage = async () => {
console.warn("Unresolved element type");
}
}
- }
+ });
}
const say_hello = async () => {
@@ -780,13 +744,16 @@ function count_words_and_tokens(text, model) {
}
let countFocus = messageInput;
+let timeoutId;
const count_input = async () => {
- if (countFocus.value) {
- model = modelSelect.options[modelSelect.selectedIndex].value;
- inputCount.innerText = count_words_and_tokens(countFocus.value, model);
- } else {
- inputCount.innerHTML = "&nbsp;"
- }
+ if (timeoutId) clearTimeout(timeoutId);
+ timeoutId = setTimeout(() => {
+ if (countFocus.value) {
+ inputCount.innerText = count_words_and_tokens(countFocus.value, get_selected_model());
+ } else {
+ inputCount.innerHTML = "&nbsp;"
+ }
+ }, 100);
};
messageInput.addEventListener("keyup", count_input);
systemPrompt.addEventListener("keyup", count_input);
@@ -799,9 +766,21 @@ systemPrompt.addEventListener("blur", function() {
count_input();
});
-window.onload = async () => {
- setTheme();
+window.addEventListener('load', async function() {
+ await on_load();
+ if (window.conversation_id == "{{chat_id}}") {
+ window.conversation_id = uuid();
+ } else {
+ await on_api();
+ }
+});
+
+window.addEventListener('pywebviewready', async function() {
+ await on_api();
+});
+async function on_load() {
+ setTheme();
count_input();
if (/\/chat\/.+/.test(window.location.href)) {
@@ -809,9 +788,10 @@ window.onload = async () => {
} else {
say_hello()
}
-
load_conversations();
+}
+async function on_api() {
messageInput.addEventListener("keydown", async (evt) => {
if (prompt_lock) return;
@@ -824,46 +804,17 @@ window.onload = async () => {
messageInput.style.height = messageInput.scrollHeight + "px";
}
});
-
sendButton.addEventListener(`click`, async () => {
console.log("clicked send");
if (prompt_lock) return;
await handle_ask();
});
-
messageInput.focus();
- register_settings_localstorage();
-};
-
-(async () => {
- response = await fetch('/backend-api/v2/models')
- models = await response.json()
-
- for (model of models) {
- let option = document.createElement('option');
- option.value = option.text = model;
- modelSelect.appendChild(option);
- }
+ register_settings_storage();
- response = await fetch('/backend-api/v2/providers')
- providers = await response.json()
- select = document.getElementById('provider');
-
- for (provider of providers) {
- let option = document.createElement('option');
- option.value = option.text = provider;
- select.appendChild(option);
- }
-
- await load_settings_localstorage()
-})();
-
-(async () => {
- response = await fetch('/backend-api/v2/version')
- versions = await response.json()
-
- document.title = 'g4f - gui - ' + versions["version"];
+ versions = await api("version");
+ document.title = 'g4f - ' + versions["version"];
let text = "version ~ "
if (versions["version"] != versions["latest_version"]) {
let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
@@ -873,7 +824,24 @@ window.onload = async () => {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
-})()
+
+ models = await api("models");
+ models.forEach((model) => {
+ let option = document.createElement("option");
+ option.value = option.text = model;
+ modelSelect.appendChild(option);
+ });
+
+ providers = await api("providers")
+ providers.forEach((provider) => {
+ let option = document.createElement("option");
+ option.value = option.text = provider;
+ providerSelect.appendChild(option);
+ })
+
+ await load_provider_models(appStorage.getItem("provider"));
+ load_settings_storage()
+}
for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => {
@@ -889,6 +857,7 @@ fileInput.addEventListener('click', async (event) => {
fileInput.value = '';
delete fileInput.dataset.text;
});
+
fileInput.addEventListener('change', async (event) => {
if (fileInput.files.length) {
type = fileInput.files[0].type;
@@ -903,8 +872,21 @@ fileInput.addEventListener('change', async (event) => {
}
fileInput.dataset.type = type
const reader = new FileReader();
- reader.addEventListener('load', (event) => {
+ reader.addEventListener('load', async (event) => {
fileInput.dataset.text = event.target.result;
+ if (type == "json") {
+ const data = JSON.parse(fileInput.dataset.text);
+ if ("g4f" in data.options) {
+ Object.keys(data).forEach(key => {
+ if (key != "options" && !localStorage.getItem(key)) {
+ appStorage.setItem(key, JSON.stringify(data[key]));
+ }
+ });
+ delete fileInput.dataset.text;
+ await load_conversations();
+ fileInput.value = "";
+ }
+ }
});
reader.readAsText(fileInput.files[0]);
} else {
@@ -914,4 +896,126 @@ fileInput.addEventListener('change', async (event) => {
systemPrompt?.addEventListener("blur", async () => {
await save_system_message();
-});
\ No newline at end of file
+});
+
+function get_selected_model() {
+ if (modelProvider.selectedIndex >= 0) {
+ return modelProvider.options[modelProvider.selectedIndex].value;
+ } else if (modelSelect.selectedIndex >= 0) {
+ return modelSelect.options[modelSelect.selectedIndex].value;
+ }
+}
+
+async function api(ressource, args=null, file=null) {
+ if (window?.pywebview) {
+ if (args) {
+ if (ressource == "models") {
+ ressource = "provider_models";
+ }
+ return pywebview.api["get_" + ressource](args);
+ }
+ return pywebview.api["get_" + ressource]();
+ }
+ if (ressource == "models" && args) {
+ ressource = `${ressource}/${args}`;
+ }
+ const url = `/backend-api/v2/${ressource}`;
+ if (ressource == "conversation") {
+        let body = JSON.stringify(args);
+ const headers = {
+ accept: 'text/event-stream'
+ }
+ if (file) {
+ const formData = new FormData();
+ formData.append('file', file);
+ formData.append('json', body);
+ body = formData;
+ } else {
+ headers['content-type'] = 'application/json';
+ }
+ response = await fetch(url, {
+ method: 'POST',
+ signal: window.controller.signal,
+ headers: headers,
+ body: body
+ });
+ return read_response(response);
+ }
+ response = await fetch(url);
+ return await response.json();
+}
+
+async function read_response(response) {
+ const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ let buffer = ""
+ while (true) {
+ const { value, done } = await reader.read();
+ if (done) {
+ break;
+ }
+ for (const line of value.split("\n")) {
+ if (!line) {
+ continue;
+ }
+ try {
+ add_message_chunk(JSON.parse(buffer + line))
+ buffer = "";
+ } catch {
+ buffer += line
+ }
+ }
+ }
+}
+
+async function load_provider_models(providerIndex=null) {
+ if (!providerIndex) {
+ providerIndex = providerSelect.selectedIndex;
+ }
+ const provider = providerSelect.options[providerIndex].value;
+ if (!provider) {
+ return;
+ }
+ const models = await api('models', provider);
+ modelProvider.innerHTML = '';
+ if (models.length > 0) {
+ modelSelect.classList.add("hidden");
+ modelProvider.classList.remove("hidden");
+ models.forEach((model) => {
+ let option = document.createElement('option');
+ option.value = option.text = model.model;
+ option.selected = model.default;
+ modelProvider.appendChild(option);
+ });
+ } else {
+ modelProvider.classList.add("hidden");
+ modelSelect.classList.remove("hidden");
+ }
+};
+providerSelect.addEventListener("change", () => load_provider_models());
+
+function save_storage() {
+ let filename = new Date().toLocaleString()
+ filename += ".json"
+ let data = {"options": {"g4f": ""}};
+ for (let i = 0; i < appStorage.length; i++){
+ let key = appStorage.key(i);
+ let item = appStorage.getItem(key);
+ if (key.startsWith("conversation:")) {
+ data[key] = JSON.parse(item);
+ } else {
+ data["options"][key] = item;
+ }
+ }
+ data = JSON.stringify(data, null, 4);
+ const blob = new Blob([data], {type: 'text/csv'});
+ if(window.navigator.msSaveOrOpenBlob) {
+ window.navigator.msSaveBlob(blob, filename);
+ } else{
+ const elem = window.document.createElement('a');
+ elem.href = window.URL.createObjectURL(blob);
+ elem.download = filename;
+ document.body.appendChild(elem);
+ elem.click();
+ document.body.removeChild(elem);
+ }
+}
\ No newline at end of file
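
The rewritten frontend funnels every backend call through the new api() helper: under pywebview it uses the js_api bridge, otherwise it talks to the /backend-api/v2/ endpoints, and conversation responses stream back as newline-delimited JSON objects that read_response() parses and hands to add_message_chunk(). A minimal Python sketch of a client consuming the same stream is shown below; the requests dependency, the localhost:8080 address, and the conversation id are assumptions, not part of this commit.

import json
import requests  # assumed third-party dependency for this sketch

def stream_conversation(messages, base_url="http://localhost:8080"):
    """POST a conversation request and yield each parsed JSON chunk."""
    payload = {
        # Omitted "model" and "provider" fall back to the server-side defaults.
        "conversation_id": "example-conversation",  # placeholder id
        "web_search": False,
        "patch_provider": False,
        "messages": messages,
    }
    with requests.post(
        f"{base_url}/backend-api/v2/conversation",
        json=payload,
        headers={"accept": "text/event-stream"},
        stream=True,
    ) as response:
        # The backend emits one JSON object per line (see _format_json in backend.py).
        for line in response.iter_lines():
            if line:
                yield json.loads(line)

for chunk in stream_conversation([{"role": "user", "content": "Hello"}]):
    if chunk["type"] == "content":
        print(chunk["content"], end="")
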
diff --git a/g4f/gui/client/js/highlight.min.js b/g4f/gui/client/static/js/highlight.min.js
index d410b45b..d410b45b 100644
--- a/g4f/gui/client/js/highlight.min.js
+++ b/g4f/gui/client/static/js/highlight.min.js
diff --git a/g4f/gui/client/js/highlightjs-copy.min.js b/g4f/gui/client/static/js/highlightjs-copy.min.js
index ac11d33e..ac11d33e 100644
--- a/g4f/gui/client/js/highlightjs-copy.min.js
+++ b/g4f/gui/client/static/js/highlightjs-copy.min.js
diff --git a/g4f/gui/client/js/icons.js b/g4f/gui/client/static/js/icons.js
index 84fed38d..84fed38d 100644
--- a/g4f/gui/client/js/icons.js
+++ b/g4f/gui/client/static/js/icons.js
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
new file mode 100644
index 00000000..966319e4
--- /dev/null
+++ b/g4f/gui/server/api.py
@@ -0,0 +1,185 @@
+import logging
+import json
+from typing import Iterator
+
+try:
+ import webview
+except ImportError:
+ ...
+
+from g4f import version, models
+from g4f import get_last_provider, ChatCompletion
+from g4f.errors import VersionNotFoundError
+from g4f.Provider import ProviderType, __providers__, __map__
+from g4f.providers.base_provider import ProviderModelMixin
+from g4f.Provider.bing.create_images import patch_provider
+from g4f.Provider.Bing import Conversation
+
+conversations: dict[str, Conversation] = {}
+
+class Api():
+
+ def get_models(self) -> list[str]:
+ """
+ Return a list of all models.
+
+ Fetches and returns a list of all available models in the system.
+
+ Returns:
+ List[str]: A list of model names.
+ """
+ return models._all_models
+
+ def get_provider_models(self, provider: str) -> list[dict]:
+ if provider in __map__:
+ provider: ProviderType = __map__[provider]
+ if issubclass(provider, ProviderModelMixin):
+ return [{"model": model, "default": model == provider.default_model} for model in provider.get_models()]
+ elif provider.supports_gpt_35_turbo or provider.supports_gpt_4:
+ return [
+ *([{"model": "gpt-4", "default": not provider.supports_gpt_4}] if provider.supports_gpt_4 else []),
+ *([{"model": "gpt-3.5-turbo", "default": not provider.supports_gpt_4}] if provider.supports_gpt_35_turbo else [])
+ ]
+ else:
+                return []
+
+ def get_providers(self) -> list[str]:
+ """
+ Return a list of all working providers.
+ """
+ return [provider.__name__ for provider in __providers__ if provider.working]
+
+ def get_version(self):
+ """
+ Returns the current and latest version of the application.
+
+ Returns:
+ dict: A dictionary containing the current and latest version.
+ """
+ try:
+ current_version = version.utils.current_version
+ except VersionNotFoundError:
+ current_version = None
+ return {
+ "version": current_version,
+ "latest_version": version.utils.latest_version,
+ }
+
+ def generate_title(self):
+ """
+ Generates and returns a title based on the request data.
+
+ Returns:
+ dict: A dictionary with the generated title.
+ """
+ return {'title': ''}
+
+ def get_conversation(self, options: dict, **kwargs) -> Iterator:
+ window = webview.active_window()
+ for message in self._create_response_stream(
+ self._prepare_conversation_kwargs(options, kwargs),
+ options.get("conversation_id")
+ ):
+ window.evaluate_js(f"this.add_message_chunk({json.dumps(message)})")
+
+ def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
+ """
+ Prepares arguments for chat completion based on the request data.
+
+ Reads the request and prepares the necessary arguments for handling
+ a chat completion request.
+
+ Returns:
+ dict: Arguments prepared for chat completion.
+ """
+ provider = json_data.get('provider', None)
+ if "image" in kwargs and provider is None:
+ provider = "Bing"
+ if provider == 'OpenaiChat':
+ kwargs['auto_continue'] = True
+
+ messages = json_data['messages']
+ if json_data.get('web_search'):
+ if provider == "Bing":
+ kwargs['web_search'] = True
+ else:
+ from .internet import get_search_message
+ messages[-1]["content"] = get_search_message(messages[-1]["content"])
+
+ conversation_id = json_data.get("conversation_id")
+ if conversation_id and conversation_id in conversations:
+ kwargs["conversation"] = conversations[conversation_id]
+
+ model = json_data.get('model')
+ model = model if model else models.default
+ patch = patch_provider if json_data.get('patch_provider') else None
+
+ return {
+ "model": model,
+ "provider": provider,
+ "messages": messages,
+ "stream": True,
+ "ignore_stream": True,
+ "patch_provider": patch,
+ "return_conversation": True,
+ **kwargs
+ }
+
+ def _create_response_stream(self, kwargs, conversation_id: str) -> Iterator:
+ """
+ Creates and returns a streaming response for the conversation.
+
+ Args:
+ kwargs (dict): Arguments for creating the chat completion.
+
+ Yields:
+ str: JSON formatted response chunks for the stream.
+
+ Raises:
+ Exception: If an error occurs during the streaming process.
+ """
+ try:
+ first = True
+ for chunk in ChatCompletion.create(**kwargs):
+ if first:
+ first = False
+ yield self._format_json("provider", get_last_provider(True))
+ if isinstance(chunk, Conversation):
+ conversations[conversation_id] = chunk
+ yield self._format_json("conversation", conversation_id)
+ elif isinstance(chunk, Exception):
+ logging.exception(chunk)
+ yield self._format_json("message", get_error_message(chunk))
+ else:
+ yield self._format_json("content", chunk)
+ except Exception as e:
+ logging.exception(e)
+ yield self._format_json('error', get_error_message(e))
+
+ def _format_json(self, response_type: str, content):
+ """
+ Formats and returns a JSON response.
+
+ Args:
+ response_type (str): The type of the response.
+ content: The content to be included in the response.
+
+ Returns:
+ str: A JSON formatted string.
+ """
+ return {
+ 'type': response_type,
+ response_type: content
+ }
+
+def get_error_message(exception: Exception) -> str:
+ """
+ Generates a formatted error message from an exception.
+
+ Args:
+ exception (Exception): The exception to format.
+
+ Returns:
+ str: A formatted error message string.
+ """
+    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
\ No newline at end of file
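
The Api class above is shared by the Flask backend and the pywebview bridge, so its read-only endpoints can also be exercised straight from Python. A small sketch, assuming g4f is importable; "Bing" is used only as an example provider name.

from g4f.gui.server.api import Api

api = Api()

# Current and latest release, exactly as returned to the UI version label.
print(api.get_version())

# All registered model names and all working providers.
print(api.get_models()[:5])
print(api.get_providers()[:5])

# Per-provider model list; each entry flags whether it is the provider's default.
for entry in api.get_provider_models("Bing") or []:
    print(entry["model"], "(default)" if entry["default"] else "")
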
diff --git a/g4f/gui/server/app.py b/g4f/gui/server/app.py
index 2b55698c..869d3880 100644
--- a/g4f/gui/server/app.py
+++ b/g4f/gui/server/app.py
@@ -1,3 +1,9 @@
+import sys, os
from flask import Flask
-app = Flask(__name__, template_folder='./../client/html')
\ No newline at end of file
+if getattr(sys, 'frozen', False):
+ template_folder = os.path.join(sys._MEIPASS, "client")
+else:
+ template_folder = "../client"
+
+app = Flask(__name__, template_folder=template_folder, static_folder=f"{template_folder}/static")
\ No newline at end of file
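
The getattr(sys, 'frozen', False) branch is the standard PyInstaller pattern: inside a bundled executable the data files are unpacked under sys._MEIPASS, while a source checkout resolves them relative to the package. A generic sketch of that pattern; the resource_path helper is illustrative and not part of the repository.

import os
import sys

def resource_path(relative: str) -> str:
    """Resolve a data path both inside a PyInstaller bundle and in a source checkout."""
    if getattr(sys, "frozen", False):
        # PyInstaller extracts bundled data into a temporary dir exposed as _MEIPASS.
        base = sys._MEIPASS
    else:
        base = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base, relative)
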
diff --git a/g4f/gui/server/backend.py b/g4f/gui/server/backend.py
index 454ed1c6..fb8404d4 100644
--- a/g4f/gui/server/backend.py
+++ b/g4f/gui/server/backend.py
@@ -1,15 +1,9 @@
-import logging
import json
from flask import request, Flask
-from typing import Generator
-from g4f import version, models
-from g4f import get_last_provider, ChatCompletion
from g4f.image import is_allowed_extension, to_image
-from g4f.errors import VersionNotFoundError
-from g4f.Provider import __providers__
-from g4f.Provider.bing.create_images import patch_provider
+from .api import Api
-class Backend_Api:
+class Backend_Api(Api):
"""
Handles various endpoints in a Flask application for backend operations.
@@ -33,6 +27,10 @@ class Backend_Api:
'function': self.get_models,
'methods': ['GET']
},
+ '/backend-api/v2/models/<provider>': {
+ 'function': self.get_provider_models,
+ 'methods': ['GET']
+ },
'/backend-api/v2/providers': {
'function': self.get_providers,
'methods': ['GET']
@@ -54,7 +52,7 @@ class Backend_Api:
'methods': ['POST']
}
}
-
+
def handle_error(self):
"""
Initialize the backend API with the given Flask application.
@@ -64,49 +62,7 @@ class Backend_Api:
"""
print(request.json)
return 'ok', 200
-
- def get_models(self):
- """
- Return a list of all models.
-
- Fetches and returns a list of all available models in the system.
-
- Returns:
- List[str]: A list of model names.
- """
- return models._all_models
-
- def get_providers(self):
- """
- Return a list of all working providers.
- """
- return [provider.__name__ for provider in __providers__ if provider.working]
-
- def get_version(self):
- """
- Returns the current and latest version of the application.
-
- Returns:
- dict: A dictionary containing the current and latest version.
- """
- try:
- current_version = version.utils.current_version
- except VersionNotFoundError:
- current_version = None
- return {
- "version": current_version,
- "latest_version": version.utils.latest_version,
- }
-
- def generate_title(self):
- """
- Generates and returns a title based on the request data.
- Returns:
- dict: A dictionary with the generated title.
- """
- return {'title': ''}
-
def handle_conversation(self):
"""
Handles conversation requests and streams responses back.
@@ -114,26 +70,10 @@ class Backend_Api:
Returns:
Response: A Flask response object for streaming.
"""
- kwargs = self._prepare_conversation_kwargs()
-
- return self.app.response_class(
- self._create_response_stream(kwargs),
- mimetype='text/event-stream'
- )
-
- def _prepare_conversation_kwargs(self):
- """
- Prepares arguments for chat completion based on the request data.
-
- Reads the request and prepares the necessary arguments for handling
- a chat completion request.
-
- Returns:
- dict: Arguments prepared for chat completion.
- """
+
kwargs = {}
- if "image" in request.files:
- file = request.files['image']
+ if "file" in request.files:
+ file = request.files['file']
if file.filename != '' and is_allowed_extension(file.filename):
kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
kwargs['image_name'] = file.filename
@@ -141,66 +81,20 @@ class Backend_Api:
json_data = json.loads(request.form['json'])
else:
json_data = request.json
-
- provider = json_data.get('provider', '').replace('g4f.Provider.', '')
- provider = provider if provider and provider != "Auto" else None
-
- if "image" in kwargs and not provider:
- provider = "Bing"
- if provider == 'OpenaiChat':
- kwargs['auto_continue'] = True
-
- messages = json_data['messages']
- if json_data.get('web_search'):
- if provider == "Bing":
- kwargs['web_search'] = True
- else:
- # ResourceWarning: unclosed event loop
- from .internet import get_search_message
- messages[-1]["content"] = get_search_message(messages[-1]["content"])
-
- model = json_data.get('model')
- model = model if model else models.default
- patch = patch_provider if json_data.get('patch_provider') else None
-
- return {
- "model": model,
- "provider": provider,
- "messages": messages,
- "stream": True,
- "ignore_stream": True,
- "patch_provider": patch,
- **kwargs
- }
- def _create_response_stream(self, kwargs) -> Generator[str, None, None]:
- """
- Creates and returns a streaming response for the conversation.
+ kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
- Args:
- kwargs (dict): Arguments for creating the chat completion.
+ return self.app.response_class(
+ self._create_response_stream(kwargs, json_data.get("conversation_id")),
+ mimetype='text/event-stream'
+ )
- Yields:
- str: JSON formatted response chunks for the stream.
+ def get_provider_models(self, provider: str):
+ models = super().get_provider_models(provider)
+ if models is None:
+            return "Provider not found", 404
+ return models
- Raises:
- Exception: If an error occurs during the streaming process.
- """
- try:
- first = True
- for chunk in ChatCompletion.create(**kwargs):
- if first:
- first = False
- yield self._format_json('provider', get_last_provider(True))
- if isinstance(chunk, Exception):
- logging.exception(chunk)
- yield self._format_json('message', get_error_message(chunk))
- else:
- yield self._format_json('content', str(chunk))
- except Exception as e:
- logging.exception(e)
- yield self._format_json('error', get_error_message(e))
-
def _format_json(self, response_type: str, content) -> str:
"""
Formats and returns a JSON response.
@@ -212,19 +106,4 @@ class Backend_Api:
Returns:
str: A JSON formatted string.
"""
- return json.dumps({
- 'type': response_type,
- response_type: content
- }) + "\n"
-
-def get_error_message(exception: Exception) -> str:
- """
- Generates a formatted error message from an exception.
-
- Args:
- exception (Exception): The exception to format.
-
- Returns:
- str: A formatted error message string.
- """
-    return f"{get_last_provider().__name__}: {type(exception).__name__}: {exception}"
\ No newline at end of file
+        return json.dumps(super()._format_json(response_type, content)) + "\n"
\ No newline at end of file
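
Backend_Api now inherits the shared logic from Api and keeps only the Flask-specific parts: the routes table, upload handling, and the newline-framed JSON output. The registration of that routes dictionary is not shown in this hunk; a sketch of how such a table is typically wired into Flask, assuming the constructor simply iterates it:

from flask import Flask

def register_routes(app: Flask, routes: dict) -> None:
    """Register each entry of a Backend_Api-style routes table on the Flask app."""
    for path, spec in routes.items():
        app.add_url_rule(
            path,
            endpoint=spec["function"].__name__,  # endpoint name derived from the handler
            view_func=spec["function"],
            methods=spec["methods"],
        )
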
diff --git a/g4f/gui/server/website.py b/g4f/gui/server/website.py
index 2705664d..4e611177 100644
--- a/g4f/gui/server/website.py
+++ b/g4f/gui/server/website.py
@@ -1,6 +1,5 @@
-from flask import render_template, send_file, redirect
-from time import time
-from os import urandom
+import uuid
+from flask import render_template, redirect
class Website:
def __init__(self, app) -> None:
@@ -18,23 +17,12 @@ class Website:
'function': self._chat,
'methods': ['GET', 'POST']
},
- '/assets/<folder>/<file>': {
- 'function': self._assets,
- 'methods': ['GET', 'POST']
- }
}
def _chat(self, conversation_id):
if '-' not in conversation_id:
return redirect('/chat')
-
- return render_template('index.html', chat_id = conversation_id)
+ return render_template('index.html', chat_id=conversation_id)
def _index(self):
- return render_template('index.html', chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}')
-
- def _assets(self, folder: str, file: str):
- try:
- return send_file(f"./../client/{folder}/{file}", as_attachment=False)
- except:
-            return "File not found", 404
\ No newline at end of file
+        return render_template('index.html', chat_id=str(uuid.uuid4()))
\ No newline at end of file
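
website.py now hands out plain UUID4 chat ids and drops the /assets route, since Flask serves those files from static_folder after this commit. The '-' guard in _chat keeps working because a UUID4 string always contains dashes; a quick illustration:

import uuid

chat_id = str(uuid.uuid4())
print(chat_id)         # e.g. 'a3f1c2d4-....-....-....-............'
assert "-" in chat_id  # so /chat/<conversation_id> passes the redirect check in _chat
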
diff --git a/g4f/gui/webview.py b/g4f/gui/webview.py
index 5a4263dc..a8e745f3 100644
--- a/g4f/gui/webview.py
+++ b/g4f/gui/webview.py
@@ -1,24 +1,37 @@
import webview
-from functools import partial
-from platformdirs import user_config_dir
+try:
+ from platformdirs import user_config_dir
+ has_platformdirs = True
+except ImportError:
+ has_platformdirs = False
-from g4f.gui import run_gui
from g4f.gui.run import gui_parser
+from g4f.gui.server.api import Api
import g4f.version
import g4f.debug
-def run_webview(host: str = "0.0.0.0", port: int = 8080, debug: bool = True):
- webview.create_window(f"g4f - {g4f.version.utils.current_version}", f"http://{host}:{port}/")
- if debug:
- g4f.debug.logging = True
+def run_webview(
+ debug: bool = False,
+ storage_path: str = None
+):
+ webview.create_window(
+ f"g4f - {g4f.version.utils.current_version}",
+ "client/index.html",
+ text_select=True,
+ js_api=Api(),
+ )
+ if has_platformdirs and storage_path is None:
+ storage_path = user_config_dir("g4f-webview")
webview.start(
- partial(run_gui, host, port),
private_mode=False,
- storage_path=user_config_dir("g4f-webview"),
- debug=debug
+ storage_path=storage_path,
+ debug=debug,
+ ssl=True
)
if __name__ == "__main__":
parser = gui_parser()
args = parser.parse_args()
-    run_webview(args.host, args.port, args.debug)
\ No newline at end of file
+ if args.debug:
+ g4f.debug.logging = True
+    run_webview(args.debug)
\ No newline at end of file
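
run_webview() now loads the bundled client/index.html directly and exposes the shared Api object through pywebview's js_api bridge, which is what the new 'pywebviewready' handler and the pywebview.api["get_" + ressource](...) calls in chat.v1.js talk to. A minimal launch sketch, assuming pywebview (and optionally platformdirs) is installed:

from g4f.gui.webview import run_webview

# Opens the native window; storage_path falls back to platformdirs'
# user_config_dir("g4f-webview") when that package is available.
run_webview(debug=True)
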
diff --git a/g4f/gui/webview.spec b/g4f/gui/webview.spec
new file mode 100644
index 00000000..360e264e
--- /dev/null
+++ b/g4f/gui/webview.spec
@@ -0,0 +1,45 @@
+# -*- mode: python ; coding: utf-8 -*-
+
+
+block_cipher = None
+
+
+a = Analysis(
+ ['webview.py'],
+ pathex=[],
+ binaries=[],
+ datas=[],
+ hiddenimports=[],
+ hookspath=[],
+ hooksconfig={},
+ runtime_hooks=[],
+ excludes=[],
+ win_no_prefer_redirects=False,
+ win_private_assemblies=False,
+ cipher=block_cipher,
+ noarchive=False,
+)
+pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
+
+exe = EXE(
+ pyz,
+ a.scripts,
+ a.binaries,
+ Tree('client', prefix='client'),
+ a.zipfiles,
+ a.datas,
+ [],
+ name='webview',
+ debug=False,
+ bootloader_ignore_signals=False,
+ strip=False,
+ upx=True,
+ upx_exclude=[],
+ runtime_tmpdir=None,
+ console=False,
+ disable_windowed_traceback=False,
+ argv_emulation=False,
+ target_arch=None,
+ codesign_identity=None,
+ entitlements_file=None,
+)
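
The new webview.spec bundles the whole client/ directory (the Tree('client', prefix='client') entry) into a single windowed executable. Building it amounts to running PyInstaller against the spec file; the sketch below drives it through PyInstaller's Python entry point to stay in the same language as the other examples, and assumes PyInstaller is installed and the command is run from g4f/gui.

import PyInstaller.__main__

# Equivalent to running `pyinstaller webview.spec` inside g4f/gui.
PyInstaller.__main__.run(["webview.spec"])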