author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-10-17 17:56:51 +0200
committer  GitHub <noreply@github.com>  2024-10-17 17:56:51 +0200
commit     66a305998d47e724efaea696bf352428cfcd8291 (patch)
tree       e372492a190abfa1254e6b05afea8d154aa48225 /docs
parent     Merge pull request #2275 from hansipie/setollamahost (diff)
parent     Update (g4f/Provider/Blackbox.py) (diff)
download   gpt4free-0.3.3.1 (tar, tar.gz, tar.bz2, tar.lz, tar.xz, tar.zst, zip)
Diffstat (limited to 'docs')
-rw-r--r--  docs/async_client.md  | 2
-rw-r--r--  docs/client.md        | 3
-rw-r--r--  docs/interference.md  | 4
3 files changed, 4 insertions, 5 deletions
diff --git a/docs/async_client.md b/docs/async_client.md
index a3f773fa..f5ac5392 100644
--- a/docs/async_client.md
+++ b/docs/async_client.md
@@ -187,7 +187,7 @@ async def main():
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
)
- task2 = client.images.generate(
+ task2 = client.images.async_generate(
model="dall-e-3",
prompt="a white siamese cat",
)
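For context, the hunk above renames the image call in the concurrent-tasks example of docs/async_client.md from `client.images.generate` to `client.images.async_generate`. Below is a minimal sketch of how the corrected call might be awaited; the `chat.completions.async_create` counterpart and the OpenAI-style `choices[0].message.content` response shape are assumptions drawn from the surrounding docs, not confirmed by this diff.

```python
import asyncio
from g4f.client import Client

async def main():
    client = Client()
    # Assumed companion call from the same docs example (not shown in this hunk)
    task1 = client.chat.completions.async_create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    # The renamed awaitable image call from this commit
    task2 = client.images.async_generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )
    chat_response, image_response = await asyncio.gather(task1, task2)
    # Assumed OpenAI-style response shape
    print(chat_response.choices[0].message.content)

asyncio.run(main())
```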
diff --git a/docs/client.md b/docs/client.md
index 5e6b79ba..e95c510d 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -61,8 +61,8 @@ You can use the `ChatCompletions` endpoint to generate text completions as follows
```python
from g4f.client import Client
-client = Client()
+client = Client()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}],
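Completed, the non-streaming example this hunk belongs to reads roughly as below; the final `print` line and the `choices[0].message.content` access are assumptions based on the OpenAI-compatible response shape the g4f client mimics.

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test"}],
)
# Assumed OpenAI-style response shape
print(response.choices[0].message.content)
```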
@@ -77,7 +77,6 @@ Also streaming are supported:
from g4f.client import Client
client = Client()
-
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Say this is a test"}],
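The second hunk only drops a blank line from the streaming example. For reference, a minimal sketch of that example, assuming `stream=True` and OpenAI-style delta chunks as the surrounding docs suggest (neither is shown in this diff):

```python
from g4f.client import Client

client = Client()
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,  # assumed flag enabling incremental chunks
)
for chunk in stream:
    # Assumed OpenAI-style streaming chunk shape
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```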
diff --git a/docs/interference.md b/docs/interference.md
index b140f66a..1b4f0c11 100644
--- a/docs/interference.md
+++ b/docs/interference.md
@@ -54,7 +54,7 @@ Send the POST request to /v1/chat/completions with body containing the `model` m
import requests
url = "http://localhost:1337/v1/chat/completions"
body = {
- "model": "gpt-3.5-turbo-16k",
+ "model": "gpt-3.5-turbo",
"stream": False,
"messages": [
{"role": "assistant", "content": "What can you do?"}
@@ -66,4 +66,4 @@ for choice in json_response:
print(choice.get('message', {}).get('content', ''))
```
-[Return to Home](/)
\ No newline at end of file
+[Return to Home](/)
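Put together, the interference-API example this hunk edits looks roughly like the sketch below. The request body and the print loop come from the diff itself; the `requests.post(...).json().get('choices', [])` line and the assumption that the local server returns an OpenAI-style `choices` list are not confirmed here.

```python
import requests

url = "http://localhost:1337/v1/chat/completions"
body = {
    "model": "gpt-3.5-turbo",  # model name updated by this commit
    "stream": False,
    "messages": [
        {"role": "assistant", "content": "What can you do?"}
    ]
}

# Assumed: the response mirrors OpenAI's schema with a top-level 'choices' list
json_response = requests.post(url, json=body).json().get('choices', [])

for choice in json_response:
    print(choice.get('message', {}).get('content', ''))
```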