author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-24 01:31:17 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-02-24 01:31:17 +0100
commit     775a0c43a0856f57dbd847a73b9d20b7cddb5063 (patch)
tree       e0f7071a2f359ad37ec7ce762b89194d4ba92e19 /g4f/Provider/GeminiPro.py
parent     Set default values for generation config (diff)
Diffstat (limited to 'g4f/Provider/GeminiPro.py')
-rw-r--r--  g4f/Provider/GeminiPro.py  19
1 file changed, 10 insertions, 9 deletions
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index b296f253..e1738dc8 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -7,7 +7,7 @@ from aiohttp import ClientSession
 from ..typing import AsyncResult, Messages, ImageType
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import to_bytes, is_accepted_format
-
+from ..errors import MissingAuthError
 
 class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ai.google.dev"
@@ -29,7 +29,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     ) -> AsyncResult:
         model = "gemini-pro-vision" if not model and image else model
         model = cls.get_model(model)
-        api_key = api_key if api_key else kwargs.get("access_token")
+        if not api_key:
+            raise MissingAuthError('Missing "api_key" for auth')
         headers = {
             "Content-Type": "application/json",
         }
@@ -53,13 +54,13 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                 })
             data = {
                 "contents": contents,
-                # "generationConfig": {
-                #     "stopSequences": kwargs.get("stop"),
-                #     "temperature": kwargs.get("temperature"),
-                #     "maxOutputTokens": kwargs.get("max_tokens"),
-                #     "topP": kwargs.get("top_p"),
-                #     "topK": kwargs.get("top_k"),
-                # }
+                "generationConfig": {
+                    "stopSequences": kwargs.get("stop"),
+                    "temperature": kwargs.get("temperature"),
+                    "maxOutputTokens": kwargs.get("max_tokens"),
+                    "topP": kwargs.get("top_p"),
+                    "topK": kwargs.get("top_k"),
+                }
             }
             async with session.post(url, params={"key": api_key}, json=data, proxy=proxy) as response:
                 if not response.ok:
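
Below is a minimal usage sketch, not part of the commit, illustrating the two changed code paths: calling the provider without an api_key now raises MissingAuthError instead of falling back to an access_token kwarg, and sampling options are forwarded to the API through generationConfig. The call pattern, the import paths, and the placeholder key are assumptions about the surrounding g4f package, not code taken from this diff.

# Hypothetical usage sketch; assumes the g4f package from this repository is
# installed and that GeminiPro.create_async_generator keeps the signature
# shown in the diff above. The API key value is a placeholder.
import asyncio

from g4f.Provider.GeminiPro import GeminiPro
from g4f.errors import MissingAuthError

async def main():
    messages = [{"role": "user", "content": "Say hello"}]

    # Without an api_key the provider now fails fast with MissingAuthError
    # instead of silently reading kwargs.get("access_token").
    try:
        async for _ in GeminiPro.create_async_generator("gemini-pro", messages):
            pass
    except MissingAuthError as err:
        print("auth error:", err)

    # With a key, generation options are passed on via generationConfig.
    async for chunk in GeminiPro.create_async_generator(
        "gemini-pro",
        messages,
        api_key="YOUR_API_KEY",  # placeholder, not a real key
        temperature=0.7,
        max_tokens=256,
    ):
        print(chunk, end="")

asyncio.run(main())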