summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/H2o.py
diff options
context:
space:
mode:
authorHeiner Lohaus <heiner@lohaus.eu>2023-10-15 19:10:25 +0200
committerHeiner Lohaus <heiner@lohaus.eu>2023-10-15 19:10:25 +0200
commitc1adfbee8e15406dbdce75f87de47dc1c0dd17df (patch)
treeec8be1ecf785cfd2f8dd0c5592b0e79a291116e0 /g4f/Provider/H2o.py
parent~ (diff)
downloadgpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar.gz
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar.bz2
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar.lz
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar.xz
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.tar.zst
gpt4free-c1adfbee8e15406dbdce75f87de47dc1c0dd17df.zip
Diffstat (limited to 'g4f/Provider/H2o.py')
-rw-r--r--g4f/Provider/H2o.py109
1 files changed, 0 insertions, 109 deletions
diff --git a/g4f/Provider/H2o.py b/g4f/Provider/H2o.py
deleted file mode 100644
index 9fac92a4..00000000
--- a/g4f/Provider/H2o.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from __future__ import annotations
-
-import json
-import uuid
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
-
-
class H2o(AsyncGeneratorProvider):
    """Provider for the h2oGPT chat service at gpt-gm.h2o.ai.

    Creates a server-side conversation, streams generated tokens from the
    service's server-sent-events endpoint, then deletes the conversation.
    """
    url = "https://gpt-gm.h2o.ai"
    working = False  # provider is marked non-functional in this revision
    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield response text tokens for *messages* as they stream in.

        Args:
            model: Model id; falls back to ``cls.model`` when falsy.
            messages: Conversation history, formatted into one prompt.
            proxy: Optional HTTP proxy URL used for every request.
            **kwargs: Extra generation parameters merged into the
                request's ``parameters`` dict (e.g. ``temperature``).

        Raises:
            aiohttp.ClientResponseError: On any non-2xx HTTP response.
        """
        model = model if model else cls.model
        headers = {"Referer": cls.url + "/"}

        async with ClientSession(
            headers=headers
        ) as session:
            # Accept the ethics modal / persist settings; required before
            # the conversation endpoints will respond.
            data = {
                "ethicsModalAccepted": "true",
                "shareConversationsWithModelAuthors": "true",
                "ethicsModalAcceptedAt": "",
                "activeModel": model,
                "searchEnabled": "true",
            }
            async with session.post(
                f"{cls.url}/settings",
                proxy=proxy,
                data=data
            ) as response:
                response.raise_for_status()

            # Open a new server-side conversation and capture its id.
            async with session.post(
                f"{cls.url}/conversation",
                proxy=proxy,
                json={"model": model},
            ) as response:
                response.raise_for_status()
                conversationId = (await response.json())["conversationId"]

            data = {
                "inputs": format_prompt(messages),
                "parameters": {
                    # Defaults below can be overridden via **kwargs.
                    "temperature": 0.4,
                    "truncate": 2048,
                    "max_new_tokens": 1024,
                    "do_sample": True,
                    "repetition_penalty": 1.2,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": True,
                "options": {
                    "id": str(uuid.uuid4()),
                    "response_id": str(uuid.uuid4()),
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": "",
                },
            }
            async with session.post(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                # BUGFIX: the original never checked the streaming response
                # status, silently yielding nothing on an HTTP error.
                response.raise_for_status()
                start = "data:"
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        # BUGFIX: the original sliced off exactly one char
                        # ([:-1]) to drop the newline, which breaks on
                        # "\r\n" endings and crashes on bare "data:" lines.
                        payload = line[len(start):].strip()
                        if not payload:
                            continue
                        event = json.loads(payload)
                        # Skip special tokens (e.g. end-of-sequence markers).
                        if not event["token"]["special"]:
                            yield event["token"]["text"]

            # Clean up the server-side conversation. BUGFIX: the original
            # resent the whole generation payload as the DELETE body; the
            # endpoint only needs the conversation id in the URL.
            async with session.delete(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
            ) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        """Return a human-readable summary of the supported parameters.

        NOTE(review): chaining @classmethod and @property is deprecated in
        Python 3.11 and removed in 3.13 — consider a metaclass property or
        a plain classmethod if this file is kept on newer interpreters.
        """
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("truncate", "int"),
            ("max_new_tokens", "int"),
            ("do_sample", "bool"),
            ("repetition_penalty", "float"),
            ("return_full_text", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"