author     Tekky <98614666+xtekky@users.noreply.github.com>  2024-09-16 00:37:30 +0200
committer  GitHub <noreply@github.com>                       2024-09-16 00:37:30 +0200
commit     cc80f2d3159ca0b6f6bfa2c36c4be87bc96209b2
tree       25ef52b8c5496ca22ce8eaa82bc885313962ba29  /g4f/Provider/Llama.py
parent     Merge pull request #2207 from kqlio67/main
parent     refactor(ReplicateHome): update model handling and API interaction
Diffstat (limited to 'g4f/Provider/Llama.py')
-rw-r--r--  g4f/Provider/Llama.py | 91
1 file changed, 0 insertions, 91 deletions
diff --git a/g4f/Provider/Llama.py b/g4f/Provider/Llama.py
deleted file mode 100644
index 235c0994..00000000
--- a/g4f/Provider/Llama.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-
-from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class Llama(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://www.llama2.ai"
-    working = False
-    supports_message_history = True
-    default_model = "meta/meta-llama-3-70b-instruct"
-    models = [
-        "meta/llama-2-7b-chat",
-        "meta/llama-2-13b-chat",
-        "meta/llama-2-70b-chat",
-        "meta/meta-llama-3-8b-instruct",
-        "meta/meta-llama-3-70b-instruct",
-    ]
-    model_aliases = {
-        "meta-llama/Meta-Llama-3-8B-Instruct": "meta/meta-llama-3-8b-instruct",
-        "meta-llama/Meta-Llama-3-70B-Instruct": "meta/meta-llama-3-70b-instruct",
-        "meta-llama/Llama-2-7b-chat-hf": "meta/llama-2-7b-chat",
-        "meta-llama/Llama-2-13b-chat-hf": "meta/llama-2-13b-chat",
-        "meta-llama/Llama-2-70b-chat-hf": "meta/llama-2-70b-chat",
-    }
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        proxy: str = None,
-        system_message: str = "You are a helpful assistant.",
-        temperature: float = 0.75,
-        top_p: float = 0.9,
-        max_tokens: int = 8000,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
-            "Accept": "*/*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Referer": f"{cls.url}/",
-            "Content-Type": "text/plain;charset=UTF-8",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
-            "TE": "trailers"
-        }
-        async with ClientSession(headers=headers) as session:
-            system_messages = [message["content"] for message in messages if message["role"] == "system"]
-            if system_messages:
-                system_message = "\n".join(system_messages)
-                messages = [message for message in messages if message["role"] != "system"]
-            prompt = format_prompt(messages)
-            data = {
-                "prompt": prompt,
-                "model": cls.get_model(model),
-                "systemPrompt": system_message,
-                "temperature": temperature,
-                "topP": top_p,
-                "maxTokens": max_tokens,
-                "image": None
-            }
-            started = False
-            async with session.post(f"{cls.url}/api", json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                async for chunk in response.content.iter_any():
-                    if not chunk:
-                        continue
-                    if not started:
-                        chunk = chunk.lstrip()
-                        started = True
-                    yield chunk.decode(errors="ignore")
-
-def format_prompt(messages: Messages):
-    messages = [
-        f"[INST] {message['content']} [/INST]"
-        if message["role"] == "user"
-        else message["content"]
-        for message in messages
-    ]
-    return "\n".join(messages) + "\n"
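For reference, a minimal sketch of how the removed provider was typically driven. The call signature, default model, and parameter defaults below are taken from the class definition in the diff; the script itself is hypothetical, and since the class ships with working = False the upstream endpoint is presumed dead, so this only runs against a g4f release from before this commit.

    import asyncio

    # Present only in releases prior to this commit, which deletes the module.
    from g4f.Provider.Llama import Llama

    async def main():
        messages = [{"role": "user", "content": "Say hello in one sentence."}]
        # create_async_generator streams the response as decoded text chunks.
        async for chunk in Llama.create_async_generator(
            model="meta/meta-llama-3-70b-instruct",  # the class's default_model
            messages=messages,
            temperature=0.75,
            top_p=0.9,
            max_tokens=8000,
        ):
            print(chunk, end="", flush=True)

    asyncio.run(main())

Internally, format_prompt wrapped each user turn in Llama-style [INST] ... [/INST] tags and joined the turns with newlines, so the whole message history collapsed into a single prompt string posted to {url}/api, with any system messages concatenated into the separate systemPrompt field.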