Diffstat (limited to 'g4f/Provider/needs_auth')
 g4f/Provider/needs_auth/BingCreateImages.py                      |  54 +
 g4f/Provider/needs_auth/DeepInfra.py                             |  58 +
 g4f/Provider/needs_auth/DeepInfraImage.py                        |  80 +
 g4f/Provider/needs_auth/GeminiPro.py                             | 111 +
 g4f/Provider/needs_auth/Groq.py                                  |   4 +-
 g4f/Provider/needs_auth/HuggingFace.py                           | 104 +
 g4f/Provider/needs_auth/MetaAI.py                                | 238 +
 g4f/Provider/needs_auth/MetaAIAccount.py                         |  23 +
 g4f/Provider/needs_auth/OpenRouter.py                            |  32 -
 g4f/Provider/needs_auth/{Openai.py => OpenaiAPI.py}              |   2 +-
 g4f/Provider/needs_auth/OpenaiChat.py                            |   1 +
 g4f/Provider/needs_auth/PerplexityApi.py                         |   4 +-
 g4f/Provider/needs_auth/Replicate.py                             |  88 +
 g4f/Provider/needs_auth/ThebApi.py                               |   6 +-
 g4f/Provider/needs_auth/WhiteRabbitNeo.py                        |  57 +
 g4f/Provider/needs_auth/__init__.py                              |  33 +-
 g4f/Provider/needs_auth/gigachat/GigaChat.py                     |  92 +
 g4f/Provider/needs_auth/gigachat/__init__.py                     |   2 +
 g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt |  33 +
 19 files changed, 971 insertions(+), 51 deletions(-)
diff --git a/g4f/Provider/needs_auth/BingCreateImages.py b/g4f/Provider/needs_auth/BingCreateImages.py
new file mode 100644
index 00000000..80984d40
--- /dev/null
+++ b/g4f/Provider/needs_auth/BingCreateImages.py
@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from ...cookies import get_cookies
+from ...image import ImageResponse
+from ...errors import MissingAuthError
+from ...typing import AsyncResult, Messages, Cookies
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..bing.create_images import create_images, create_session
+
+class BingCreateImages(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Microsoft Designer in Bing"
+    parent = "Bing"
+    url = "https://www.bing.com/images/create"
+    working = True
+    needs_auth = True
+    image_models = ["dall-e"]
+
+    def __init__(self, cookies: Cookies = None, proxy: str = None, api_key: str = None) -> None:
+        if api_key is not None:
+            if cookies is None:
+                cookies = {}
+            cookies["_U"] = api_key
+        self.cookies = cookies
+        self.proxy = proxy
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_key: str = None,
+        cookies: Cookies = None,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        session = BingCreateImages(cookies, proxy, api_key)
+        yield await session.generate(messages[-1]["content"])
+
+    async def generate(self, prompt: str) -> ImageResponse:
+        """
+        Asynchronously creates a markdown formatted string with images based on the prompt.
+
+        Args:
+            prompt (str): Prompt to generate images.
+
+        Returns:
+            ImageResponse: Markdown formatted string with images.
+        """
+        cookies = self.cookies or get_cookies(".bing.com", False)
+        if cookies is None or "_U" not in cookies:
+            raise MissingAuthError('Missing "_U" cookie')
+        async with create_session(cookies, self.proxy) as session:
+            images = await create_images(session, prompt)
+            return ImageResponse(images, prompt, {"preview": "{image}?w=200&h=200"} if len(images) > 1 else {})
\ No newline at end of file
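
Usage sketch for the new provider, based on the signature above; the `_U` value is a placeholder cookie taken from a logged-in Bing session:

    import asyncio
    from g4f.Provider.needs_auth import BingCreateImages

    async def main():
        async for item in BingCreateImages.create_async_generator(
            model="dall-e",
            messages=[{"role": "user", "content": "a watercolor fox"}],
            api_key="<_U cookie value>",  # placeholder; stored as the "_U" cookie
        ):
            print(item)  # an ImageResponse; str() renders markdown image links

    asyncio.run(main())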
diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py
new file mode 100644
index 00000000..35e7ca7f
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepInfra.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+import requests
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
+
+class DeepInfra(OpenaiAPI):
+    label = "DeepInfra"
+    url = "https://deepinfra.com"
+    working = True
+    needs_auth = True
+    supports_stream = True
+    supports_message_history = True
+    default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct"
+
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            models = requests.get(url).json()
+            cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"]
+        return cls.models
+
+    @classmethod
+    def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        api_base: str = "https://api.deepinfra.com/v1/openai",
+        temperature: float = 0.7,
+        max_tokens: int = 1028,
+        **kwargs
+    ) -> AsyncResult:
+        headers = {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en-US',
+            'Connection': 'keep-alive',
+            'Origin': 'https://deepinfra.com',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'X-Deepinfra-Source': 'web-embed',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+        }
+        return super().create_async_generator(
+            model, messages,
+            stream=stream,
+            api_base=api_base,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            headers=headers,
+            **kwargs
+        )
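
DeepInfra inherits the OpenAI-compatible request and streaming path from OpenaiAPI, so only the headers and the api_base differ. A minimal call sketch (assumes api_key is forwarded through **kwargs to the OpenaiAPI base class; the key value is a placeholder):

    import asyncio
    from g4f.Provider.needs_auth import DeepInfra

    async def main():
        async for chunk in DeepInfra.create_async_generator(
            model=DeepInfra.default_model,
            messages=[{"role": "user", "content": "Say hello"}],
            stream=True,
            api_key="<DEEPINFRA_API_KEY>",  # placeholder
        ):
            print(chunk, end="", flush=True)

    asyncio.run(main())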
diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py
new file mode 100644
index 00000000..2310c1c8
--- /dev/null
+++ b/g4f/Provider/needs_auth/DeepInfraImage.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+import requests
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession, raise_for_status
+from ...image import ImageResponse
+
+class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://deepinfra.com"
+    parent = "DeepInfra"
+    working = True
+    needs_auth = True
+    default_model = ''
+    image_models = [default_model]
+
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            models = requests.get(url).json()
+            cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"]
+            cls.image_models = cls.models
+        return cls.models
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> AsyncResult:
+        yield await cls.create_async(messages[-1]["content"], model, **kwargs)
+
+    @classmethod
+    async def create_async(
+        cls,
+        prompt: str,
+        model: str,
+        api_key: str = None,
+        api_base: str = "https://api.deepinfra.com/v1/inference",
+        proxy: str = None,
+        timeout: int = 180,
+        extra_data: dict = {},
+        **kwargs
+    ) -> ImageResponse:
+        headers = {
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Accept-Language': 'en-US',
+            'Connection': 'keep-alive',
+            'Origin': 'https://deepinfra.com',
+            'Referer': 'https://deepinfra.com/',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-site',
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
+            'X-Deepinfra-Source': 'web-embed',
+            'sec-ch-ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+        }
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+        async with StreamSession(
+            proxies={"all": proxy},
+            headers=headers,
+            timeout=timeout
+        ) as session:
+            model = cls.get_model(model)
+            data = {"prompt": prompt, **extra_data}
+            data = {"input": data} if model == cls.default_model else data
+            async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response:
+                await raise_for_status(response)
+                data = await response.json()
+                images = data["output"] if "output" in data else data["images"]
+                if not images:
+                    raise RuntimeError(f"Response: {data}")
+                images = images[0] if len(images) == 1 else images
+                return ImageResponse(images, prompt)
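
Because create_async is exposed directly, a one-shot image request is possible without the generator wrapper. A sketch (the model name is illustrative; pick a real one from get_models(), and the key is a placeholder):

    import asyncio
    from g4f.Provider.needs_auth import DeepInfraImage

    async def main():
        image_response = await DeepInfraImage.create_async(
            prompt="a lighthouse at dusk",
            model="<text-to-image model from get_models()>",  # placeholder
            api_key="<DEEPINFRA_API_KEY>",  # placeholder
        )
        print(image_response)

    asyncio.run(main())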
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
new file mode 100644
index 00000000..7e52a194
--- /dev/null
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import base64
+import json
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, ImageType
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import to_bytes, is_accepted_format
+from ...errors import MissingAuthError
+from ..helper import get_connector
+
+class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Gemini API"
+    url = "https://ai.google.dev"
+    working = True
+    supports_message_history = True
+    needs_auth = True
+    default_model = "gemini-1.5-pro-latest"
+    default_vision_model = default_model
+    models = [default_model, "gemini-pro", "gemini-pro-vision", "gemini-1.5-flash"]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = False,
+        proxy: str = None,
+        api_key: str = None,
+        api_base: str = "https://generativelanguage.googleapis.com/v1beta",
+        use_auth_header: bool = False,
+        image: ImageType = None,
+        connector: BaseConnector = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        if not api_key:
+            raise MissingAuthError('Add a "api_key"')
+
+        headers = params = None
+        if use_auth_header:
+            headers = {"Authorization": f"Bearer {api_key}"}
+        else:
+            params = {"key": api_key}
+
+        method = "streamGenerateContent" if stream else "generateContent"
+        url = f"{api_base.rstrip('/')}/models/{model}:{method}"
+        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+            contents = [
+                {
+                    "role": "model" if message["role"] == "assistant" else "user",
+                    "parts": [{"text": message["content"]}]
+                }
+                for message in messages
+                if message["role"] != "system"
+            ]
+            if image is not None:
+                image = to_bytes(image)
+                contents[-1]["parts"].append({
+                    "inline_data": {
+                        "mime_type": is_accepted_format(image),
+                        "data": base64.b64encode(image).decode()
+                    }
+                })
+            data = {
+                "contents": contents,
+                "generationConfig": {
+                    "stopSequences": kwargs.get("stop"),
+                    "temperature": kwargs.get("temperature"),
+                    "maxOutputTokens": kwargs.get("max_tokens"),
+                    "topP": kwargs.get("top_p"),
+                    "topK": kwargs.get("top_k"),
+                }
+            }
+            system_prompt = "\n".join(
+                message["content"]
+                for message in messages
+                if message["role"] == "system"
+            )
+            if system_prompt:
+                data["system_instruction"] = {"parts": {"text": system_prompt}}
+            async with session.post(url, params=params, json=data) as response:
+                if not response.ok:
+                    data = await response.json()
+                    data = data[0] if isinstance(data, list) else data
+                    raise RuntimeError(f"Response {response.status}: {data['error']['message']}")
+                if stream:
+                    lines = []
+                    async for chunk in response.content:
+                        if chunk == b"[{\n":
+                            lines = [b"{\n"]
+                        elif chunk == b",\r\n" or chunk == b"]":
+                            try:
+                                data = b"".join(lines)
+                                data = json.loads(data)
+                                yield data["candidates"][0]["content"]["parts"][0]["text"]
+                            except:
+                                data = data.decode(errors="ignore") if isinstance(data, bytes) else data
+                                raise RuntimeError(f"Read chunk failed: {data}")
+                            lines = []
+                        else:
+                            lines.append(chunk)
+                else:
+                    data = await response.json()
+                    candidate = data["candidates"][0]
+                    if candidate["finishReason"] == "STOP":
+                        yield candidate["content"]["parts"][0]["text"]
+                    else:
+                        yield candidate["finishReason"] + ' ' + candidate["safetyRatings"]
\ No newline at end of file
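
Note how system messages are split out of "contents" and folded into "system_instruction", and how the key travels as a "?key=" query parameter unless use_auth_header is set. A minimal streaming sketch (the API key is a placeholder):

    import asyncio
    from g4f.Provider.needs_auth import GeminiPro

    async def main():
        async for chunk in GeminiPro.create_async_generator(
            model="gemini-1.5-flash",
            messages=[
                {"role": "system", "content": "Answer briefly."},
                {"role": "user", "content": "What is the capital of France?"},
            ],
            stream=True,
            api_key="<GOOGLE_API_KEY>",  # placeholder
        ):
            print(chunk, end="")

    asyncio.run(main())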
diff --git a/g4f/Provider/needs_auth/Groq.py b/g4f/Provider/needs_auth/Groq.py
index 027d98bf..943fc81a 100644
--- a/g4f/Provider/needs_auth/Groq.py
+++ b/g4f/Provider/needs_auth/Groq.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
 
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
 from ...typing import AsyncResult, Messages
 
-class Groq(Openai):
+class Groq(OpenaiAPI):
     label = "Groq"
     url = "https://console.groq.com/playground"
     working = True
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
new file mode 100644
index 00000000..ecc75d1c
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_connector
+from ...errors import RateLimitError, ModelNotFoundError
+from ...requests.raise_for_status import raise_for_status
+
+from ..HuggingChat import HuggingChat
+
+class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://huggingface.co/chat"
+    working = True
+    needs_auth = True
+    supports_message_history = True
+    default_model = HuggingChat.default_model
+    models = HuggingChat.models
+    model_aliases = HuggingChat.model_aliases
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = True,
+        proxy: str = None,
+        connector: BaseConnector = None,
+        api_base: str = "https://api-inference.huggingface.co",
+        api_key: str = None,
+        max_new_tokens: int = 1024,
+        temperature: float = 0.7,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        headers = {
+            'accept': '*/*',
+            'accept-language': 'en',
+            'cache-control': 'no-cache',
+            'origin': 'https://huggingface.co',
+            'pragma': 'no-cache',
+            'priority': 'u=1, i',
+            'referer': 'https://huggingface.co/chat/',
+            'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
+            'sec-ch-ua-mobile': '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest': 'empty',
+            'sec-fetch-mode': 'cors',
+            'sec-fetch-site': 'same-origin',
+            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
+        }
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+
+        params = {
+            "return_full_text": False,
+            "max_new_tokens": max_new_tokens,
+            "temperature": temperature,
+            **kwargs
+        }
+        payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
+
+        async with ClientSession(
+            headers=headers,
+            connector=get_connector(connector, proxy)
+        ) as session:
+            async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
+                if response.status == 404:
+                    raise ModelNotFoundError(f"Model is not supported: {model}")
+                await raise_for_status(response)
+                if stream:
+                    first = True
+                    async for line in response.content:
+                        if line.startswith(b"data:"):
+                            data = json.loads(line[5:])
+                            if not data["token"]["special"]:
+                                chunk = data["token"]["text"]
+                                if first:
+                                    first = False
+                                    chunk = chunk.lstrip()
+                                yield chunk
+                else:
+                    yield (await response.json())[0]["generated_text"].strip()
+
+def format_prompt(messages: Messages) -> str:
+    system_messages = [message["content"] for message in messages if message["role"] == "system"]
+    question = " ".join([messages[-1]["content"], *system_messages])
+    history = "".join([
+        f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
+        for idx, message in enumerate(messages)
+        if message["role"] == "assistant"
+    ])
+    return f"{history}<s>[INST] {question} [/INST]"
diff --git a/g4f/Provider/needs_auth/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
new file mode 100644
index 00000000..4b730abd
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -0,0 +1,238 @@
+from __future__ import annotations
+
+import json
+import uuid
+import random
+import time
+from typing import Dict, List
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
+
+class Sources():
+    def __init__(self, link_list: List[Dict[str, str]]) -> None:
+        self.list = link_list
+
+    def __str__(self) -> str:
+        return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list]))
+
+class AbraGeoBlockedError(Exception):
+    pass
+
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
+    label = "Meta AI"
+    url = "https://www.meta.ai"
+    working = True
+    default_model = ''
+
+    def __init__(self, proxy: str = None, connector: BaseConnector = None):
+        self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
+        self.cookies: Cookies = None
+        self.access_token: str = None
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        async for chunk in cls(proxy).prompt(format_prompt(messages)):
+            yield chunk
+
+    async def update_access_token(self, birthday: str = "1999-01-01"):
+        url = "https://www.meta.ai/api/graphql/"
+        payload = {
+            "lsd": self.lsd,
+            "fb_api_caller_class": "RelayModern",
+            "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation",
+            "variables": json.dumps({
+                "dob": birthday,
+                "icebreaker_type": "TEXT",
+                "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+            }),
+            "doc_id": "7604648749596940",
+        }
+        headers = {
+            "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation",
+            "x-fb-lsd": self.lsd,
+            "x-asbd-id": "129477",
+            "alt-used": "www.meta.ai",
+            "sec-fetch-site": "same-origin"
+        }
+        async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+            await raise_for_status(response, "Fetch access_token failed")
+            auth_json = await response.json(content_type=None)
+            self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"]
+
+    async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult:
+        if self.cookies is None:
+            await self.update_cookies(cookies)
+        if cookies is not None:
+            self.access_token = None
+        if self.access_token is None and cookies is None:
+            await self.update_access_token()
+
+        if self.access_token is None:
+            url = "https://www.meta.ai/api/graphql/"
+            payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+            headers = {'x-fb-lsd': self.lsd}
+        else:
+            url = "https://graph.meta.ai/graphql?locale=user"
+            payload = {"access_token": self.access_token}
+            headers = {}
+        headers = {
+            'content-type': 'application/x-www-form-urlencoded',
+            'cookie': format_cookies(self.cookies),
+            'origin': 'https://www.meta.ai',
+            'referer': 'https://www.meta.ai/',
+            'x-asbd-id': '129477',
+            'x-fb-friendly-name': 'useAbraSendMessageMutation',
+            **headers
+        }
+        payload = {
+            **payload,
+            'fb_api_caller_class': 'RelayModern',
+            'fb_api_req_friendly_name': 'useAbraSendMessageMutation',
+            "variables": json.dumps({
+                "message": {"sensitive_string_value": message},
+                "externalConversationId": str(uuid.uuid4()),
+                "offlineThreadingId": generate_offline_threading_id(),
+                "suggestedPromptIndex": None,
+                "flashVideoRecapInput": {"images": []},
+                "flashPreviewInput": None,
+                "promptPrefix": None,
+                "entrypoint": "ABRA__CHAT__TEXT",
+                "icebreaker_type": "TEXT",
+                "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False,
+                "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+            }),
+            'server_timestamps': 'true',
+            'doc_id': '7783822248314888'
+        }
+        async with self.session.post(url, headers=headers, data=payload) as response:
+            await raise_for_status(response, "Fetch response failed")
+            last_snippet_len = 0
+            fetch_id = None
+            async for line in response.content:
+                if b"<h1>Something Went Wrong</h1>" in line:
+                    raise ResponseError("Response: Something Went Wrong")
+                try:
+                    json_line = json.loads(line)
+                except json.JSONDecodeError:
+                    continue
+                bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
+                streaming_state = bot_response_message.get("streaming_state")
+                fetch_id = bot_response_message.get("fetch_id") or fetch_id
+                if streaming_state in ("STREAMING", "OVERALL_DONE"):
+                    imagine_card = bot_response_message.get("imagine_card")
+                    if imagine_card is not None:
+                        imagine_session = imagine_card.get("session")
+                        if imagine_session is not None:
+                            imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media")
+                            if imagine_medias is not None:
+                                image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview
+                                yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"])
+                    snippet = bot_response_message["snippet"]
+                    new_snippet_len = len(snippet)
+                    if new_snippet_len > last_snippet_len:
+                        yield snippet[last_snippet_len:]
+                        last_snippet_len = new_snippet_len
+            #if last_streamed_response is None:
+            #    if attempts > 3:
+            #        raise Exception("MetaAI is having issues and was not able to respond (Server Error)")
+            #    access_token = await self.get_access_token()
+            #    return await self.prompt(message=message, attempts=attempts + 1)
+            if fetch_id is not None:
+                sources = await self.fetch_sources(fetch_id)
+                if sources is not None:
+                    yield sources
+
+    async def update_cookies(self, cookies: Cookies = None):
+        async with self.session.get("https://www.meta.ai/", cookies=cookies) as response:
+            await raise_for_status(response, "Fetch home failed")
+            text = await response.text()
+            if "AbraGeoBlockedError" in text:
+                raise AbraGeoBlockedError("Meta AI isn't available yet in your country")
+            if cookies is None:
+                cookies = {
+                    "_js_datr": self.extract_value(text, "_js_datr"),
+                    "abra_csrf": self.extract_value(text, "abra_csrf"),
+                    "datr": self.extract_value(text, "datr"),
+                }
+            self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}')
+            self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}')
+            self.cookies = cookies
+
+    async def fetch_sources(self, fetch_id: str) -> Sources:
+        if self.access_token is None:
+            url = "https://www.meta.ai/api/graphql/"
+            payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+            headers = {'x-fb-lsd': self.lsd}
+        else:
+            url = "https://graph.meta.ai/graphql?locale=user"
+            payload = {"access_token": self.access_token}
+            headers = {}
+        payload = {
+            **payload,
+            "fb_api_caller_class": "RelayModern",
+            "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery",
+            "variables": json.dumps({"abraMessageFetchID": fetch_id}),
+            "server_timestamps": "true",
+            "doc_id": "6946734308765963",
+        }
+        headers = {
+            "authority": "graph.meta.ai",
+            "x-fb-friendly-name": "AbraSearchPluginDialogQuery",
+            **headers
+        }
+        async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+            await raise_for_status(response, "Fetch sources failed")
+            text = await response.text()
+            if "<h1>Something Went Wrong</h1>" in text:
+                raise ResponseError("Response: Something Went Wrong")
+            try:
+                response_json = json.loads(text)
+                message = response_json["data"]["message"]
+                if message is not None:
+                    searchResults = message["searchResults"]
+                    if searchResults is not None:
+                        return Sources(searchResults["references"])
+            except (KeyError, TypeError, json.JSONDecodeError):
+                raise RuntimeError(f"Response: {text}")
+
+    @staticmethod
+    def extract_value(text: str, key: str = None, start_str = None, end_str = '",') -> str:
+        if start_str is None:
+            start_str = f'{key}":{{"value":"'
+        start = text.find(start_str)
+        if start >= 0:
+            start += len(start_str)
+            end = text.find(end_str, start)
+            if end >= 0:
+                return text[start:end]
+
+def generate_offline_threading_id() -> str:
+    """
+    Generates an offline threading ID.
+
+    Returns:
+        str: The generated offline threading ID.
+    """
+    # Generate a random 64-bit integer
+    random_value = random.getrandbits(64)
+
+    # Get the current timestamp in milliseconds
+    timestamp = int(time.time() * 1000)
+
+    # Combine timestamp and random value
+    threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
+
+    return str(threading_id)
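
The threading ID packs the millisecond timestamp into the high bits and keeps only 22 random low bits, so IDs remain roughly sortable by creation time. A sketch of how such an ID decomposes:

    tid = int(generate_offline_threading_id())
    timestamp_ms = tid >> 22             # recovers int(time.time() * 1000)
    random_bits = tid & ((1 << 22) - 1)  # the 22 masked random bits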
diff --git a/g4f/Provider/needs_auth/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
new file mode 100644
index 00000000..2d54f3e0
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
+from ..MetaAI import MetaAI
+
+class MetaAIAccount(MetaAI):
+    needs_auth = True
+    parent = "MetaAI"
+    image_models = ["meta"]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        cookies: Cookies = None,
+        **kwargs
+    ) -> AsyncResult:
+        cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
+        async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
+            yield chunk
diff --git a/g4f/Provider/needs_auth/OpenRouter.py b/g4f/Provider/needs_auth/OpenRouter.py
deleted file mode 100644
index 5e0bf336..00000000
--- a/g4f/Provider/needs_auth/OpenRouter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import annotations
-
-import requests
-
-from .Openai import Openai
-from ...typing import AsyncResult, Messages
-
-class OpenRouter(Openai):
-    label = "OpenRouter"
-    url = "https://openrouter.ai"
-    working = False
-    default_model = "mistralai/mistral-7b-instruct:free"
-
-    @classmethod
-    def get_models(cls):
-        if not cls.models:
-            url = 'https://openrouter.ai/api/v1/models'
-            models = requests.get(url).json()["data"]
-            cls.models = [model['id'] for model in models]
-        return cls.models
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://openrouter.ai/api/v1",
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
diff --git a/g4f/Provider/needs_auth/Openai.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 382ebada..116b5f6f 100644
--- a/g4f/Provider/needs_auth/Openai.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -9,7 +9,7 @@ from ...requests import StreamSession, raise_for_status
 from ...errors import MissingAuthError, ResponseError
 from ...image import to_data_uri
 
-class Openai(AsyncGeneratorProvider, ProviderModelMixin):
+class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "OpenAI API"
     url = "https://platform.openai.com"
     working = True
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index f02121e3..3a0d6b29 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -55,6 +55,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     label = "OpenAI ChatGPT"
     url = "https://chatgpt.com"
     working = True
+    needs_auth = True
     supports_gpt_4 = True
     supports_message_history = True
     supports_system_message = True
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
index 3ee65b30..85d7cc98 100644
--- a/g4f/Provider/needs_auth/PerplexityApi.py
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -1,9 +1,9 @@
 from __future__ import annotations
 
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
 from ...typing import AsyncResult, Messages
 
-class PerplexityApi(Openai):
+class PerplexityApi(OpenaiAPI):
     label = "Perplexity API"
     url = "https://www.perplexity.ai"
     working = True
diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
new file mode 100644
index 00000000..ec993aa4
--- /dev/null
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
+
+class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://replicate.com"
+    working = True
+    needs_auth = True
+    default_model = "meta/meta-llama-3-70b-instruct"
+    model_aliases = {
+        "meta-llama/Meta-Llama-3-70B-Instruct": default_model
+    }
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_key: str = None,
+        proxy: str = None,
+        timeout: int = 180,
+        system_prompt: str = None,
+        max_new_tokens: int = None,
+        temperature: float = None,
+        top_p: float = None,
+        top_k: float = None,
+        stop: list = None,
+        extra_data: dict = {},
+        headers: dict = {
+            "accept": "application/json",
+        },
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        if cls.needs_auth and api_key is None:
+            raise MissingAuthError("api_key is missing")
+        if api_key is not None:
+            headers["Authorization"] = f"Bearer {api_key}"
+            api_base = "https://api.replicate.com/v1/models/"
+        else:
+            api_base = "https://replicate.com/api/models/"
+        async with StreamSession(
+            proxy=proxy,
+            headers=headers,
+            timeout=timeout
+        ) as session:
+            data = {
+                "stream": True,
+                "input": {
+                    "prompt": format_prompt(messages),
+                    **filter_none(
+                        system_prompt=system_prompt,
+                        max_new_tokens=max_new_tokens,
+                        temperature=temperature,
+                        top_p=top_p,
+                        top_k=top_k,
+                        stop_sequences=",".join(stop) if stop else None
+                    ),
+                    **extra_data
+                },
+            }
+            url = f"{api_base.rstrip('/')}/{model}/predictions"
+            async with session.post(url, json=data) as response:
+                message = "Model not found" if response.status == 404 else None
+                await raise_for_status(response, message)
+                result = await response.json()
+                if "id" not in result:
+                    raise ResponseError(f"Invalid response: {result}")
+            async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response:
+                await raise_for_status(response)
+                event = None
+                async for line in response.iter_lines():
+                    if line.startswith(b"event: "):
+                        event = line[7:]
+                        if event == b"done":
+                            break
+                    elif event == b"output":
+                        if line.startswith(b"data: "):
+                            new_text = line[6:].decode()
+                            if new_text:
+                                yield new_text
+                            else:
+                                yield "\n"
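
The provider works in two steps: it POSTs a prediction, then follows the returned "stream" URL as server-sent events. A minimal call sketch (the token is a placeholder):

    import asyncio
    from g4f.Provider.needs_auth import Replicate

    async def main():
        async for text in Replicate.create_async_generator(
            model="meta/meta-llama-3-70b-instruct",
            messages=[{"role": "user", "content": "Tell me a joke"}],
            api_key="<REPLICATE_API_TOKEN>",  # placeholder
        ):
            print(text, end="")

    asyncio.run(main())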
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
index 22fc62ed..2006f7ad 100644
--- a/g4f/Provider/needs_auth/ThebApi.py
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from ...typing import CreateResult, Messages
-from .Openai import Openai
+from .OpenaiAPI import OpenaiAPI
 
 models = {
     "theb-ai": "TheB.AI",
@@ -27,7 +27,7 @@ models = {
     "qwen-7b-chat": "Qwen 7B"
 }
 
-class ThebApi(Openai):
+class ThebApi(OpenaiAPI):
     label = "TheB.AI API"
     url = "https://theb.ai"
     working = True
@@ -58,4 +58,4 @@ class ThebApi(Openai):
                 "top_p": top_p,
             }
         }
-        return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
\ No newline at end of file
+        return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
diff --git a/g4f/Provider/needs_auth/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
new file mode 100644
index 00000000..82275c1c
--- /dev/null
+++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_cookies, get_connector, get_random_string
+
+class WhiteRabbitNeo(AsyncGeneratorProvider):
+    url = "https://www.whiterabbitneo.com"
+    working = True
+    supports_message_history = True
+    needs_auth = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        cookies: Cookies = None,
+        connector: BaseConnector = None,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if cookies is None:
+            cookies = get_cookies("www.whiterabbitneo.com")
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
+        }
+        async with ClientSession(
+            headers=headers,
+            cookies=cookies,
+            connector=get_connector(connector, proxy)
+        ) as session:
+            data = {
+                "messages": messages,
+                "id": get_random_string(6),
+                "enhancePrompt": False,
+                "useFunctions": False
+            }
+            async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+                await raise_for_status(response)
+                async for chunk in response.content.iter_any():
+                    if chunk:
+                        yield chunk.decode(errors="ignore")
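
Authentication here is cookie-based rather than key-based; if no cookies are passed, they are read from the local browser via get_cookies. A sketch with an explicit (placeholder) cookie dict:

    import asyncio
    from g4f.Provider.needs_auth import WhiteRabbitNeo

    async def main():
        async for chunk in WhiteRabbitNeo.create_async_generator(
            model="",  # unused by this provider
            messages=[{"role": "user", "content": "Hello"}],
            cookies={"<cookie name>": "<cookie value>"},  # placeholder session cookies
        ):
            print(chunk, end="")

    asyncio.run(main())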
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 0492645d..26c50c0a 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,11 +1,22 @@
-from .Gemini import Gemini
-from .Raycast import Raycast
-from .Theb import Theb
-from .ThebApi import ThebApi
-from .OpenaiChat import OpenaiChat
-from .Poe import Poe
-from .Openai import Openai
-from .Groq import Groq
-from .OpenRouter import OpenRouter
-#from .OpenaiAccount import OpenaiAccount
-from .PerplexityApi import PerplexityApi
+from .gigachat import *
+
+#from .MetaAIAccount import MetaAIAccount
+#from .OpenaiAccount import OpenaiAccount
+
+from .BingCreateImages import BingCreateImages
+from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
+from .Gemini import Gemini
+from .GeminiPro import GeminiPro
+from .Groq import Groq
+from .HuggingFace import HuggingFace
+from .MetaAI import MetaAI
+from .OpenaiAPI import OpenaiAPI
+from .OpenaiChat import OpenaiChat
+from .PerplexityApi import PerplexityApi
+from .Poe import Poe
+from .Raycast import Raycast
+from .Replicate import Replicate
+from .Theb import Theb
+from .ThebApi import ThebApi
+from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
new file mode 100644
index 00000000..c9f1c011
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import os
+import ssl
+import time
+import uuid
+
+import json
+from aiohttp import ClientSession, TCPConnector, BaseConnector
+from g4f.requests import raise_for_status
+
+from ....typing import AsyncResult, Messages
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ....errors import MissingAuthError
+from ...helper import get_connector
+
+access_token = ""
+token_expires_at = 0
+
+class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://developers.sber.ru/gigachat"
+    working = True
+    supports_message_history = True
+    supports_system_message = True
+    supports_stream = True
+    needs_auth = True
+    default_model = "GigaChat:latest"
+    models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"]
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool = True,
+        proxy: str = None,
+        api_key: str = None,
+        connector: BaseConnector = None,
+        scope: str = "GIGACHAT_API_PERS",
+        update_interval: float = 0,
+        **kwargs
+    ) -> AsyncResult:
+        global access_token, token_expires_at
+        model = cls.get_model(model)
+        if not api_key:
+            raise MissingAuthError('Missing "api_key"')
+
+        cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt")
+        ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None
+        if connector is None and ssl_context is not None:
+            connector = TCPConnector(ssl_context=ssl_context)
+        async with ClientSession(connector=get_connector(connector, proxy)) as session:
+            if token_expires_at - int(time.time() * 1000) < 60000:
+                async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth",
+                                        headers={"Authorization": f"Bearer {api_key}",
+                                                 "RqUID": str(uuid.uuid4()),
+                                                 "Content-Type": "application/x-www-form-urlencoded"},
+                                        data={"scope": scope}) as response:
+                    await raise_for_status(response)
+                    data = await response.json()
+                    access_token = data['access_token']
+                    token_expires_at = data['expires_at']
+
+            async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions",
+                                    headers={"Authorization": f"Bearer {access_token}"},
+                                    json={
+                                        "model": model,
+                                        "messages": messages,
+                                        "stream": stream,
+                                        "update_interval": update_interval,
+                                        **kwargs
+                                    }) as response:
+                await raise_for_status(response)
+
+                async for line in response.content:
+                    if not stream:
+                        yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content']
+                        return
+
+                    if line and line.startswith(b"data:"):
+                        line = line[6:-1]  # remove "data: " prefix and "\n" suffix
+                        if line.strip() == b"[DONE]":
+                            return
+                        else:
+                            msg = json.loads(line.decode("utf-8"))['choices'][0]
+                            content = msg['delta']['content']
+
+                            if content:
+                                yield content
+
+                            if 'finish_reason' in msg:
+                                return
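
The api_key here is the GigaChat authorization key: it is first exchanged at the ngw.devices.sberbank.ru OAuth endpoint for a short-lived access token, which is cached in the module-level globals and refreshed about a minute before expiry. A usage sketch (the key is a placeholder):

    import asyncio
    from g4f.Provider.needs_auth.gigachat import GigaChat

    async def main():
        async for chunk in GigaChat.create_async_generator(
            model="GigaChat:latest",
            messages=[{"role": "user", "content": "Hello!"}],
            api_key="<GIGACHAT_AUTH_KEY>",  # placeholder; exchanged for an access token
        ):
            print(chunk, end="")

    asyncio.run(main())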
diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
new file mode 100644
index 00000000..c9853742
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
@@ -0,0 +1,2 @@
+from .GigaChat import GigaChat
+
diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
new file mode 100644
index 00000000..4c143a21
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx
+PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu
+ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg
+Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS
+VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg
+YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n
+qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q
+XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U
+zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX
+YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y
+Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD
+U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD
+4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9
+G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH
+BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX
+ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa
+OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf
+BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS
+BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
+AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH
+tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq
+W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+
+/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS
+AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj
+C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV
+4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d
+WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ
+D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC
+EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq
+391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4=
+-----END CERTIFICATE-----
\ No newline at end of file