diff --git a/g4f/Provider/AutonomousAI.py b/g4f/Provider/AutonomousAI.py
index dcd81c5726f..990fb48e9f6 100644
--- a/g4f/Provider/AutonomousAI.py
+++ b/g4f/Provider/AutonomousAI.py
@@ -6,8 +6,8 @@

 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
+from ..providers.response import FinishReason
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt

 class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.autonomous.ai/anon/"
@@ -32,7 +32,6 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
         "qwen-2.5-coder-32b": "qwen_coder",
         "hermes-3": "hermes",
         "llama-3.2-90b": "vision",
-        "llama-3.3-70b": "summary"
     }

     @classmethod
@@ -57,12 +56,8 @@ async def create_async_generator(
         }

         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
-
-            # Encode message
-            message = [{"role": "user", "content": prompt}]
-            message_json = json.dumps(message)
-            encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8')
+            message_json = json.dumps(messages)
+            encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")

             data = {
                 "messages": encoded_message,
@@ -84,7 +79,9 @@ async def create_async_generator(
                             chunk_data = json.loads(chunk_str.replace("data: ", ""))
                             if "choices" in chunk_data and chunk_data["choices"]:
                                 delta = chunk_data["choices"][0].get("delta", {})
-                                if "content" in delta:
+                                if "content" in delta and delta["content"]:
                                     yield delta["content"]
+                            if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
+                                yield FinishReason(chunk_data["finish_reason"])
                         except json.JSONDecodeError:
                             continue
diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py
index 594a866d655..7bbbb430d40 100644
--- a/g4f/Provider/CablyAI.py
+++ b/g4f/Provider/CablyAI.py
@@ -1,38 +1,26 @@
 from __future__ import annotations

-from aiohttp import ClientSession
-import json
-
 from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-
+from .needs_auth import OpenaiAPI

-class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
+class CablyAI(OpenaiAPI):
     url = "https://cablyai.com"
-    api_endpoint = "https://cablyai.com/v1/chat/completions"
-
+    login_url = None
+    needs_auth = False
+    api_base = "https://cablyai.com/v1"
     working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True

     default_model = "Cably-80B"
     models = [default_model]
-
     model_aliases = {"cably-80b": default_model}

     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        stream: bool = False,
-        proxy: str = None,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model)
         headers = {
             'Accept': '*/*',
             'Accept-Language': 'en-US,en;q=0.9',
@@ -41,31 +29,9 @@ async def create_async_generator(
             'Referer': 'https://cablyai.com/chat',
             'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
         }
-
-        async with ClientSession(headers=headers) as session:
-            data = {
-                "model": model,
-                "messages": messages,
-                "stream": stream
-            }
-
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                await raise_for_status(response)
-                buffer = ""
-                async for chunk in response.content:
-                    if chunk:
-                        buffer += chunk.decode()
-                        while "\n\n" in buffer:
-                            chunk_data, buffer = buffer.split("\n\n", 1)
-                            if chunk_data.startswith("data: "):
-                                try:
-                                    json_data = json.loads(chunk_data[6:])
-                                    if "choices" in json_data and json_data["choices"]:
-                                        content = json_data["choices"][0]["delta"].get("content", "")
-                                        if content:
-                                            yield content
-                                except json.JSONDecodeError:
-                                    # Skip invalid JSON
-                                    pass
-                            elif chunk_data.strip() == "data: [DONE]":
-                                return
+        return super().create_async_generator(
+            model=model,
+            messages=messages,
+            headers=headers,
+            **kwargs
+        )
\ No newline at end of file
diff --git a/g4f/Provider/ChatGLM.py b/g4f/Provider/ChatGLM.py
index 7869f8492fe..f0b2da98976 100644
--- a/g4f/Provider/ChatGLM.py
+++ b/g4f/Provider/ChatGLM.py
@@ -8,7 +8,6 @@
 from ..typing import AsyncResult, Messages
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt

 class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatglm.cn"
@@ -17,7 +16,7 @@ class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_stream = True
     supports_system_message = False
-    supports_message_history = True
+    supports_message_history = False

     default_model = "all-tools-230b"
     models = [default_model]
@@ -47,7 +46,6 @@ async def create_async_generator(
         }

         async with ClientSession(headers=headers) as session:
-            prompt = format_prompt(messages)
             data = {
                 "assistant_id": "65940acff94777010aa6b796",
                 "conversation_id": "",
@@ -62,17 +60,19 @@ async def create_async_generator(
                 },
                 "messages": [
                     {
-                        "role": "user",
+                        "role": message["role"],
                         "content": [
                             {
                                 "type": "text",
-                                "text": prompt
+                                "text": message["content"]
                             }
                         ]
                     }
+                    for message in messages
                 ]
             }

+            yield_text = 0
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 await raise_for_status(response)
                 async for chunk in response.content:
@@ -85,8 +85,9 @@ async def create_async_generator(
                             if parts:
                                 content = parts[0].get('content', [])
                                 if content:
-                                    text = content[0].get('text', '')
+                                    text = content[0].get('text', '')[yield_text:]
                                     if text:
                                         yield text
+                                        yield_text += len(text)
                 except json.JSONDecodeError:
                     pass
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index 9222131e474..f5e2ece6612 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -2,7 +2,6 @@

 import os
 import re
-import json

 from aiohttp import ClientSession

@@ -17,20 +16,14 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):

     working = True
     supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
+    supports_system_message = False
+    supports_message_history = False

     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']

     SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:"

-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -68,10 +61,9 @@ async def create_async_generator(
             'wpaicg_chat_client_id': os.urandom(5).hex(),
             'wpaicg_chat_history': None
         }
-        print(payload['message'])

         async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
-            response.raise_for_status()
+            await raise_for_status(response)
             result = await response.json()
             if "Du musst das Kästchen anklicken!" in result['data']:
                 raise ValueError(result['data'])
diff --git a/g4f/Provider/ChatGptt.py b/g4f/Provider/ChatGptt.py
index 19c7287a5ff..f61b95fee98 100644
--- a/g4f/Provider/ChatGptt.py
+++ b/g4f/Provider/ChatGptt.py
@@ -2,7 +2,6 @@

 import os
 import re
-import json

 from aiohttp import ClientSession

@@ -23,12 +22,6 @@ class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = 'gpt-4o'
     models = ['gpt-4', default_model, 'gpt-4o-mini']

-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        return cls.model_aliases[model]
-
     @classmethod
     async def create_async_generator(
         cls,
@@ -64,7 +57,7 @@ async def create_async_generator(
             'wpaicg_chat_history': None
         }

-        async with session.post(cls.api_endpoint, headers=headers, data=payload) as response:
+        async with session.post(cls.api_endpoint, headers=headers, data=payload, proxy=proxy) as response:
             await raise_for_status(response)
             result = await response.json()
-            yield result['data']
+            yield result['data']
\ No newline at end of file
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index 22f32ef39fe..81d7e3fcf63 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -1,15 +1,23 @@
 from __future__ import annotations

-from aiohttp import ClientSession, ClientTimeout, ClientError
+from aiohttp import ClientSession, ClientTimeout
 import json
 import asyncio
 import random

-from ..typing import AsyncResult, Messages
+from ..typing import AsyncResult, Messages, Cookies
 from ..requests.raise_for_status import raise_for_status
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import format_prompt
+from ..providers.response import FinishReason, JsonConversation

+class Conversation(JsonConversation):
+    vqd: str = None
+    message_history: Messages = []
+    cookies: dict = {}
+
+    def __init__(self, model: str):
+        self.model = model

 class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     label = "DuckDuckGo AI Chat"
@@ -74,42 +82,54 @@ async def create_async_generator(
         messages: Messages,
         proxy: str = None,
         timeout: int = 30,
+        cookies: Cookies = None,
+        conversation: Conversation = None,
+        return_conversation: bool = False,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
-
-        async with ClientSession(timeout=ClientTimeout(total=timeout)) as session:
-            try:
-                # Fetch VQD token with retries
-                vqd = await cls.fetch_vqd(session)
-
-                headers = {
-                    "accept": "text/event-stream",
-                    "content-type": "application/json",
-                    "x-vqd-4": vqd,
-                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36"
-                }
-
-                data = {
-                    "model": model,
-                    "messages": [{"role": "user", "content": format_prompt(messages)}],
-                }
-
-                async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
-                    await raise_for_status(response)
-                    async for line in response.content:
-                        line = line.decode("utf-8").strip()
-                        if line.startswith("data:"):
-                            try:
-                                message = json.loads(line[5:].strip())
-                                if "message" in message:
+        if cookies is None and conversation is not None:
+            cookies = conversation.cookies
+        async with ClientSession(timeout=ClientTimeout(total=timeout), cookies=cookies) as session:
+            # Fetch VQD token
+            if conversation is None:
+                conversation = Conversation(model)
+                conversation.vqd = await cls.fetch_vqd(session)
+                conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
+            else:
+                conversation.message_history.append(messages[-1])
+            headers = {
+                "accept": "text/event-stream",
+                "content-type": "application/json",
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
+                "x-vqd-4": conversation.vqd,
+            }
+            data = {
+                "model": model,
+                "messages": conversation.message_history,
+            }
+            async with session.post(cls.api_endpoint, json=data, headers=headers, proxy=proxy) as response:
+                await raise_for_status(response)
+                reason = None
+                full_message = ""
+                async for line in response.content:
+                    line = line.decode("utf-8").strip()
+                    if line.startswith("data:"):
+                        try:
+                            message = json.loads(line[5:].strip())
+                            if "message" in message:
+                                if message["message"]:
                                     yield message["message"]
-                            except json.JSONDecodeError:
-                                continue
-
-            except ClientError as e:
-                raise Exception(f"HTTP ClientError occurred: {e}")
-            except asyncio.TimeoutError:
-                raise Exception("Request timed out.")
-            except Exception as e:
-                raise Exception(f"An error occurred: {str(e)}")
+                                    full_message += message["message"]
+                                    reason = "length"
+                                else:
+                                    reason = "stop"
+                        except json.JSONDecodeError:
+                            continue
+                if return_conversation:
+                    conversation.message_history.append({"role": "assistant", "content": full_message})
+                    conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd)
+                    conversation.cookies = {n: c.value for n, c in session.cookie_jar.filter_cookies(cls.url).items()}
+                    yield conversation
+                if reason is not None:
+                    yield FinishReason(reason)
\ No newline at end of file
diff --git a/g4f/Provider/ImageLabs.py b/g4f/Provider/ImageLabs.py
index c52860d0dd3..2493cf4afd9 100644
--- a/g4f/Provider/ImageLabs.py
+++ b/g4f/Provider/ImageLabs.py
@@ -48,7 +48,7 @@ async def create_async_generator(
         }

         async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"]
+            prompt = messages[-1]["content"] if prompt is None else prompt

             # Generate image
             payload = {
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 16946a1e117..9829e59d1a5 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -46,4 +46,6 @@ async def create_async_generator(
                 response_json = await response.json()
                 content = response_json.get("answer", response_json).get("content")
                 if content:
+                    if "misuse detected. please get in touch" in content:
+                        raise ValueError(content)
                     yield content
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index f7328b30c86..7755c930e0f 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -11,14 +11,13 @@
 from ..requests.raise_for_status import raise_for_status
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
-from .helper import format_prompt

 class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI"
     url = "https://pollinations.ai"

     working = True
-    supports_stream = True
+    supports_stream = False
     supports_system_message = True
     supports_message_history = True

@@ -172,9 +171,9 @@ async def _generate_image(
         params = {k: v for k, v in params.items() if v is not None}

         async with ClientSession(headers=headers) as session:
-            prompt = quote(messages[-1]["content"] if prompt is None else prompt)
+            prompt = messages[-1]["content"] if prompt is None else prompt
             param_string = "&".join(f"{k}={v}" for k, v in params.items())
-            url = f"{cls.image_api_endpoint}/prompt/{prompt}?{param_string}"
+            url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"

             async with session.head(url, proxy=proxy) as response:
                 if response.status == 200:
diff --git a/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
index 573c64b8ecd..a49a0debb69 100644
--- a/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
+++ b/g4f/Provider/hf_space/Qwen_Qwen_2_72B_Instruct.py
@@ -1,6 +1,5 @@
 from __future__ import annotations

-import asyncio
 import aiohttp
 import json
 import uuid
@@ -9,7 +8,7 @@
 from ...typing import AsyncResult, Messages
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt
-
+from ... import debug

 class Qwen_Qwen_2_72B_Instruct(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://qwen-qwen2-72b-instruct.hf.space"
@@ -49,10 +48,12 @@ def generate_session_hash():
         }

         # Prepare the prompt
+        system_prompt = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+        messages = [message for message in messages if message["role"] != "system"]
         prompt = format_prompt(messages)

         payload_join = {
-            "data": [prompt, [], ""],
+            "data": [prompt, [], system_prompt],
             "event_data": None,
             "fn_index": 0,
             "trigger_id": 11,
@@ -87,7 +88,7 @@ def generate_session_hash():
                     if decoded_line.startswith('data: '):
                         try:
                             json_data = json.loads(decoded_line[6:])
-                            
+
                             # Look for generation stages
                             if json_data.get('msg') == 'process_generating':
                                 if 'output' in json_data and 'data' in json_data['output']:
@@ -97,10 +98,10 @@ def generate_session_hash():
                                         if isinstance(item, list) and len(item) > 1:
                                             fragment = str(item[1])
                                             # Ignore [0, 1] type fragments and duplicates
-                                            if not re.match(r'^\[.*\]$', fragment) and fragment not in full_response:
+                                            if not re.match(r'^\[.*\]$', fragment) and not full_response.endswith(fragment):
                                                 full_response += fragment
                                                 yield fragment
-                            
+
                             # Check for completion
                             if json_data.get('msg') == 'process_completed':
                                 # Final check to ensure we get the complete response
@@ -117,8 +118,6 @@ def generate_session_hash():
                                     if final_full_response:
                                         yield final_full_response
                                 break
-                            
+
                         except json.JSONDecodeError:
-                            print("Could not parse JSON:", decoded_line)
-                        except Exception as e:
-                            print(f"Error processing response: {e}")
+                            debug.log("Could not parse JSON:", decoded_line)
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
new file mode 100644
index 00000000000..d78e5e28b7e
--- /dev/null
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -0,0 +1,11 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+
+class Custom(OpenaiAPI):
+    label = "Custom"
+    url = None
+    login_url = "http://localhost:8080"
+    working = True
+    api_base = "http://localhost:8080/v1"
+    needs_auth = False
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/DeepSeek.py b/g4f/Provider/needs_auth/DeepSeek.py
index 0f1b96d6dd4..b0a40898bed 100644
--- a/g4f/Provider/needs_auth/DeepSeek.py
+++ b/g4f/Provider/needs_auth/DeepSeek.py
@@ -12,4 +12,4 @@ class DeepSeek(OpenaiAPI):
     supports_stream = True
     supports_message_history = True
     default_model = "deepseek-chat"
-    models = [default_model]
+    fallback_models = [default_model]
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 75c3574fd28..3cc558bdb89 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -36,7 +36,7 @@ def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
             response = requests.get(f"{api_base}/models", headers=headers)
             raise_for_status(response)
             data = response.json()
-            cls.models = [model.get("id") for model in data.get("data")]
+            cls.models = [model.get("id") for model in (data.get("data") if isinstance(data, dict) else data)]
             cls.models.sort()
         except Exception as e:
             debug.log(e)
@@ -136,6 +136,7 @@ async def create_async_generator(
                     finish = cls.read_finish_reason(choice)
                     if finish is not None:
                         yield finish
+                        break

     @staticmethod
     def read_finish_reason(choice: dict) -> Optional[FinishReason]:
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 119a118d261..96241d6abc4 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -3,6 +3,7 @@
 from .BingCreateImages import BingCreateImages
 from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount
+from .Custom import Custom
 from .DeepInfra import DeepInfra
 from .DeepInfraImage import DeepInfraImage
 from .DeepSeek import DeepSeek
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index cd2a43feee0..69ed37feb65 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -246,7 +246,6 @@ def create(
             model,
             self.provider if provider is None else provider,
             stream,
-            ignored,
             ignore_working,
             ignore_stream,
         )
@@ -535,7 +534,6 @@ def create(
             model,
             self.provider if provider is None else provider,
             stream,
-            ignored,
             ignore_working,
             ignore_stream,
         )
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index f7f0f565c29..ff4358c24c9 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -144,12 +144,17 @@ def decorated_log(text: str):
         debug.log = decorated_log
         proxy = os.environ.get("G4F_PROXY")
         provider = kwargs.get("provider")
-        model, provider_handler = get_model_and_provider(
-            kwargs.get("model"), provider,
-            stream=True,
-            ignore_stream=True,
-            logging=False
-        )
+        try:
+            model, provider_handler = get_model_and_provider(
+                kwargs.get("model"), provider,
+                stream=True,
+                ignore_stream=True,
+                logging=False
+            )
+        except Exception as e:
+            logger.exception(e)
+            yield self._format_json('error', get_error_message(e))
+            return
         params = {
             **(provider_handler.get_parameters(as_json=True) if hasattr(provider_handler, "get_parameters") else {}),
             "model": model,